-rw-r--r--Documentation/ABI/testing/sysfs-class-net8
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-cdc_ncm149
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-queues79
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-statistics201
-rw-r--r--Documentation/DocBook/80211.tmpl1
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe-phy.txt17
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe.txt34
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt2
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-systemport.txt29
-rw-r--r--Documentation/devicetree/bindings/net/can/xilinx_can.txt44
-rw-r--r--Documentation/devicetree/bindings/net/cpsw-phy-sel.txt4
-rw-r--r--Documentation/devicetree/bindings/net/fixed-link.txt42
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt5
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt36
-rw-r--r--Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt23
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ks8851.txt15
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz9021.txt49
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz90x1.txt83
-rw-r--r--Documentation/devicetree/bindings/net/nfc/pn544.txt35
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st21nfca.txt33
-rw-r--r--Documentation/devicetree/bindings/net/nfc/trf7970a.txt2
-rw-r--r--Documentation/devicetree/bindings/net/via-rhine.txt17
-rw-r--r--Documentation/driver-model/devres.txt5
-rw-r--r--Documentation/networking/bonding.txt44
-rw-r--r--Documentation/networking/can.txt35
-rw-r--r--Documentation/networking/cdc_mbim.txt339
-rw-r--r--Documentation/networking/filter.txt423
-rw-r--r--MAINTAINERS19
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-xp-matrix.dts4
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi6
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi6
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi6
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/net/bpf_jit_32.c139
-rw-r--r--arch/mips/bcm47xx/sprom.c1
-rw-r--r--arch/powerpc/net/bpf_jit_64.S2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c157
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c32
-rw-r--r--arch/s390/net/bpf_jit_comp.c163
-rw-r--r--arch/sparc/include/asm/checksum_32.h12
-rw-r--r--arch/sparc/include/asm/checksum_64.h12
-rw-r--r--arch/sparc/net/bpf_jit_comp.c162
-rw-r--r--arch/x86/include/asm/checksum_64.h9
-rw-r--r--arch/x86/net/bpf_jit.S77
-rw-r--r--arch/x86/net/bpf_jit_comp.c1399
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/idt77252.c8
-rw-r--r--drivers/bluetooth/ath3k.c3
-rw-r--r--drivers/bluetooth/btmrvl_drv.h4
-rw-r--r--drivers/bluetooth/btmrvl_main.c19
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c103
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h3
-rw-r--r--drivers/bluetooth/btusb.c155
-rw-r--r--drivers/bluetooth/hci_h4.c7
-rw-r--r--drivers/clk/ti/clk-43xx.c16
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/vmbus_drv.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c2
-rw-r--r--drivers/isdn/capi/Kconfig18
-rw-r--r--drivers/isdn/capi/capi.c4
-rw-r--r--drivers/isdn/capi/capidrv.c195
-rw-r--r--drivers/isdn/capi/capiutil.c200
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c111
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c4
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c14
-rw-r--r--drivers/mmc/host/mmc_spi.c18
-rw-r--r--drivers/net/bonding/bond_3ad.c62
-rw-r--r--drivers/net/bonding/bond_alb.c153
-rw-r--r--drivers/net/bonding/bond_alb.h1
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c218
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c66
-rw-r--r--drivers/net/bonding/bond_options.h2
-rw-r--r--drivers/net/bonding/bond_procfs.c16
-rw-r--r--drivers/net/bonding/bond_sysfs.c567
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/bonding/bonding.h143
-rw-r--r--drivers/net/can/Kconfig30
-rw-r--r--drivers/net/can/Makefile4
-rw-r--r--drivers/net/can/c_can/c_can.c15
-rw-r--r--drivers/net/can/c_can/c_can.h8
-rw-r--r--drivers/net/can/c_can/c_can_pci.c78
-rw-r--r--drivers/net/can/c_can/c_can_platform.c84
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/rcar_can.c876
-rw-r--r--drivers/net/can/softing/softing_main.c20
-rw-r--r--drivers/net/can/spi/Kconfig10
-rw-r--r--drivers/net/can/spi/Makefile8
-rw-r--r--drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c)95
-rw-r--r--drivers/net/can/usb/Kconfig12
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/gs_usb.c971
-rw-r--r--drivers/net/can/usb/kvaser_usb.c53
-rw-r--r--drivers/net/can/xilinx_can.c1208
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c2
-rw-r--r--drivers/net/dsa/mv88e6131.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig14
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c3
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/mvme147.c6
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h1007
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c375
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c556
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2182
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c1351
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c510
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c512
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c433
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h676
-rw-r--r--drivers/net/ethernet/arc/emac_main.c49
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1654
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h678
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c49
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h4
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c29
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c104
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c6
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c263
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h32
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c67
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c323
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c66
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c6
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c20
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c610
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h85
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c194
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c581
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.h13
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c661
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c22
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig27
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile5
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c1066
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h41
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c88
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h137
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c141
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c311
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1124
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c131
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c209
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c61
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h164
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h12
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c84
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h67
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c423
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c185
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c62
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c121
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h100
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c134
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c56
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h49
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c151
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c203
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c60
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c89
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c85
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c80
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c96
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c356
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c68
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c164
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c66
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h62
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c376
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c18
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c324
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c278
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c87
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c5
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c22
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c16
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h36
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c44
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c42
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c92
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c69
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c192
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c171
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c47
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c10
-rw-r--r--drivers/net/ethernet/sfc/io.h7
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c3
-rw-r--r--drivers/net/ethernet/sfc/tx.c22
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c17
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c62
-rw-r--r--drivers/net/ethernet/ti/cpsw.c114
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c39
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c50
-rw-r--r--drivers/net/ethernet/tile/tilegx.c13
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c511
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h163
-rw-r--r--drivers/net/hyperv/netvsc.c529
-rw-r--r--drivers/net/hyperv/netvsc_drv.c129
-rw-r--r--drivers/net/hyperv/rndis_filter.c193
-rw-r--r--drivers/net/ieee802154/at86rf230.c133
-rw-r--r--drivers/net/ieee802154/fakelb.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c33
-rw-r--r--drivers/net/irda/Kconfig3
-rw-r--r--drivers/net/irda/via-ircc.c7
-rw-r--r--drivers/net/irda/w83977af_ir.c33
-rw-r--r--drivers/net/macvlan.c262
-rw-r--r--drivers/net/ntb_netdev.c3
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c1357
-rw-r--r--drivers/net/phy/at803x.c39
-rw-r--r--drivers/net/phy/fixed.c81
-rw-r--r--drivers/net/phy/mdio_bus.c73
-rw-r--r--drivers/net/phy/micrel.c106
-rw-r--r--drivers/net/phy/phy_device.c50
-rw-r--r--drivers/net/phy/realtek.c88
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/team/team_mode_loadbalance.c12
-rw-r--r--drivers/net/tun.c54
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c129
-rw-r--r--drivers/net/usb/cdc_ncm.c740
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c13
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c6
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c14
-rw-r--r--drivers/net/vxlan.c187
-rw-r--r--drivers/net/wan/farsync.c31
-rw-r--r--drivers/net/wan/sdla.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c7
-rw-r--r--drivers/net/wireless/at76c50x-usb.c180
-rw-r--r--drivers/net/wireless/at76c50x-usb.h26
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c383
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c366
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h26
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h37
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c587
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c990
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c336
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c90
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig30
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c31
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c253
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h72
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c214
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c555
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c163
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c8
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c45
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c28
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h50
-rw-r--r--drivers/net/wireless/b43/Kconfig42
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/bus.h10
-rw-r--r--drivers/net/wireless/b43/main.c498
-rw-r--r--drivers/net/wireless/b43/phy_common.c96
-rw-r--r--drivers/net/wireless/b43/phy_common.h8
-rw-r--r--drivers/net/wireless/b43/phy_g.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c321
-rw-r--r--drivers/net/wireless/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/b43/radio_2056.c1336
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c150
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h3
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c283
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c332
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/nvram.h)24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c80
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.c94
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c270
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c213
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/d11.c93
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h14
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h1
-rw-r--r--drivers/net/wireless/cw1200/sta.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.h3
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c3
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c19
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c39
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c29
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h)32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h60
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c114
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c129
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c61
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h54
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c204
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h60
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c102
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c97
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c410
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c491
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c189
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c71
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c87
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c14
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h33
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c86
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c137
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c197
-rw-r--r--drivers/net/wireless/libertas/cfg.c7
-rw-r--r--drivers/net/wireless/libertas/defs.h3
-rw-r--r--drivers/net/wireless/libertas/rx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c45
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c28
-rw-r--r--drivers/net/wireless/mwifiex/README7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c19
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c25
-rw-r--r--drivers/net/wireless/mwifiex/decl.h8
-rw-r--r--drivers/net/wireless/mwifiex/fw.h25
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h2
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/mwifiex/scan.c66
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c15
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h18
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c7
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c18
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c44
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c16
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c4
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c97
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/mwifiex/usb.c55
-rw-r--r--drivers/net/wireless/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c22
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h5
-rw-r--r--drivers/net/wireless/orinoco/hw.c4
-rw-r--r--drivers/net/wireless/orinoco/hw.h4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/orinoco/wext.c4
-rw-r--r--drivers/net/wireless/p54/main.c3
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c27
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h6
-rw-r--r--drivers/net/wireless/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c68
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c44
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c28
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c69
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h4
-rw-r--r--drivers/net/xen-netback/common.h107
-rw-r--r--drivers/net/xen-netback/interface.c523
-rw-r--r--drivers/net/xen-netback/netback.c754
-rw-r--r--drivers/net/xen-netback/xenbus.c182
-rw-r--r--drivers/net/xen-netfront.c1123
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn544/i2c.c154
-rw-r--r--drivers/nfc/port100.c36
-rw-r--r--drivers/nfc/st21nfca/Kconfig23
-rw-r--r--drivers/nfc/st21nfca/Makefile8
-rw-r--r--drivers/nfc/st21nfca/i2c.c724
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c698
-rw-r--r--drivers/nfc/st21nfca/st21nfca.h87
-rw-r--r--drivers/nfc/trf7970a.c261
-rw-r--r--drivers/of/of_mdio.c206
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c14
-rw-r--r--drivers/s390/net/lcs.c15
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c100
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c18
-rw-r--r--drivers/s390/net/qeth_l3_main.c21
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/staging/et131x/et131x.c2
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c2
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/rtl8192ee/core.c4
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/staging/rtl8821ae/core.c4
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c4
-rw-r--r--drivers/usb/gadget/u_ether.c4
-rw-r--r--include/linux/ath9k_platform.h2
-rw-r--r--include/linux/can/core.h6
-rw-r--r--include/linux/can/dev.h6
-rw-r--r--include/linux/can/led.h6
-rw-r--r--include/linux/can/platform/cc770.h6
-rw-r--r--include/linux/can/platform/mcp251x.h6
-rw-r--r--include/linux/can/platform/rcar_can.h17
-rw-r--r--include/linux/can/platform/sja1000.h6
-rw-r--r--include/linux/can/platform/ti_hecc.h6
-rw-r--r--include/linux/can/skb.h6
-rw-r--r--include/linux/cpumask.h8
-rw-r--r--include/linux/crc7.h8
-rw-r--r--include/linux/ethtool.h21
-rw-r--r--include/linux/filter.h384
-rw-r--r--include/linux/ieee80211.h1
-rw-r--r--include/linux/if_bridge.h19
-rw-r--r--include/linux/if_link.h3
-rw-r--r--include/linux/if_macvlan.h3
-rw-r--r--include/linux/if_vlan.h4
-rw-r--r--include/linux/isdn/capiutil.h5
-rw-r--r--include/linux/ktime.h24
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h99
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h8
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/nl802154.h31
-rw-r--r--include/linux/of_mdio.h29
-rw-r--r--include/linux/phy.h8
-rw-r--r--include/linux/phy_fixed.h16
-rw-r--r--include/linux/platform_data/st21nfca.h32
-rw-r--r--include/linux/rfkill-gpio.h10
-rw-r--r--include/linux/skbuff.h151
-rw-r--r--include/linux/spi/at86rf230.h14
-rw-r--r--include/linux/ssb/ssb.h1
-rw-r--r--include/linux/tcp.h10
-rw-r--r--include/linux/udp.h24
-rw-r--r--include/linux/usb/cdc_ncm.h35
-rw-r--r--include/net/6lowpan.h1
-rw-r--r--include/net/addrconf.h5
-rw-r--r--include/net/af_ieee802154.h10
-rw-r--r--include/net/bluetooth/hci.h22
-rw-r--r--include/net/bluetooth/hci_core.h24
-rw-r--r--include/net/bluetooth/mgmt.h15
-rw-r--r--include/net/bluetooth/rfcomm.h6
-rw-r--r--include/net/cfg80211.h275
-rw-r--r--include/net/checksum.h2
-rw-r--r--include/net/dsa.h5
-rw-r--r--include/net/gre.h5
-rw-r--r--include/net/ieee802154.h9
-rw-r--r--include/net/ieee802154_netdev.h187
-rw-r--r--include/net/inet_ecn.h2
-rw-r--r--include/net/inet_hashtables.h8
-rw-r--r--include/net/inet_sock.h10
-rw-r--r--include/net/inetpeer.h17
-rw-r--r--include/net/ip.h83
-rw-r--r--include/net/ip6_checksum.h19
-rw-r--r--include/net/ip6_route.h2
-rw-r--r--include/net/ipv6.h24
-rw-r--r--include/net/mac80211.h155
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netfilter/nf_nat.h2
-rw-r--r--include/net/netfilter/nf_tables.h130
-rw-r--r--include/net/netfilter/nft_meta.h36
-rw-r--r--include/net/netns/ipv4.h7
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/nfc/digital.h4
-rw-r--r--include/net/nfc/hci.h1
-rw-r--r--include/net/nfc/nfc.h3
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/protocol.h1
-rw-r--r--include/net/regulatory.h6
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/structs.h6
-rw-r--r--include/net/secure_seq.h2
-rw-r--r--include/net/snmp.h32
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h47
-rw-r--r--include/net/tso.h20
-rw-r--r--include/net/udp.h18
-rw-r--r--include/net/vxlan.h14
-rw-r--r--include/net/xfrm.h57
-rw-r--r--include/uapi/linux/audit.h8
-rw-r--r--include/uapi/linux/can.h6
-rw-r--r--include/uapi/linux/can/bcm.h6
-rw-r--r--include/uapi/linux/can/error.h6
-rw-r--r--include/uapi/linux/can/gw.h6
-rw-r--r--include/uapi/linux/can/netlink.h6
-rw-r--r--include/uapi/linux/can/raw.h6
-rw-r--r--include/uapi/linux/capability.h7
-rw-r--r--include/uapi/linux/ethtool.h35
-rw-r--r--include/uapi/linux/filter.h3
-rw-r--r--include/uapi/linux/if_fddi.h90
-rw-r--r--include/uapi/linux/if_link.h12
-rw-r--r--include/uapi/linux/if_tunnel.h2
-rw-r--r--include/uapi/linux/l2tp.h2
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h37
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_acct.h9
-rw-r--r--include/uapi/linux/nfc.h16
-rw-r--r--include/uapi/linux/nl80211.h67
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/tipc.h23
-rw-r--r--include/uapi/linux/tipc_config.h10
-rw-r--r--include/uapi/linux/udp.h2
-rw-r--r--include/xen/interface/io/netif.h53
-rw-r--r--kernel/audit.c64
-rw-r--r--kernel/seccomp.c110
-rw-r--r--kernel/sysctl.c4
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/Makefile1
-rw-r--r--lib/cpumask.c63
-rw-r--r--lib/crc7.c84
-rw-r--r--lib/test_bpf.c1929
-rw-r--r--net/8021q/vlan_core.c6
-rw-r--r--net/8021q/vlan_dev.c62
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/svc.c10
-rw-r--r--net/batman-adv/debugfs.c13
-rw-r--r--net/batman-adv/distributed-arp-table.c3
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/network-coding.c3
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/sysfs.c14
-rw-r--r--net/bluetooth/6lowpan.c65
-rw-r--r--net/bluetooth/hci_conn.c84
-rw-r--r--net/bluetooth/hci_core.c84
-rw-r--r--net/bluetooth/hci_event.c311
-rw-r--r--net/bluetooth/hci_sock.c17
-rw-r--r--net/bluetooth/l2cap_core.c6
-rw-r--r--net/bluetooth/l2cap_sock.c5
-rw-r--r--net/bluetooth/lib.c1
-rw-r--r--net/bluetooth/mgmt.c278
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c20
-rw-r--r--net/bluetooth/smp.c160
-rw-r--r--net/bluetooth/smp.h30
-rw-r--r--net/bridge/Makefile4
-rw-r--r--net/bridge/br.c98
-rw-r--r--net/bridge/br_device.c16
-rw-r--r--net/bridge/br_fdb.c134
-rw-r--r--net/bridge/br_if.c126
-rw-r--r--net/bridge/br_input.c8
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/bridge/br_multicast.c382
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/bridge/br_netlink.c3
-rw-r--r--net/bridge/br_notify.c118
-rw-r--r--net/bridge/br_private.h75
-rw-r--r--net/bridge/br_sysfs_br.c26
-rw-r--r--net/bridge/br_sysfs_if.c30
-rw-r--r--net/bridge/br_vlan.c154
-rw-r--r--net/bridge/netfilter/Kconfig17
-rw-r--r--net/bridge/netfilter/Makefile1
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c139
-rw-r--r--net/can/af_can.c31
-rw-r--r--net/can/af_can.h9
-rw-r--r--net/can/proc.c76
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c14
-rw-r--r--net/core/dev.c98
-rw-r--r--net/core/dev_addr_lists.c85
-rw-r--r--net/core/ethtool.c215
-rw-r--r--net/core/filter.c1292
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/pktgen.c50
-rw-r--r--net/core/ptp_classifier.c4
-rw-r--r--net/core/rtnetlink.c73
-rw-r--r--net/core/secure_seq.c25
-rw-r--r--net/core/skbuff.c28
-rw-r--r--net/core/sock.c4
-rw-r--r--net/core/tso.c77
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/proto.c9
-rw-r--r--net/dccp/sysctl.c3
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/dns_resolver/dns_query.c8
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ieee802154/6lowpan_rtnl.c207
-rw-r--r--net/ieee802154/dgram.c104
-rw-r--r--net/ieee802154/header_ops.c52
-rw-r--r--net/ieee802154/ieee802154.h19
-rw-r--r--net/ieee802154/netlink.c20
-rw-r--r--net/ieee802154/nl-mac.c809
-rw-r--r--net/ieee802154/nl_policy.c16
-rw-r--r--net/ieee802154/reassembly.c48
-rw-r--r--net/ipv4/af_inet.c110
-rw-r--r--net/ipv4/datagram.c20
-rw-r--r--net/ipv4/devinet.c9
-rw-r--r--net/ipv4/gre_demux.c27
-rw-r--r--net/ipv4/gre_offload.c16
-rw-r--r--net/ipv4/icmp.c23
-rw-r--r--net/ipv4/igmp.c16
-rw-r--r--net/ipv4/inet_connection_sock.c11
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/inetpeer.c20
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_options.c6
-rw-r--r--net/ipv4/ip_output.c22
-rw-r--r--net/ipv4/ip_tunnel.c25
-rw-r--r--net/ipv4/ip_tunnel_core.c10
-rw-r--r--net/ipv4/ip_vti.c8
-rw-r--r--net/ipv4/ipip.c5
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter/iptable_nat.c14
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c12
-rw-r--r--net/ipv4/proc.c24
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c52
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c45
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv4/tcp_bic.c5
-rw-r--r--net/ipv4/tcp_cong.c24
-rw-r--r--net/ipv4/tcp_cubic.c5
-rw-r--r--net/ipv4/tcp_fastopen.c219
-rw-r--r--net/ipv4/tcp_highspeed.c4
-rw-r--r--net/ipv4/tcp_htcp.c4
-rw-r--r--net/ipv4/tcp_hybla.c7
-rw-r--r--net/ipv4/tcp_illinois.c5
-rw-r--r--net/ipv4/tcp_input.c36
-rw-r--r--net/ipv4/tcp_ipv4.c303
-rw-r--r--net/ipv4/tcp_lp.c5
-rw-r--r--net/ipv4/tcp_metrics.c5
-rw-r--r--net/ipv4/tcp_minisocks.c31
-rw-r--r--net/ipv4/tcp_offload.c9
-rw-r--r--net/ipv4/tcp_output.c126
-rw-r--r--net/ipv4/tcp_scalable.c5
-rw-r--r--net/ipv4/tcp_vegas.c7
-rw-r--r--net/ipv4/tcp_veno.c9
-rw-r--r--net/ipv4/tcp_yeah.c5
-rw-r--r--net/ipv4/udp.c135
-rw-r--r--net/ipv4/udp_offload.c8
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c2
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/addrconf.c49
-rw-r--r--net/ipv6/addrconf_core.c2
-rw-r--r--net/ipv6/af_inet6.c45
-rw-r--r--net/ipv6/icmp.c41
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_checksum.c63
-rw-r--r--net/ipv6/ip6_fib.c12
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c64
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c12
-rw-r--r--net/ipv6/output_core.c27
-rw-r--r--net/ipv6/ping.c8
-rw-r--r--net/ipv6/proc.c14
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/syncookies.c4
-rw-r--r--net/ipv6/sysctl_net_ipv6.c7
-rw-r--r--net/ipv6/tcp_ipv6.c86
-rw-r--r--net/ipv6/udp.c66
-rw-r--r--net/ipv6/udp_offload.c5
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_output.c6
-rw-r--r--net/ipx/af_ipx.c2
-rw-r--r--net/ipx/ipx_route.c3
-rw-r--r--net/iucv/af_iucv.c32
-rw-r--r--net/key/af_key.c34
-rw-r--r--net/l2tp/l2tp_core.c118
-rw-r--r--net/l2tp/l2tp_core.h4
-rw-r--r--net/l2tp/l2tp_ip.c1
-rw-r--r--net/l2tp/l2tp_ip6.c11
-rw-r--r--net/l2tp/l2tp_netlink.c10
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/aes_ccm.c37
-rw-r--r--net/mac80211/cfg.c591
-rw-r--r--net/mac80211/chan.c614
-rw-r--r--net/mac80211/debugfs.c2
-rw-r--r--net/mac80211/debugfs.h2
-rw-r--r--net/mac80211/debugfs_netdev.c6
-rw-r--r--net/mac80211/debugfs_netdev.h2
-rw-r--r--net/mac80211/driver-ops.h178
-rw-r--r--net/mac80211/ht.c22
-rw-r--r--net/mac80211/ibss.c78
-rw-r--r--net/mac80211/ieee80211_i.h50
-rw-r--r--net/mac80211/iface.c46
-rw-r--r--net/mac80211/key.c7
-rw-r--r--net/mac80211/main.c10
-rw-r--r--net/mac80211/mesh.c38
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_sync.c2
-rw-r--r--net/mac80211/michael.h1
-rw-r--r--net/mac80211/mlme.c50
-rw-r--r--net/mac80211/rc80211_minstrel.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c22
-rw-r--r--net/mac80211/rx.c19
-rw-r--r--net/mac80211/scan.c25
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/status.c25
-rw-r--r--net/mac80211/tdls.c325
-rw-r--r--net/mac80211/trace.h117
-rw-r--r--net/mac80211/tx.c189
-rw-r--r--net/mac80211/util.c191
-rw-r--r--net/mac80211/wpa.c5
-rw-r--r--net/mac802154/Kconfig4
-rw-r--r--net/mac802154/Makefile3
-rw-r--r--net/mac802154/llsec.c1070
-rw-r--r--net/mac802154/llsec.h108
-rw-r--r--net/mac802154/mac802154.h44
-rw-r--r--net/mac802154/mac_cmd.c42
-rw-r--r--net/mac802154/mib.c187
-rw-r--r--net/mac802154/monitor.c3
-rw-r--r--net/mac802154/rx.c13
-rw-r--r--net/mac802154/wpan.c176
-rw-r--r--net/mpls/mpls_gso.c1
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c22
-rw-r--r--net/netfilter/nf_nat_core.c24
-rw-r--r--net/netfilter/nf_tables_api.c1269
-rw-r--r--net/netfilter/nfnetlink.c10
-rw-r--r--net/netfilter/nfnetlink_acct.c86
-rw-r--r--net/netfilter/nft_ct.c96
-rw-r--r--net/netfilter/nft_hash.c59
-rw-r--r--net/netfilter/nft_lookup.c10
-rw-r--r--net/netfilter/nft_meta.c103
-rw-r--r--net/netfilter/nft_rbtree.c43
-rw-r--r--net/netfilter/xt_bpf.c5
-rw-r--r--net/netfilter/xt_nfacct.c5
-rw-r--r--net/netfilter/xt_recent.c5
-rw-r--r--net/netlink/af_netlink.c70
-rw-r--r--net/netlink/af_netlink.h6
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/nfc/digital.h1
-rw-r--r--net/nfc/digital_core.c26
-rw-r--r--net/nfc/digital_dep.c5
-rw-r--r--net/nfc/digital_technology.c230
-rw-r--r--net/nfc/hci/command.c6
-rw-r--r--net/nfc/hci/core.c47
-rw-r--r--net/nfc/llcp_commands.c2
-rw-r--r--net/nfc/llcp_core.c13
-rw-r--r--net/nfc/nci/core.c9
-rw-r--r--net/nfc/nci/ntf.c7
-rw-r--r--net/nfc/nfc.h6
-rw-r--r--net/nfc/rawsock.c94
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/datapath.c778
-rw-r--r--net/openvswitch/datapath.h8
-rw-r--r--net/openvswitch/flow.c188
-rw-r--r--net/openvswitch/flow.h53
-rw-r--r--net/openvswitch/flow_netlink.c186
-rw-r--r--net/openvswitch/flow_netlink.h1
-rw-r--r--net/openvswitch/flow_table.c121
-rw-r--r--net/openvswitch/flow_table.h4
-rw-r--r--net/openvswitch/vport-gre.c4
-rw-r--r--net/openvswitch/vport-internal_dev.c2
-rw-r--r--net/openvswitch/vport-vxlan.c7
-rw-r--r--net/openvswitch/vport.h6
-rw-r--r--net/rds/ib_send.c4
-rw-r--r--net/rds/iw_send.c4
-rw-r--r--net/rds/iw_sysctl.c3
-rw-r--r--net/rds/rdma_transport.c2
-rw-r--r--net/rds/sysctl.c3
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rfkill/rfkill-gpio.c59
-rw-r--r--net/sched/cls_api.c26
-rw-r--r--net/sched/cls_basic.c10
-rw-r--r--net/sched/cls_bpf.c14
-rw-r--r--net/sched/cls_cgroup.c4
-rw-r--r--net/sched/cls_flow.c4
-rw-r--r--net/sched/cls_fw.c10
-rw-r--r--net/sched/cls_route.c11
-rw-r--r--net/sched/cls_rsvp.h4
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_api.c10
-rw-r--r--net/sched/sch_choke.c7
-rw-r--r--net/sched/sch_drr.c4
-rw-r--r--net/sched/sch_fq.c5
-rw-r--r--net/sched/sch_fq_codel.c7
-rw-r--r--net/sched/sch_hhf.c9
-rw-r--r--net/sched/sch_netem.c7
-rw-r--r--net/sched/sch_sfq.c7
-rw-r--r--net/sctp/associola.c170
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/ipv6.c4
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/proc.c2
-rw-r--r--net/sctp/protocol.c11
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/socket.c13
-rw-r--r--net/sctp/sysctl.c21
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/sctp/ulpqueue.c4
-rw-r--r--net/sunrpc/socklib.c3
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/tipc/Makefile2
-rw-r--r--net/tipc/bcast.c194
-rw-r--r--net/tipc/bcast.h9
-rw-r--r--net/tipc/bearer.c153
-rw-r--r--net/tipc/bearer.h47
-rw-r--r--net/tipc/config.c12
-rw-r--r--net/tipc/core.c14
-rw-r--r--net/tipc/core.h10
-rw-r--r--net/tipc/discover.c281
-rw-r--r--net/tipc/discover.h1
-rw-r--r--net/tipc/eth_media.c51
-rw-r--r--net/tipc/handler.c134
-rw-r--r--net/tipc/ib_media.c34
-rw-r--r--net/tipc/link.c216
-rw-r--r--net/tipc/link.h21
-rw-r--r--net/tipc/msg.c55
-rw-r--r--net/tipc/msg.h5
-rw-r--r--net/tipc/name_distr.c78
-rw-r--r--net/tipc/name_distr.h35
-rw-r--r--net/tipc/name_table.c14
-rw-r--r--net/tipc/net.c71
-rw-r--r--net/tipc/net.h4
-rw-r--r--net/tipc/node.c110
-rw-r--r--net/tipc/node.h88
-rw-r--r--net/tipc/node_subscr.c9
-rw-r--r--net/tipc/node_subscr.h2
-rw-r--r--net/tipc/port.c39
-rw-r--r--net/tipc/port.h10
-rw-r--r--net/tipc/socket.c121
-rw-r--r--net/tipc/socket.h4
-rw-r--r--net/unix/af_unix.c8
-rw-r--r--net/wireless/Kconfig37
-rw-r--r--net/wireless/ap.c4
-rw-r--r--net/wireless/chan.c175
-rw-r--r--net/wireless/core.c147
-rw-r--r--net/wireless/core.h53
-rw-r--r--net/wireless/ethtool.c10
-rw-r--r--net/wireless/genregdb.awk14
-rw-r--r--net/wireless/ibss.c43
-rw-r--r--net/wireless/mesh.c32
-rw-r--r--net/wireless/mlme.c38
-rw-r--r--net/wireless/nl80211.c654
-rw-r--r--net/wireless/nl80211.h3
-rw-r--r--net/wireless/rdev-ops.h15
-rw-r--r--net/wireless/reg.c156
-rw-r--r--net/wireless/reg.h18
-rw-r--r--net/wireless/scan.c162
-rw-r--r--net/wireless/sme.c48
-rw-r--r--net/wireless/trace.h66
-rw-r--r--net/wireless/util.c209
-rw-r--r--net/wireless/wext-compat.c40
-rw-r--r--net/wireless/wext-compat.h2
-rw-r--r--net/wireless/wext-sme.c12
-rw-r--r--net/xfrm/xfrm_output.c5
-rw-r--r--net/xfrm/xfrm_policy.c56
-rw-r--r--net/xfrm/xfrm_proc.c3
-rw-r--r--net/xfrm/xfrm_state.c37
-rw-r--r--net/xfrm/xfrm_user.c89
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--tools/net/bpf_exp.l1
-rw-r--r--tools/net/bpf_exp.y11
-rw-r--r--tools/net/bpf_jit_disasm.c20
-rw-r--r--tools/testing/selftests/net/Makefile8
1265 files changed, 61457 insertions, 22872 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index d922060e455d..416c5d59f52e 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -169,6 +169,14 @@ Description:
169 "unknown", "notpresent", "down", "lowerlayerdown", "testing", 169 "unknown", "notpresent", "down", "lowerlayerdown", "testing",
170 "dormant", "up". 170 "dormant", "up".
171 171
172What: /sys/class/net/<iface>/phys_port_id
173Date: July 2013
174KernelVersion: 3.12
175Contact: netdev@vger.kernel.org
176Description:
177 Indicates the interface's unique physical port identifier within
178 the NIC, as a string.
179
172What: /sys/class/net/<iface>/speed 180What: /sys/class/net/<iface>/speed
173Date: October 2009 181Date: October 2009
174KernelVersion: 2.6.33 182KernelVersion: 2.6.33
diff --git a/Documentation/ABI/testing/sysfs-class-net-cdc_ncm b/Documentation/ABI/testing/sysfs-class-net-cdc_ncm
new file mode 100644
index 000000000000..5cedf72df358
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-cdc_ncm
@@ -0,0 +1,149 @@
1What: /sys/class/net/<iface>/cdc_ncm/min_tx_pkt
2Date: May 2014
3KernelVersion: 3.16
4Contact: Bjørn Mork <bjorn@mork.no>
5Description:
6 The driver will pad NCM Transfer Blocks (NTBs) longer
7 than this to tx_max, allowing the device to receive
8 tx_max sized frames with no terminating short
9 packet. NTBs shorter than this limit are transmitted
10 as-is, without any padding, and are terminated with a
11 short USB packet.
12
13 Padding to tx_max allows the driver to transmit NTBs
14 back-to-back without any interleaving short USB
15 packets. This reduces the number of short packet
16 interrupts in the device, and represents a tradeoff
17 between USB bus bandwidth and device DMA optimization.
18
19 Set to 0 to pad all frames. Set to a value greater than
20 tx_max to disable all padding.
21
22What: /sys/class/net/<iface>/cdc_ncm/rx_max
23Date: May 2014
24KernelVersion: 3.16
25Contact: Bjørn Mork <bjorn@mork.no>
26Description:
27 The maximum NTB size for RX. Cannot exceed the
28 maximum value supported by the device. Must allow at
29 least one max sized datagram plus headers.
30
31 The actual limits are device dependent. See
32 dwNtbInMaxSize.
33
34 Note: Some devices will silently ignore changes to
35 this value, resulting in oversized NTBs and
36 corresponding framing errors.
37
38What: /sys/class/net/<iface>/cdc_ncm/tx_max
39Date: May 2014
40KernelVersion: 3.16
41Contact: Bjørn Mork <bjorn@mork.no>
42Description:
43 The maximum NTB size for TX. Cannot exceed the
44 maximum value supported by the device. Must allow at
45 least one max sized datagram plus headers.
46
47 The actual limits are device dependent. See
48 dwNtbOutMaxSize.
49
50What: /sys/class/net/<iface>/cdc_ncm/tx_timer_usecs
51Date: May 2014
52KernelVersion: 3.16
53Contact: Bjørn Mork <bjorn@mork.no>
54Description:
55 Datagram aggregation timeout in µs. The driver will
56 wait up to 3 times this timeout for more datagrams to
57 aggregate before transmitting an NTB frame.
58
59 Valid range: 5 to 4000000
60
61 Set to 0 to disable aggregation.
62
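
As an illustration, the writable attributes above can be driven from
userspace like any other sysfs file. The following is a minimal C
sketch; the interface name "wwan0" and the chosen values are examples
only, not recommendations:

  #include <stdio.h>

  /* Sketch: set a cdc_ncm tunable by writing its sysfs attribute.
   * "wwan0" is an example interface name.
   */
  static int set_cdc_ncm_attr(const char *attr, const char *val)
  {
          char path[256];
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/net/wwan0/cdc_ncm/%s", attr);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fputs(val, f);
          fclose(f);
          return 0;
  }

  int main(void)
  {
          set_cdc_ncm_attr("min_tx_pkt", "0");     /* pad all frames */
          set_cdc_ncm_attr("tx_timer_usecs", "0"); /* no aggregation */
          return 0;
  }
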
63The following read-only attributes all represent fields of the
64structure defined in section 6.2.1 "GetNtbParameters" of "Universal
65Serial Bus Communications Class Subclass Specifications for Network
66Control Model Devices" (CDC NCM), Revision 1.0 (Errata 1), November
6724, 2010 from USB Implementers Forum, Inc. The descriptions are
68quoted from table 6-3 of CDC NCM: "NTB Parameter Structure".
69
70What: /sys/class/net/<iface>/cdc_ncm/bmNtbFormatsSupported
71Date: May 2014
72KernelVersion: 3.16
73Contact: Bjørn Mork <bjorn@mork.no>
74Description:
75 Bit 0: 16-bit NTB supported (set to 1)
76 Bit 1: 32-bit NTB supported
77 Bits 2 – 15: reserved (reset to zero; must be ignored by host)
78
79What: /sys/class/net/<iface>/cdc_ncm/dwNtbInMaxSize
80Date: May 2014
81KernelVersion: 3.16
82Contact: Bjørn Mork <bjorn@mork.no>
83Description:
84 IN NTB Maximum Size in bytes
85
86What: /sys/class/net/<iface>/cdc_ncm/wNdpInDivisor
87Date: May 2014
88KernelVersion: 3.16
89Contact: Bjørn Mork <bjorn@mork.no>
90Description:
91 Divisor used for IN NTB Datagram payload alignment
92
93What: /sys/class/net/<iface>/cdc_ncm/wNdpInPayloadRemainder
94Date: May 2014
95KernelVersion: 3.16
96Contact: Bjørn Mork <bjorn@mork.no>
97Description:
98 Remainder used to align input datagram payload within
99 the NTB: (Payload Offset) mod (wNdpInDivisor) =
100 wNdpInPayloadRemainder
101
102What: /sys/class/net/<iface>/cdc_ncm/wNdpInAlignment
103Date: May 2014
104KernelVersion: 3.16
105Contact: Bjørn Mork <bjorn@mork.no>
106Description:
107 NDP alignment modulus for NTBs on the IN pipe. Shall
108 be a power of 2, and shall be at least 4.
109
110What: /sys/class/net/<iface>/cdc_ncm/dwNtbOutMaxSize
111Date: May 2014
112KernelVersion: 3.16
113Contact: Bjørn Mork <bjorn@mork.no>
114Description:
115 OUT NTB Maximum Size
116
117What: /sys/class/net/<iface>/cdc_ncm/wNdpOutDivisor
118Date: May 2014
119KernelVersion: 3.16
120Contact: Bjørn Mork <bjorn@mork.no>
121Description:
122 OUT NTB Datagram alignment modulus
123
124What: /sys/class/net/<iface>/cdc_ncm/wNdpOutPayloadRemainder
125Date: May 2014
126KernelVersion: 3.16
127Contact: Bjørn Mork <bjorn@mork.no>
128Description:
129 Remainder used to align output datagram payload
130 offsets within the NTB: Padding, shall be transmitted
131 as zero by function, and ignored by host. (Payload
132 Offset) mod (wNdpOutDivisor) = wNdpOutPayloadRemainder
133
134What: /sys/class/net/<iface>/cdc_ncm/wNdpOutAlignment
135Date: May 2014
136KernelVersion: 3.16
137Contact: Bjørn Mork <bjorn@mork.no>
138Description:
139 NDP alignment modulus for use in NTBs on the OUT
140 pipe. Shall be a power of 2, and shall be at least 4.
141
142What: /sys/class/net/<iface>/cdc_ncm/wNtbOutMaxDatagrams
143Date: May 2014
144KernelVersion: 3.16
145Contact: Bjørn Mork <bjorn@mork.no>
146Description:
147 Maximum number of datagrams that the host may pack
148 into a single OUT NTB. Zero means that the device
149 imposes no limit.
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
new file mode 100644
index 000000000000..5e9aeb91d355
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -0,0 +1,79 @@
1What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
2Date: March 2010
3KernelVersion: 2.6.35
4Contact: netdev@vger.kernel.org
5Description:
6 Mask of the CPU(s) currently enabled to participate in the
7 Receive Packet Steering packet processing flow for this
8 network device queue. Possible values depend on the number
9 of available CPU(s) in the system.
10
11What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
12Date: April 2010
13KernelVersion: 2.6.35
14Contact: netdev@vger.kernel.org
15Description:
16 Number of Receive Packet Steering flows being currently
17 processed by this particular network device receive queue.
18
19What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
20Date: November 2011
21KernelVersion: 3.3
22Contact: netdev@vger.kernel.org
23Description:
24 Indicates the number of transmit timeout events seen by this
25 network interface transmit queue.
26
27What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
28Date: November 2010
29KernelVersion: 2.6.38
30Contact: netdev@vger.kernel.org
31Description:
32 Mask of the CPU(s) currently enabled to participate in the
33 Transmit Packet Steering packet processing flow for this
34 network device transmit queue. Possible values depend on the
35 number of available CPU(s) in the system.
36
37What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
38Date: November 2011
39KernelVersion: 3.3
40Contact: netdev@vger.kernel.org
41Description:
42 Indicates the hold time in milliseconds to measure the slack
43 of this particular network device transmit queue.
44 Default value is 1000.
45
46What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
47Date: November 2011
48KernelVersion: 3.3
49Contact: netdev@vger.kernel.org
50Description:
51 Indicates the number of bytes (objects) in flight on this
52 network device transmit queue.
53
54What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
55Date: November 2011
56KernelVersion: 3.3
57Contact: netdev@vger.kernel.org
58Description:
59 Indicates the current limit of bytes allowed to be queued
60 on this network device transmit queue. This value is clamped
61 to be within the bounds defined by limit_max and limit_min.
62
63What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
64Date: November 2011
65KernelVersion: 3.3
66Contact: netdev@vger.kernel.org
67Description:
68 Indicates the absolute maximum limit of bytes allowed to be
69 queued on this network device transmit queue. See
70 include/linux/dynamic_queue_limits.h for the default value.
71
72What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
73Date: November 2011
74KernelVersion: 3.3
75Contact: netdev@vger.kernel.org
76Description:
77 Indicates the absolute minimum limit of bytes allowed to be
78 queued on this network device transmit queue. Default value is
79 0.
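
A short C sketch tying these attributes together; the interface name
"eth0" and the queue numbers are examples, and the RPS mask value is
arbitrary:

  #include <stdio.h>

  int main(void)
  {
          char buf[64];
          FILE *f;

          /* enable RPS on CPUs 0-3 for receive queue 0 (mask 0xf) */
          f = fopen("/sys/class/net/eth0/queues/rx-0/rps_cpus", "w");
          if (f) {
                  fputs("f", f);
                  fclose(f);
          }

          /* read the bytes currently in flight on transmit queue 0 */
          f = fopen("/sys/class/net/eth0/queues/tx-0/byte_queue_limits/inflight", "r");
          if (f) {
                  if (fgets(buf, sizeof(buf), f))
                          printf("inflight: %s", buf);
                  fclose(f);
          }
          return 0;
  }
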
diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
new file mode 100644
index 000000000000..397118de7b5e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-statistics
@@ -0,0 +1,201 @@
1What: /sys/class/net/<iface>/statistics/collisions
2Date: April 2005
3KernelVersion: 2.6.12
4Contact: netdev@vger.kernel.org
5Description:
6 Indicates the number of collisions seen by this network device.
7 This value might not be relevant with all MAC layers.
8
9What: /sys/class/net/<iface>/statistics/multicast
10Date: April 2005
11KernelVersion: 2.6.12
12Contact: netdev@vger.kernel.org
13Description:
14 Indicates the number of multicast packets received by this
15 network device.
16
17What: /sys/class/net/<iface>/statistics/rx_bytes
18Date: April 2005
19KernelVersion: 2.6.12
20Contact: netdev@vger.kernel.org
21Description:
22 Indicates the number of bytes received by this network device.
23 See the network driver for the exact meaning of when this
24 value is incremented.
25
26What: /sys/class/net/<iface>/statistics/rx_compressed
27Date: April 2005
28KernelVersion: 2.6.12
29Contact: netdev@vger.kernel.org
30Description:
31 Indicates the number of compressed packets received by this
32 network device. This value might only be relevant for interfaces
33 that support packet compression (e.g: PPP).
34
35What: /sys/class/net/<iface>/statistics/rx_crc_errors
36Date: April 2005
37KernelVersion: 2.6.12
38Contact: netdev@vger.kernel.org
39Description:
40 Indicates the number of packets received with a CRC (FCS) error
41 by this network device. Note that the specific meaning might
42 depend on the MAC layer used by the interface.
43
44What: /sys/class/net/<iface>/statistics/rx_dropped
45Date: April 2005
46KernelVersion: 2.6.12
47Contact: netdev@vger.kernel.org
48Description:
49 Indicates the number of packets received by the network device
50 but dropped, i.e. not forwarded to the upper layers for
51 packet processing. See the network driver for the exact
52 meaning of this value.
53
54What: /sys/class/net/<iface>/statistics/rx_fifo_errors
55Date: April 2005
56KernelVersion: 2.6.12
57Contact: netdev@vger.kernel.org
58Description:
59 Indicates the number of receive FIFO errors seen by this
60 network device. See the network driver for the exact
61 meaning of this value.
62
63What: /sys/class/net/<iface>/statistics/rx_frame_errors
64Date: April 2005
65KernelVersion: 2.6.12
66Contact: netdev@vger.kernel.org
67Description:
68 Indicates the number of received frames with errors, such as
69 alignment errors. Note that the specific meaning depends
70 on the MAC layer protocol used. See the network driver for
71 the exact meaning of this value.
72
73What: /sys/class/net/<iface>/statistics/rx_length_errors
74Date: April 2005
75KernelVersion: 2.6.12
76Contact: netdev@vger.kernel.org
77Description:
78 Indicates the number of received packets with a length
79 error, i.e. oversized or undersized. See the network driver for the
80 exact meaning of this value.
81
82What: /sys/class/net/<iface>/statistics/rx_missed_errors
83Date: April 2005
84KernelVersion: 2.6.12
85Contact: netdev@vger.kernel.org
86Description:
87 Indicates the number of received packets that have been missed
88 due to lack of capacity on the receive side. See the network
89 driver for the exact meaning of this value.
90
91What: /sys/class/net/<iface>/statistics/rx_over_errors
92Date: April 2005
93KernelVersion: 2.6.12
94Contact: netdev@vger.kernel.org
95Description:
96 Indicates the number of received packets that are oversized
97 compared to what the network device is configured to accept
98 (e.g: larger than MTU). See the network driver for the exact
99 meaning of this value.
100
101What: /sys/class/net/<iface>/statistics/rx_packets
102Date: April 2005
103KernelVersion: 2.6.12
104Contact: netdev@vger.kernel.org
105Description:
106 Indicates the total number of good packets received by this
107 network device.
108
109What: /sys/class/net/<iface>/statistics/tx_aborted_errors
110Date: April 2005
111KernelVersion: 2.6.12
112Contact: netdev@vger.kernel.org
113Description:
114 Indicates the number of packets that have been aborted
115 during transmission by a network device (e.g: because of
116 a medium collision). See the network driver for the exact
117 meaning of this value.
118
119What: /sys/class/net/<iface>/statistics/tx_bytes
120Date: April 2005
121KernelVersion: 2.6.12
122Contact: netdev@vger.kernel.org
123Description:
124 Indicates the number of bytes transmitted by a network
125 device. See the network driver for the exact meaning of this
126 value, in particular whether this accounts for all successfully
127 transmitted packets or all packets that have been queued for
128 transmission.
129
130What: /sys/class/net/<iface>/statistics/tx_carrier_errors
131Date: April 2005
132KernelVersion: 2.6.12
133Contact: netdev@vger.kernel.org
134Description:
135 Indicates the number of packets that could not be transmitted
136 because of carrier errors (e.g: physical link down). See the
137 network driver for the exact meaning of this value.
138
139What: /sys/class/net/<iface>/statistics/tx_compressed
140Date: April 2005
141KernelVersion: 2.6.12
142Contact: netdev@vger.kernel.org
143Description:
144 Indicates the number of transmitted compressed packets. Note
145 this might only be relevant for devices that support
146 compression (e.g: PPP).
147
148What: /sys/class/net/<iface>/statistics/tx_dropped
149Date: April 2005
150KernelVersion: 2.6.12
151Contact: netdev@vger.kernel.org
152Description:
153 Indicates the number of packets dropped during transmission.
154 See the driver for the exact reasons as to why the packets were
155 dropped.
156
157What: /sys/class/net/<iface>/statistics/tx_errors
158Date: April 2005
159KernelVersion: 2.6.12
160Contact: netdev@vger.kernel.org
161Description:
162 Indicates the number of packets in error during transmission by
163 a network device. See the driver for the exact reasons as to
164 why the packets were dropped.
165
166What: /sys/class/net/<iface>/statistics/tx_fifo_errors
167Date: April 2005
168KernelVersion: 2.6.12
169Contact: netdev@vger.kernel.org
170Description:
171 Indicates the number of packets having caused a transmit
172 FIFO error. See the driver for the exact reasons as to why the
173 packets were dropped.
174
175What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors
176Date: April 2005
177KernelVersion: 2.6.12
178Contact: netdev@vger.kernel.org
179Description:
180 Indicates the number of packets transmitted that have been
181 reported as heartbeat errors. See the driver for the exact
182 reasons as to why the packets were dropped.
183
184What: /sys/class/net/<iface>/statistics/tx_packets
185Date: April 2005
186KernelVersion: 2.6.12
187Contact: netdev@vger.kernel.org
188Description:
189 Indicates the number of packets transmitted by a network
190 device. See the driver for whether this reports the number of all
191 attempted or successful transmissions.
192
193What: /sys/class/net/<iface>/statistics/tx_window_errors
194Date: April 2005
195KernelVersion: 2.6.12
196Contact: netdev@vger.kernel.org
197Description:
198 Indicates the number of packets not successfully transmitted
199 due to a window collision. The specific meaning depends on the
200 MAC layer used. On Ethernet this is usually used to report
201 late collision errors.
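
All of the counters above are exposed as plain decimal text and can be
read the same way. A minimal C sketch; "eth0" is an example interface
name:

  #include <stdio.h>

  /* Sketch: read one of the per-interface counters described above. */
  static long long read_stat(const char *iface, const char *stat)
  {
          char path[256];
          long long val = -1;
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/net/%s/statistics/%s", iface, stat);
          f = fopen(path, "r");
          if (!f)
                  return -1;
          if (fscanf(f, "%lld", &val) != 1)
                  val = -1;
          fclose(f);
          return val;
  }

  int main(void)
  {
          printf("rx_bytes: %lld\n", read_stat("eth0", "rx_bytes"));
          printf("tx_bytes: %lld\n", read_stat("eth0", "tx_bytes"));
          return 0;
  }
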
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 044b76436e83..d9b9416c989f 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -100,6 +100,7 @@
100!Finclude/net/cfg80211.h wdev_priv 100!Finclude/net/cfg80211.h wdev_priv
101!Finclude/net/cfg80211.h ieee80211_iface_limit 101!Finclude/net/cfg80211.h ieee80211_iface_limit
102!Finclude/net/cfg80211.h ieee80211_iface_combination 102!Finclude/net/cfg80211.h ieee80211_iface_combination
103!Finclude/net/cfg80211.h cfg80211_check_combinations
103 </chapter> 104 </chapter>
104 <chapter> 105 <chapter>
105 <title>Actions and configuration</title> 106 <title>Actions and configuration</title>
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
new file mode 100644
index 000000000000..d01ed63d3ebb
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -0,0 +1,17 @@
1* AMD 10GbE PHY driver (amd-xgbe-phy)
2
3Required properties:
4- compatible: Should be "amd,xgbe-phy-seattle-v1a" and
5 "ethernet-phy-ieee802.3-c45"
6- reg: Address and length of the register sets for the device
7 - SerDes Rx/Tx registers
8 - SerDes integration registers (1/2)
9 - SerDes integration registers (2/2)
10
11Example:
12 xgbe_phy@e1240800 {
13 compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
14 reg = <0 0xe1240800 0 0x00400>,
15 <0 0xe1250000 0 0x00060>,
16 <0 0xe1250080 0 0x00004>;
17 };
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
new file mode 100644
index 000000000000..ea0c7908a3b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -0,0 +1,34 @@
1* AMD 10GbE driver (amd-xgbe)
2
3Required properties:
4- compatible: Should be "amd,xgbe-seattle-v1a"
5- reg: Address and length of the register sets for the device
6 - MAC registers
7 - PCS registers
8- interrupt-parent: Should be the phandle for the interrupt controller
9 that services interrupts for this device
10- interrupts: Should contain the amd-xgbe interrupt
11- clocks: Should be the DMA clock for the amd-xgbe device (used for
12 calculating the correct Rx interrupt watchdog timer value on a DMA
13 channel for coalescing)
14- clock-names: Should be the name of the DMA clock, "dma_clk"
15- phy-handle: See ethernet.txt file in the same directory
16- phy-mode: See ethernet.txt file in the same directory
17
18Optional properties:
19- mac-address: mac address to be assigned to the device. Can be overridden
20 by UEFI.
21
22Example:
23 xgbe@e0700000 {
24 compatible = "amd,xgbe-seattle-v1a";
25 reg = <0 0xe0700000 0 0x80000>,
26 <0 0xe0780000 0 0x80000>;
27 interrupt-parent = <&gic>;
28 interrupts = <0 325 4>;
29 clocks = <&xgbe_clk>;
30 clock-names = "dma_clk";
31 phy-handle = <&phy>;
32 phy-mode = "xgmii";
33 mac-address = [ 02 a1 a2 a3 a4 a5 ];
34 };
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
index f2febb94550e..451fef26b4df 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
@@ -24,7 +24,7 @@ Optional properties:
24- fixed-link: When the GENET interface is connected to a MoCA hardware block or 24- fixed-link: When the GENET interface is connected to a MoCA hardware block or
25 when operating in a RGMII to RGMII type of connection, or when the MDIO bus is 25 when operating in a RGMII to RGMII type of connection, or when the MDIO bus is
26 voluntarily disabled, this property should be used to describe the "fixed link". 26 voluntarily disabled, this property should be used to describe the "fixed link".
27 See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for information on 27 See Documentation/devicetree/bindings/net/fixed-link.txt for information on
28 the property specifics 28 the property specifics
29 29
30Required child nodes: 30Required child nodes:
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
new file mode 100644
index 000000000000..c183ea90d9bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
@@ -0,0 +1,29 @@
1* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
2
3Required properties:
4- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
5- reg: address and length of the register set for the device.
6- interrupts: interrupts for the device, first cell must be for the rx
7 interrupts, and the second cell should be for the transmit queues
8- local-mac-address: Ethernet MAC address (48 bits) of this adapter
9- phy-mode: Should be a string describing the PHY interface to the
10 Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
11- fixed-link: see Documentation/devicetree/bindings/net/fixed-link.txt for
12 the property specific details
13
14Optional properties:
15- systemport,num-tier2-arb: number of tier 2 arbiters, an integer
16- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
17- systemport,num-txq: number of HW transmit queues, an integer
18- systemport,num-rxq: number of HW receive queues, an integer
19
20Example:
21ethernet@f04a0000 {
22 compatible = "brcm,systemport-v1.00";
23 reg = <0xf04a0000 0x4650>;
24 local-mac-address = [ 00 11 22 33 44 55 ];
25 fixed-link = <0 1 1000 0 0>;
26 phy-mode = "gmii";
27 interrupts = <0x0 0x16 0x0>,
28 <0x0 0x17 0x0>;
29};
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
new file mode 100644
index 000000000000..fe38847d8e26
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
@@ -0,0 +1,44 @@
1Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings
2---------------------------------------------------------
3
4Required properties:
5- compatible : Should be "xlnx,zynq-can-1.0" for Zynq CAN
6 controllers and "xlnx,axi-can-1.00.a" for Axi CAN
7 controllers.
8- reg : Physical base address and size of the Axi CAN/Zynq
9 CANPS registers map.
10- interrupts : Property with a value describing the interrupt
11 number.
12- interrupt-parent : Must be core interrupt controller
13- clock-names : List of input clock names - "can_clk", "pclk"
14 (For CANPS), "can_clk" , "s_axi_aclk"(For AXI CAN)
15 (See clock bindings for details).
16- clocks : Clock phandles (see clock bindings for details).
17- tx-fifo-depth : CAN Tx fifo depth.
18- rx-fifo-depth : CAN Rx fifo depth.
19
20
21Example:
22
23For Zynq CANPS Dts file:
24 zynq_can_0: can@e0008000 {
25 compatible = "xlnx,zynq-can-1.0";
26 clocks = <&clkc 19>, <&clkc 36>;
27 clock-names = "can_clk", "pclk";
28 reg = <0xe0008000 0x1000>;
29 interrupts = <0 28 4>;
30 interrupt-parent = <&intc>;
31 tx-fifo-depth = <0x40>;
32 rx-fifo-depth = <0x40>;
33 };
34For Axi CAN Dts file:
35 axi_can_0: axi-can@40000000 {
36 compatible = "xlnx,axi-can-1.00.a";
37 clocks = <&clkc 0>, <&clkc 1>;
38 clock-names = "can_clk", "s_axi_aclk";
39 reg = <0x40000000 0x10000>;
40 interrupt-parent = <&intc>;
41 interrupts = <0 59 1>;
42 tx-fifo-depth = <0x40>;
43 rx-fifo-depth = <0x40>;
44 };
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
index 7ff57a119f81..764c0c79b43d 100644
--- a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
+++ b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
@@ -2,7 +2,9 @@ TI CPSW Phy mode Selection Device Tree Bindings
2----------------------------------------------- 2-----------------------------------------------
3 3
4Required properties: 4Required properties:
5- compatible : Should be "ti,am3352-cpsw-phy-sel" 5- compatible : Should be "ti,am3352-cpsw-phy-sel" for am335x platform and
6 "ti,dra7xx-cpsw-phy-sel" for dra7xx platform
7 "ti,am43xx-cpsw-phy-sel" for am43xx platform
6- reg : physical base address and size of the cpsw 8- reg : physical base address and size of the cpsw
7 registers map 9 registers map
8- reg-names : names of the register map given in "reg" node 10- reg-names : names of the register map given in "reg" node
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
new file mode 100644
index 000000000000..82bf7e0f47b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fixed-link.txt
@@ -0,0 +1,42 @@
1Fixed link Device Tree binding
2------------------------------
3
4Some Ethernet MACs have a "fixed link", and are not connected to a
5normal MDIO-managed PHY device. For those situations, a Device Tree
6binding allows describing a "fixed link".
7
8Such a fixed link situation is described by creating a 'fixed-link'
9sub-node of the Ethernet MAC device node, with the following
10properties:
11
12* 'speed' (integer, mandatory), to indicate the link speed. Accepted
13 values are 10, 100 and 1000
14* 'full-duplex' (boolean, optional), to indicate that full duplex is
15 used. When absent, half duplex is assumed.
16* 'pause' (boolean, optional), to indicate that pause should be
17 enabled.
18* 'asym-pause' (boolean, optional), to indicate that asym_pause should
19 be enabled.
20
21Old, deprecated 'fixed-link' binding:
22
23* A 'fixed-link' property in the Ethernet MAC node, with 5 cells, of the
24 form <a b c d e> with the following accepted values:
25 - a: emulated PHY ID, choose any, but unique among all specified
26 fixed-links, from 0 to 31
27 - b: duplex configuration: 0 for half duplex, 1 for full duplex
28 - c: link speed in Mbits/sec, accepted values are: 10, 100 and 1000
29 - d: pause configuration: 0 for no pause, 1 for pause
30 - e: asymmetric pause configuration: 0 for no asymmetric pause, 1 for
31 asymmetric pause
32
33Example:
34
35ethernet@0 {
36 ...
37 fixed-link {
38 speed = <1000>;
39 full-duplex;
40 };
41 ...
42};
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 737cdef4f903..be6ea8960f20 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -42,10 +42,7 @@ Properties:
42 interrupt. For TSEC and eTSEC devices, the first interrupt is 42 interrupt. For TSEC and eTSEC devices, the first interrupt is
43 transmit, the second is receive, and the third is error. 43 transmit, the second is receive, and the third is error.
44 - phy-handle : See ethernet.txt file in the same directory. 44 - phy-handle : See ethernet.txt file in the same directory.
45 - fixed-link : <a b c d e> where a is emulated phy id - choose any, 45 - fixed-link : See fixed-link.txt in the same directory.
46 but unique to the all specified fixed-links, b is duplex - 0 half,
47 1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
48 pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
49 - phy-connection-type : See ethernet.txt file in the same directory. 46 - phy-connection-type : See ethernet.txt file in the same directory.
50 This property is only really needed if the connection is of type 47 This property is only really needed if the connection is of type
51 "rgmii-id", as all other connection types are detected by hardware. 48 "rgmii-id", as all other connection types are detected by hardware.
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
new file mode 100644
index 000000000000..75d398bb1fbb
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
@@ -0,0 +1,36 @@
1Hisilicon hix5hd2 gmac controller
2
3Required properties:
4- compatible: should be "hisilicon,hix5hd2-gmac".
5- reg: specifies base physical address(es) and size of the device registers.
6 The first region is the MAC register base and size.
7 The second region is the external interface control register.
8- interrupts: should contain the MAC interrupt.
9- #address-cells: must be <1>.
10- #size-cells: must be <0>.
11- phy-mode: see ethernet.txt [1].
12- phy-handle: see ethernet.txt [1].
13- mac-address: see ethernet.txt [1].
14- clocks: clock phandle and specifier pair.
15
16- PHY subnode: inherits from phy binding [2]
17
18[1] Documentation/devicetree/bindings/net/ethernet.txt
19[2] Documentation/devicetree/bindings/net/phy.txt
20
21Example:
22 gmac0: ethernet@f9840000 {
23 compatible = "hisilicon,hix5hd2-gmac";
24 reg = <0xf9840000 0x1000>,<0xf984300c 0x4>;
25 interrupts = <0 71 4>;
26 #address-cells = <1>;
27 #size-cells = <0>;
28 phy-mode = "mii";
29 phy-handle = <&phy2>;
30 mac-address = [00 00 00 00 00 00];
31 clocks = <&clock HIX5HD2_MAC0_CLK>;
32
33 phy2: ethernet-phy@2 {
34 reg = <2>;
35 };
36 };
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
new file mode 100644
index 000000000000..d3bbdded4cbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
@@ -0,0 +1,23 @@
1* AT86RF230 IEEE 802.15.4 *
2
3Required properties:
4 - compatible: should be "atmel,at86rf230", "atmel,at86rf231",
5 "atmel,at86rf233" or "atmel,at86rf212"
6 - spi-max-frequency: maximum bus speed; should be set to 7500000 depending
7 on sync or async operation mode
8 - reg: the chipselect index
9 - interrupts: the interrupt generated by the device
10
11Optional properties:
12 - reset-gpio: GPIO spec for the rstn pin
13 - sleep-gpio: GPIO spec for the slp_tr pin
14
15Example:
16
17 at86rf231@0 {
18 compatible = "atmel,at86rf231";
19 spi-max-frequency = <7500000>;
20 reg = <0>;
21 interrupts = <19 1>;
22 interrupt-parent = <&gpio3>;
23 };
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index d54d0cc79487..bbdf9a7359a2 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -1,9 +1,18 @@
1Micrel KS8851 Ethernet mac 1Micrel KS8851 Ethernet mac (MLL)
2 2
3Required properties: 3Required properties:
4- compatible = "micrel,ks8851-ml" of parallel interface 4- compatible = "micrel,ks8851-mll" for parallel interface
5- reg : 2 physical address and size of registers for data and command 5- reg : 2 physical address and size of registers for data and command
6- interrupts : interrupt connection 6- interrupts : interrupt connection
7 7
8Micrel KS8851 Ethernet mac (SPI)
9
10Required properties:
11- compatible = "micrel,ks8851" or the deprecated "ks8851"
12- reg : chip select number
13- interrupts : interrupt connection
14
8Optional properties: 15Optional properties:
9- vdd-supply: supply for Ethernet mac 16- vdd-supply: analog 3.3V supply for Ethernet mac
17- vdd-io-supply: digital 1.8V IO supply for Ethernet mac
18- reset-gpios: reset_n input pin
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
deleted file mode 100644
index 997a63f1aea1..000000000000
--- a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
+++ /dev/null
@@ -1,49 +0,0 @@
1Micrel KSZ9021 Gigabit Ethernet PHY
2
3Some boards require special tuning values, particularly when it comes to
4clock delays. You can specify clock delay values by adding
5micrel-specific properties to an Ethernet OF device node.
6
7All skew control options are specified in picoseconds. The minimum
8value is 0, and the maximum value is 3000.
9
10Optional properties:
11 - rxc-skew-ps : Skew control of RXC pad
12 - rxdv-skew-ps : Skew control of RX CTL pad
13 - txc-skew-ps : Skew control of TXC pad
14 - txen-skew-ps : Skew control of TX_CTL pad
15 - rxd0-skew-ps : Skew control of RX data 0 pad
16 - rxd1-skew-ps : Skew control of RX data 1 pad
17 - rxd2-skew-ps : Skew control of RX data 2 pad
18 - rxd3-skew-ps : Skew control of RX data 3 pad
19 - txd0-skew-ps : Skew control of TX data 0 pad
20 - txd1-skew-ps : Skew control of TX data 1 pad
21 - txd2-skew-ps : Skew control of TX data 2 pad
22 - txd3-skew-ps : Skew control of TX data 3 pad
23
24Examples:
25
26 /* Attach to an Ethernet device with autodetected PHY */
27 &enet {
28 rxc-skew-ps = <3000>;
29 rxdv-skew-ps = <0>;
30 txc-skew-ps = <3000>;
31 txen-skew-ps = <0>;
32 status = "okay";
33 };
34
35 /* Attach to an explicitly-specified PHY */
36 mdio {
37 phy0: ethernet-phy@0 {
38 rxc-skew-ps = <3000>;
39 rxdv-skew-ps = <0>;
40 txc-skew-ps = <3000>;
41 txen-skew-ps = <0>;
42 reg = <0>;
43 };
44 };
45 ethernet@70000 {
46 status = "okay";
47 phy = <&phy0>;
48 phy-mode = "rgmii-id";
49 };
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
new file mode 100644
index 000000000000..692076fda0e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -0,0 +1,83 @@
1Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
2
3Some boards require special tuning values, particularly when it comes to
4clock delays. You can specify clock delay values by adding
5micrel-specific properties to an Ethernet OF device node.
6
7Note that these settings are applied after any phy-specific fixup from
8phy_fixup_list (see phy_init_hw() from drivers/net/phy/phy_device.c),
9and therefore may overwrite them.
10
11KSZ9021:
12
13 All skew control options are specified in picoseconds. The minimum
14 value is 0, the maximum value is 3000, and it is incremented in 200ps
15 steps.
16
17 Optional properties:
18
19 - rxc-skew-ps : Skew control of RXC pad
20 - rxdv-skew-ps : Skew control of RX CTL pad
21 - txc-skew-ps : Skew control of TXC pad
22 - txen-skew-ps : Skew control of TX CTL pad
23 - rxd0-skew-ps : Skew control of RX data 0 pad
24 - rxd1-skew-ps : Skew control of RX data 1 pad
25 - rxd2-skew-ps : Skew control of RX data 2 pad
26 - rxd3-skew-ps : Skew control of RX data 3 pad
27 - txd0-skew-ps : Skew control of TX data 0 pad
28 - txd1-skew-ps : Skew control of TX data 1 pad
29 - txd2-skew-ps : Skew control of TX data 2 pad
30 - txd3-skew-ps : Skew control of TX data 3 pad
31
32KSZ9031:
33
34 All skew control options are specified in picoseconds. The minimum
35 value is 0, and the maximum is property-dependent. The increment
36 step is 60ps.
37
38 Optional properties:
39
40 Maximum value of 1860:
41
42 - rxc-skew-ps : Skew control of RX clock pad
43 - txc-skew-ps : Skew control of TX clock pad
44
45 Maximum value of 900:
46
47 - rxdv-skew-ps : Skew control of RX CTL pad
48 - txen-skew-ps : Skew control of TX CTL pad
49 - rxd0-skew-ps : Skew control of RX data 0 pad
50 - rxd1-skew-ps : Skew control of RX data 1 pad
51 - rxd2-skew-ps : Skew control of RX data 2 pad
52 - rxd3-skew-ps : Skew control of RX data 3 pad
53 - txd0-skew-ps : Skew control of TX data 0 pad
54 - txd1-skew-ps : Skew control of TX data 1 pad
55 - txd2-skew-ps : Skew control of TX data 2 pad
56 - txd3-skew-ps : Skew control of TX data 3 pad
57
58Examples:
59
60 /* Attach to an Ethernet device with autodetected PHY */
61 &enet {
62 rxc-skew-ps = <3000>;
63 rxdv-skew-ps = <0>;
64 txc-skew-ps = <3000>;
65 txen-skew-ps = <0>;
66 status = "okay";
67 };
68
69 /* Attach to an explicitly-specified PHY */
70 mdio {
71 phy0: ethernet-phy@0 {
72 rxc-skew-ps = <3000>;
73 rxdv-skew-ps = <0>;
74 txc-skew-ps = <3000>;
75 txen-skew-ps = <0>;
76 reg = <0>;
77 };
78 };
79 ethernet@70000 {
80 status = "okay";
81 phy = <&phy0>;
82 phy-mode = "rgmii-id";
83 };
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
new file mode 100644
index 000000000000..dab69f36167c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -0,0 +1,35 @@
1* NXP Semiconductors PN544 NFC Controller
2
3Required properties:
4- compatible: Should be "nxp,pn544-i2c".
5- clock-frequency: I²C work frequency.
6- reg: address on the bus
7- interrupt-parent: phandle for the interrupt gpio controller
8- interrupts: GPIO interrupt to which the chip is connected
9- enable-gpios: Output GPIO pin used for enabling/disabling the PN544
10- firmware-gpios: Output GPIO pin used to enter firmware download mode
11
12Optional SoC Specific Properties:
13- pinctrl-names: Contains only one value - "default".
14- pinctrl-0: Specifies the pin control groups used for this controller.
15
16Example (for ARM-based BeagleBone with PN544 on I2C2):
17
18&i2c2 {
19
20 status = "okay";
21
22 pn544: pn544@28 {
23
24 compatible = "nxp,pn544-i2c";
25
26 reg = <0x28>;
27 clock-frequency = <400000>;
28
29 interrupt-parent = <&gpio1>;
30 interrupts = <17 GPIO_ACTIVE_HIGH>;
31
32 enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
33 firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
34 };
35};
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
new file mode 100644
index 000000000000..e4faa2e8dfeb
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
@@ -0,0 +1,33 @@
1* STMicroelectronics SAS. ST21NFCA NFC Controller
2
3Required properties:
4- compatible: Should be "st,st21nfca_i2c".
5- clock-frequency: I²C work frequency.
6- reg: address on the bus
7- interrupt-parent: phandle for the interrupt gpio controller
8- interrupts: GPIO interrupt to which the chip is connected
9- enable-gpios: Output GPIO pin used for enabling/disabling the ST21NFCA
10
11Optional SoC Specific Properties:
12- pinctrl-names: Contains only one value - "default".
13- pinctrl-0: Specifies the pin control groups used for this controller.
14
15Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
16
17&i2c2 {
18
19 status = "okay";
20
21 st21nfca: st21nfca@1 {
22
23 compatible = "st,st21nfca_i2c";
24
25 reg = <0x01>;
26 clock-frequency = <400000>;
27
28 interrupt-parent = <&gpio5>;
29 interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
30
31 enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
32 };
33};
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 8dd3ef7bc56b..1e436133685f 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -12,6 +12,7 @@ Required properties:
12Optional SoC Specific Properties: 12Optional SoC Specific Properties:
13- pinctrl-names: Contains only one value - "default". 13- pinctrl-names: Contains only one value - "default".
14- pinctrl-0: Specifies the pin control groups used for this controller. 14- pinctrl-0: Specifies the pin control groups used for this controller.
15- autosuspend-delay: Specify autosuspend delay in milliseconds.
15 16
16Example (for ARM-based BeagleBone with TRF7970A on SPI1): 17Example (for ARM-based BeagleBone with TRF7970A on SPI1):
17 18
@@ -29,6 +30,7 @@ Example (for ARM-based BeagleBone with TRF7970A on SPI1):
29 ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>, 30 ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>,
30 <&gpio2 5 GPIO_ACTIVE_LOW>; 31 <&gpio2 5 GPIO_ACTIVE_LOW>;
31 vin-supply = <&ldo3_reg>; 32 vin-supply = <&ldo3_reg>;
33 autosuspend-delay = <30000>;
32 status = "okay"; 34 status = "okay";
33 }; 35 };
34}; 36};
diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt
new file mode 100644
index 000000000000..334eca2bf937
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/via-rhine.txt
@@ -0,0 +1,17 @@
1* VIA Rhine 10/100 Network Controller
2
3Required properties:
4- compatible : Should be "via,vt8500-rhine" for integrated
5 Rhine controllers found in VIA VT8500, WonderMedia WM8950
6 and similar. These are listed as 1106:3106 rev. 0x84 on the
7 virtual PCI bus under vendor-provided kernels
8- reg : Address and length of the io space
9- interrupts : Should contain the controller interrupt line
10
11Examples:
12
13ethernet@d8004000 {
14 compatible = "via,vt8500-rhine";
15 reg = <0xd8004000 0x100>;
16 interrupts = <10>;
17};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 89472558011e..1525e30483fd 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -318,3 +318,8 @@ GPIO
318 devm_gpiod_get_optional() 318 devm_gpiod_get_optional()
319 devm_gpiod_get_index_optional() 319 devm_gpiod_get_index_optional()
320 devm_gpiod_put() 320 devm_gpiod_put()
321
322MDIO
323 devm_mdiobus_alloc()
324 devm_mdiobus_alloc_size()
325 devm_mdiobus_free()
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index a383c00392d0..9c723ecd0025 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -585,13 +585,19 @@ mode
585 balance-tlb or 5 585 balance-tlb or 5
586 586
587 Adaptive transmit load balancing: channel bonding that 587 Adaptive transmit load balancing: channel bonding that
588 does not require any special switch support. The 588 does not require any special switch support.
589 outgoing traffic is distributed according to the 589
590 current load (computed relative to the speed) on each 590 In tlb_dynamic_lb=1 mode, the outgoing traffic is
591 slave. Incoming traffic is received by the current 591 distributed according to the current load (computed
592 slave. If the receiving slave fails, another slave 592 relative to the speed) on each slave.
593 takes over the MAC address of the failed receiving 593
594 slave. 594 In tlb_dynamic_lb=0 mode, the load balancing based on
595 current load is disabled and the load is distributed
596 only using the hash distribution.
597
598 Incoming traffic is received by the current slave.
599 If the receiving slave fails, another slave takes over
600 the MAC address of the failed receiving slave.
595 601
596 Prerequisite: 602 Prerequisite:
597 603
@@ -736,6 +742,28 @@ primary_reselect
736 742
737 This option was added for bonding version 3.6.0. 743 This option was added for bonding version 3.6.0.
738 744
745tlb_dynamic_lb
746
747 Specifies if dynamic shuffling of flows is enabled in tlb
748 mode. The value has no effect on any other modes.
749
750 The default behavior of tlb mode is to shuffle active flows across
751 slaves based on the load in that interval. This gives nice lb
752 characteristics but can cause packet reordering. If re-ordering is
753 a concern, use this variable to disable flow shuffling and rely on
754 load balancing provided solely by the hash distribution.
755 xmit-hash-policy can be used to select the appropriate hashing for
756 the setup.
757
758 The sysfs entry can be used to change the setting per bond device
759 and the initial value is derived from the module parameter. The
760 sysfs entry is allowed to be changed only if the bond device is
761 down.
762
763 The default value is "1", which enables flow shuffling, while "0"
764 disables it. This option was added in bonding driver 3.7.1.
765
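	For example, a sketch of changing the setting through the
	per-bond sysfs entry from C ("bond0" is an example device,
	which must be down first):

	  #include <stdio.h>

	  int main(void)
	  {
	          /* the bond must be down before this may be changed */
	          FILE *f = fopen("/sys/class/net/bond0/bonding/tlb_dynamic_lb", "w");

	          if (!f) {
	                  perror("fopen");
	                  return 1;
	          }
	          fputs("0", f);  /* rely on the hash distribution only */
	          fclose(f);
	          return 0;
	  }
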
766
739updelay 767updelay
740 768
741 Specifies the time, in milliseconds, to wait before enabling a 769 Specifies the time, in milliseconds, to wait before enabling a
@@ -769,7 +797,7 @@ use_carrier
769xmit_hash_policy 797xmit_hash_policy
770 798
771 Selects the transmit hash policy to use for slave selection in 799 Selects the transmit hash policy to use for slave selection in
772 balance-xor and 802.3ad modes. Possible values are: 800 balance-xor, 802.3ad, and tlb modes. Possible values are:
773 801
774 layer2 802 layer2
775 803
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 4f7ae5261364..2236d6dcb7da 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -469,6 +469,41 @@ solution for a couple of reasons:
469 having this 'send only' use-case we may remove the receive list in the 469 having this 'send only' use-case we may remove the receive list in the
470 Kernel to save a little (really a very little!) CPU usage. 470 Kernel to save a little (really a very little!) CPU usage.
471 471
472 4.1.1.1 CAN filter usage optimisation
473
474 The CAN filters are processed in per-device filter lists at CAN frame
475 reception time. To reduce the number of checks that need to be performed
476 while walking through the filter lists, the CAN core provides optimized
477 filter handling when the filter subscription focuses on a single CAN ID.
478
479 For the possible 2048 SFF CAN identifiers the identifier is used as an index
480 to access the corresponding subscription list without any further checks.
481 For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
482 hash function to retrieve the EFF table index.
483
484 To benefit from the optimized filters for single CAN identifiers,
485 CAN_SFF_MASK or CAN_EFF_MASK has to be set in can_filter.mask together
486 with the CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in
487 can_filter.mask makes clear that it matters whether an SFF or EFF CAN ID is
488 subscribed. E.g. in the example above
489
490 rfilter[0].can_id = 0x123;
491 rfilter[0].can_mask = CAN_SFF_MASK;
492
493 both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
494
495 To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
496 filter has to be defined in this way to benefit from the optimized filters:
497
498 struct can_filter rfilter[2];
499
500 rfilter[0].can_id = 0x123;
501 rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
502 rfilter[1].can_id = 0x12345678 | CAN_EFF_FLAG;
503 rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
504
505 setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
506
472 4.1.2 RAW socket option CAN_RAW_ERR_FILTER 507 4.1.2 RAW socket option CAN_RAW_ERR_FILTER
473 508
474 As described in chapter 3.4 the CAN interface driver can generate so 509 As described in chapter 3.4 the CAN interface driver can generate so
diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt
new file mode 100644
index 000000000000..a15ea602aa52
--- /dev/null
+++ b/Documentation/networking/cdc_mbim.txt
@@ -0,0 +1,339 @@
1 cdc_mbim - Driver for CDC MBIM Mobile Broadband modems
2 ========================================================
3
4The cdc_mbim driver supports USB devices conforming to the "Universal
5Serial Bus Communications Class Subclass Specification for Mobile
6Broadband Interface Model" [1], which is a further development of
7"Universal Serial Bus Communications Class Subclass Specifications for
8Network Control Model Devices" [2] optimized for Mobile Broadband
9devices, aka "3G/LTE modems".
10
11
12Command Line Parameters
13=======================
14
15The cdc_mbim driver has no parameters of its own. But the probing
16behaviour for NCM 1.0 backwards compatible MBIM functions (an
17"NCM/MBIM function" as defined in section 3.2 of [1]) is affected
18by a cdc_ncm driver parameter:
19
20prefer_mbim
21-----------
22Type: Boolean
23Valid Range: N/Y (0-1)
24Default Value: Y (MBIM is preferred)
25
26This parameter sets the system policy for NCM/MBIM functions. Such
27functions will be handled by either the cdc_ncm driver or the cdc_mbim
28driver depending on the prefer_mbim setting. Setting prefer_mbim=N
29makes the cdc_mbim driver ignore these functions and lets the cdc_ncm
30driver handle them instead.
31
32The parameter is writable, and can be changed at any time. A manual
33unbind/bind is required to make the change effective for NCM/MBIM
34functions bound to the "wrong" driver.
35
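Assuming the standard module parameter sysfs layout, the policy can for
example be changed from C (a sketch):

  #include <stdio.h>

  int main(void)
  {
          /* let cdc_ncm handle NCM/MBIM functions; already-bound
           * functions still need a manual unbind/bind afterwards
           */
          FILE *f = fopen("/sys/module/cdc_ncm/parameters/prefer_mbim", "w");

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          fputs("N", f);
          fclose(f);
          return 0;
  }
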
36
37Basic usage
38===========
39
40MBIM functions are inactive when unmanaged. The cdc_mbim driver only
41provides a userspace interface to the MBIM control channel, and will
42not participate in the management of the function. This implies that a
43userspace MBIM management application is always required to enable an
44MBIM function.
45
46Such userspace applications include, but are not limited to:
47 - mbimcli (included with the libmbim [3] library), and
48 - ModemManager [4]
49
50Establishing an MBIM IP session requires at least these actions by the
51management application:
52 - open the control channel
53 - configure network connection settings
54 - connect to network
55 - configure IP interface
56
57Management application development
58----------------------------------
59The driver <-> userspace interfaces are described below. The MBIM
60control channel protocol is described in [1].
61
62
63MBIM control channel userspace ABI
64==================================
65
66/dev/cdc-wdmX character device
67------------------------------
68The driver creates a two-way pipe to the MBIM function control channel
69using the cdc-wdm driver as a subdriver. The userspace end of the
70control channel pipe is a /dev/cdc-wdmX character device.
71
72The cdc_mbim driver does not process or police messages on the control
73channel. The channel is fully delegated to the userspace management
74application. It is therefore up to this application to ensure that it
75complies with all the control channel requirements in [1].
76
77The cdc-wdmX device is created as a child of the MBIM control
78interface USB device. The character device associated with a specific
79MBIM function can be looked up using sysfs. For example:
80
81 bjorn@nemi:~$ ls /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc
82 cdc-wdm0
83
84 bjorn@nemi:~$ grep . /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc/cdc-wdm0/dev
85 180:0
86
87
88USB configuration descriptors
89-----------------------------
90The wMaxControlMessage field of the CDC MBIM functional descriptor
91limits the maximum control message size. The management application is
92responsible for negotiating a control message size complying with the
93requirements in section 9.3.1 of [1], taking this descriptor field
94into consideration.
95
96The userspace application can access the CDC MBIM functional
97descriptor of an MBIM function using either of the two USB
98configuration descriptor kernel interfaces described in [6] or [7].
99
100See also the ioctl documentation below.
101
102
103Fragmentation
104-------------
105The userspace application is responsible for all control message
106fragmentation and defragmentation, as described in section 9.5 of [1].
107
108
109/dev/cdc-wdmX write()
110---------------------
111The MBIM control messages from the management application *must not*
112exceed the negotiated control message size.
113
114
115/dev/cdc-wdmX read()
116--------------------
117The management application *must* accept control messages of up to the
118negotiated control message size.
119
120
121/dev/cdc-wdmX ioctl()
122---------------------
123IOCTL_WDM_MAX_COMMAND: Get Maximum Command Size
124This ioctl returns the wMaxControlMessage field of the CDC MBIM
125functional descriptor for MBIM devices. This is intended as a
126convenience, eliminating the need to parse the USB descriptors from
127userspace.
128
129 #include <stdio.h>
130 #include <fcntl.h>
131 #include <sys/ioctl.h>
132 #include <linux/types.h>
133 #include <linux/usb/cdc-wdm.h>
134 int main()
135 {
136 __u16 max;
137 int fd = open("/dev/cdc-wdm0", O_RDWR);
138 if (!ioctl(fd, IOCTL_WDM_MAX_COMMAND, &max))
139 printf("wMaxControlMessage is %d\n", max);
140 }
141
142
143Custom device services
144----------------------
145The MBIM specification allows vendors to freely define additional
146services. This is fully supported by the cdc_mbim driver.
147
148Support for new MBIM services, including vendor specified services, is
149implemented entirely in userspace, like the rest of the MBIM control
150protocol.
151
152New services should be registered in the MBIM Registry [5].
153
154
155
156MBIM data channel userspace ABI
157===============================
158
159wwanY network device
160--------------------
161The cdc_mbim driver represents the MBIM data channel as a single
162network device of the "wwan" type. This network device is initially
163mapped to MBIM IP session 0.
164
165
166Multiplexed IP sessions (IPS)
167-----------------------------
168MBIM allows multiplexing up to 256 IP sessions over a single USB data
169channel. The cdc_mbim driver models such IP sessions as 802.1q VLAN
170subdevices of the master wwanY device, mapping MBIM IP session Z to
171VLAN ID Z for all values of Z greater than 0.
172
173The device maximum Z is given in the MBIM_DEVICE_CAPS_INFO structure
174described in section 10.5.1 of [1].
175
176The userspace management application is responsible for adding new
177VLAN links prior to establishing MBIM IP sessions where the SessionId
178is greater than 0. These links can be added by using the normal VLAN
179kernel interfaces, either ioctl or netlink.
180
181For example, adding a link for an MBIM IP session with SessionId 3:
182
183 ip link add link wwan0 name wwan0.3 type vlan id 3
184
185The driver will automatically map the "wwan0.3" network device to MBIM
186IP session 3.
187
188
189Device Service Streams (DSS)
190----------------------------
191MBIM also allows up to 256 non-IP data streams to be multiplexed over
192the same shared USB data channel. The cdc_mbim driver models these
193sessions as another set of 802.1q VLAN subdevices of the master wwanY
194device, mapping MBIM DSS session A to VLAN ID (256 + A) for all values
195of A.
196
197The device maximum A is given in the MBIM_DEVICE_SERVICES_INFO
198structure described in section 10.5.29 of [1].
199
200The DSS VLAN subdevices are used as a practical interface between the
201shared MBIM data channel and an MBIM DSS aware userspace application.
202They are not intended to be presented as-is to an end user. The
203assumption is that a userspace application initiating a DSS session
204also takes care of the necessary framing of the DSS data, presenting
205the stream to the end user in an appropriate way for the stream type.
206
207The network device ABI requires a dummy ethernet header for every DSS
208data frame being transported. The contents of this header are
209arbitrary, with the following exceptions:
210 - TX frames using an IP protocol (0x0800 or 0x86dd) will be dropped
211 - RX frames will have the protocol field set to ETH_P_802_3 (but will
212 not be properly formatted 802.3 frames)
213 - RX frames will have the destination address set to the hardware
214 address of the master device
215
216The DSS supporting userspace management application is responsible for
217adding the dummy ethernet header on TX and stripping it on RX.
218
219This is a simple example using tools commonly available, exporting
220DssSessionId 5 as a pty character device pointed to by a /dev/nmea
221symlink:
222
223 ip link add link wwan0 name wwan0.dss5 type vlan id 261
224 ip link set dev wwan0.dss5 up
225 socat INTERFACE:wwan0.dss5,type=2 PTY:,echo=0,link=/dev/nmea
226
227This is only an example, most suitable for testing out a DSS
228service. Userspace applications supporting specific MBIM DSS services
229are expected to use the tools and programming interfaces required by
230that service.
231
232Note that adding VLAN links for DSS sessions is entirely optional. A
233management application may instead choose to bind a packet socket
234directly to the master network device, using the received VLAN tags to
235map frames to the correct DSS session and adding 18 byte VLAN ethernet
236headers with the appropriate tag on TX. In this case using a socket
237filter is recommended, matching only the DSS VLAN subset. This avoids
238unnecessary copying of unrelated IP session data to userspace. For
239example:
240
241 static struct sock_filter dssfilter[] = {
242 /* use special negative offsets to get VLAN tag */
243 BPF_STMT(BPF_LD|BPF_B|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
244 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 1, 0, 6), /* true */
245
246 /* verify DSS VLAN range */
247 BPF_STMT(BPF_LD|BPF_H|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG),
248 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 256, 0, 4), /* 256 is first DSS VLAN */
249 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 512, 3, 0), /* 511 is last DSS VLAN */
250
251 /* verify ethertype */
252 BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 2 * ETH_ALEN),
253 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, ETH_P_802_3, 0, 1),
254
255 BPF_STMT(BPF_RET|BPF_K, (u_int)-1), /* accept */
256 BPF_STMT(BPF_RET|BPF_K, 0), /* ignore */
257 };
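
A hedged sketch of how such a filter could be attached (requires
CAP_NET_RAW; error handling is omitted, and the interface name is only an
example):

  #include <sys/socket.h>
  #include <arpa/inet.h>
  #include <net/if.h>
  #include <linux/filter.h>
  #include <linux/if_ether.h>
  #include <linux/if_packet.h>

  int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
  struct sockaddr_ll sall = {
          .sll_family   = AF_PACKET,
          .sll_protocol = htons(ETH_P_ALL),
          .sll_ifindex  = if_nametoindex("wwan0"),
  };
  struct sock_fprog prog = {
          .len    = sizeof(dssfilter) / sizeof(dssfilter[0]),
          .filter = dssfilter,
  };

  bind(fd, (struct sockaddr *)&sall, sizeof(sall));
  setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));

The received VLAN tag itself can then be obtained via the PACKET_AUXDATA
socket option (tp_vlan_tci in struct tpacket_auxdata).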
258
259
260
261Tagged IP session 0 VLAN
262------------------------
263As described above, MBIM IP session 0 is treated specially by the
264driver. It is initially mapped to untagged frames on the wwanY
265network device.
266
267This mapping implies a few restrictions on multiplexed IPS and DSS
268sessions, which may not always be practical:
269 - no IPS or DSS session can use a frame size greater than the MTU on
270 IP session 0
271 - no IPS or DSS session can be in the up state unless the network
272 device representing IP session 0 is also up
273
274These problems can be avoided by optionally making the driver map IP
275session 0 to a VLAN subdevice, similar to all other IP sessions. This
276behaviour is triggered by adding a VLAN link for the magic VLAN ID
2774094. The driver will then immediately start mapping MBIM IP session
2780 to this VLAN, and will drop untagged frames on the master wwanY
279device.
280
281Tip: It might be less confusing to the end user to name this VLAN
282subdevice after the MBIM SessionID instead of the VLAN ID. For
283example:
284
285 ip link add link wwan0 name wwan0.0 type vlan id 4094
286
287
288VLAN mapping
289------------
290
291Summarizing the cdc_mbim driver mapping described above, we have this
292relationship between VLAN tags on the wwanY network device and MBIM
293sessions on the shared USB data channel:
294
295 VLAN ID MBIM type MBIM SessionID Notes
296 ---------------------------------------------------------
297 untagged IPS 0 a)
298 1 - 255 IPS 1 - 255 <VLANID>
299 256 - 511 DSS 0 - 255 <VLANID - 256>
300 512 - 4093 b)
301 4094 IPS 0 c)
302
303 a) if no VLAN ID 4094 link exists, else dropped
304 b) unsupported VLAN range, unconditionally dropped
305 c) if a VLAN ID 4094 link exists, else dropped
306
307
308
309
310References
311==========
312
313[1] USB Implementers Forum, Inc. - "Universal Serial Bus
314 Communications Class Subclass Specification for Mobile Broadband
315 Interface Model", Revision 1.0 (Errata 1), May 1, 2013
316 - http://www.usb.org/developers/docs/devclass_docs/
317
318[2] USB Implementers Forum, Inc. - "Universal Serial Bus
319 Communications Class Subclass Specifications for Network Control
320 Model Devices", Revision 1.0 (Errata 1), November 24, 2010
321 - http://www.usb.org/developers/docs/devclass_docs/
322
323[3] libmbim - "a glib-based library for talking to WWAN modems and
324 devices which speak the Mobile Interface Broadband Model (MBIM)
325 protocol"
326 - http://www.freedesktop.org/wiki/Software/libmbim/
327
328[4] ModemManager - "a DBus-activated daemon which controls mobile
329 broadband (2G/3G/4G) devices and connections"
330 - http://www.freedesktop.org/wiki/Software/ModemManager/
331
332[5] "MBIM (Mobile Broadband Interface Model) Registry"
333 - http://compliance.usb.org/mbim/
334
335[6] "/proc/bus/usb filesystem output"
336 - Documentation/usb/proc_usb_info.txt
337
338[7] "/sys/bus/usb/devices/.../descriptors"
339 - Documentation/ABI/stable/sysfs-bus-usb
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index e3ba753cb714..ee78eba78a9d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -281,6 +281,7 @@ Possible BPF extensions are shown in the following table:
281 cpu raw_smp_processor_id() 281 cpu raw_smp_processor_id()
282 vlan_tci vlan_tx_tag_get(skb) 282 vlan_tci vlan_tx_tag_get(skb)
283 vlan_pr vlan_tx_tag_present(skb) 283 vlan_pr vlan_tx_tag_present(skb)
284 rand prandom_u32()
284 285
285These extensions can also be prefixed with '#'. 286These extensions can also be prefixed with '#'.
286Examples for low-level BPF: 287Examples for low-level BPF:
@@ -308,6 +309,18 @@ Examples for low-level BPF:
308 ret #-1 309 ret #-1
309 drop: ret #0 310 drop: ret #0
310 311
312** icmp random packet sampling, 1 in 4
313 ldh [12]
314 jne #0x800, drop
315 ldb [23]
316 jneq #1, drop
317 # get a random uint32 number
318 ld rand
319 mod #4
320 jneq #1, drop
321 ret #-1
322 drop: ret #0
323
311** SECCOMP filter example: 324** SECCOMP filter example:
312 325
313 ld [4] /* offsetof(struct seccomp_data, arch) */ 326 ld [4] /* offsetof(struct seccomp_data, arch) */
@@ -548,42 +561,43 @@ toolchain for developing and testing the kernel's JIT compiler.
548 561
549BPF kernel internals 562BPF kernel internals
550-------------------- 563--------------------
551Internally, for the kernel interpreter, a different BPF instruction set 564Internally, for the kernel interpreter, a different instruction set
552format with similar underlying principles from BPF described in previous 565format with similar underlying principles from BPF described in previous
553paragraphs is being used. However, the instruction set format is modelled 566paragraphs is being used. However, the instruction set format is modelled
554closer to the underlying architecture to mimic native instruction sets, so 567closer to the underlying architecture to mimic native instruction sets, so
555that a better performance can be achieved (more details later). 568that a better performance can be achieved (more details later). This new
569ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF, which
570originates from [e]xtended BPF, is not the same as BPF extensions! While
571eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading'
572of the BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.)
556 573
557It is designed to be JITed with one to one mapping, which can also open up 574It is designed to be JITed with one to one mapping, which can also open up
558the possibility for GCC/LLVM compilers to generate optimized BPF code through 575the possibility for GCC/LLVM compilers to generate optimized eBPF code through
559a BPF backend that performs almost as fast as natively compiled code. 576an eBPF backend that performs almost as fast as natively compiled code.
560 577
561The new instruction set was originally designed with the possible goal in 578The new instruction set was originally designed with the possible goal in
562mind to write programs in "restricted C" and compile into BPF with an optional 579mind to write programs in "restricted C" and compile into eBPF with an optional
563GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with 580GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with
564minimal performance overhead over two steps, that is, C -> BPF -> native code. 581minimal performance overhead over two steps, that is, C -> eBPF -> native code.
565 582
566Currently, the new format is being used for running user BPF programs, which 583Currently, the new format is being used for running user BPF programs, which
567includes seccomp BPF, classic socket filters, cls_bpf traffic classifier, 584includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
568team driver's classifier for its load-balancing mode, netfilter's xt_bpf 585team driver's classifier for its load-balancing mode, netfilter's xt_bpf
569extension, PTP dissector/classifier, and much more. They are all internally 586extension, PTP dissector/classifier, and much more. They are all internally
570converted by the kernel into the new instruction set representation and run 587converted by the kernel into the new instruction set representation and run
571in the extended interpreter. For in-kernel handlers, this all works 588in the eBPF interpreter. For in-kernel handlers, this all works transparently
572transparently by using sk_unattached_filter_create() for setting up the 589by using sk_unattached_filter_create() for setting up the filter, resp.
573filter, resp. sk_unattached_filter_destroy() for destroying it. The macro 590sk_unattached_filter_destroy() for destroying it. The macro
574SK_RUN_FILTER(filter, ctx) transparently invokes the right BPF function to 591SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
575run the filter. 'filter' is a pointer to struct sk_filter that we got from 592code to run the filter. 'filter' is a pointer to struct sk_filter that we
576sk_unattached_filter_create(), and 'ctx' the given context (e.g. skb pointer). 593got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
577All constraints and restrictions from sk_chk_filter() apply before a 594skb pointer). All constraints and restrictions from sk_chk_filter() apply
578conversion to the new layout is being done behind the scenes! 595before a conversion to the new layout is being done behind the scenes!
579 596
580Currently, for JITing, the user BPF format is being used and current BPF JIT 597Currently, the classic BPF format is being used for JITing on most of the
581compilers reused whenever possible. In other words, we do not (yet!) perform 598architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
582a JIT compilation in the new layout, however, future work will successively 599however, future work will migrate other JIT compilers as well, so that they
583migrate traditional JIT compilers into the new instruction format as well, so 600will profit from the very same benefits.
584that they will profit from the very same benefits. Thus, when speaking about
585JIT in the following, a JIT compiler (TBD) for the new instruction format is
586meant in this context.
587 601
588Some core changes of the new internal format: 602Some core changes of the new internal format:
589 603
@@ -592,35 +606,35 @@ Some core changes of the new internal format:
592 The old format had two registers A and X, and a hidden frame pointer. The 606 The old format had two registers A and X, and a hidden frame pointer. The
593 new layout extends this to be 10 internal registers and a read-only frame 607 new layout extends this to be 10 internal registers and a read-only frame
594 pointer. Since 64-bit CPUs are passing arguments to functions via registers 608 pointer. Since 64-bit CPUs are passing arguments to functions via registers
595 the number of args from BPF program to in-kernel function is restricted 609 the number of args from eBPF program to in-kernel function is restricted
596 to 5 and one register is used to accept return value from an in-kernel 610 to 5 and one register is used to accept return value from an in-kernel
597 function. Natively, x86_64 passes first 6 arguments in registers, aarch64/ 611 function. Natively, x86_64 passes first 6 arguments in registers, aarch64/
598 sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved 612 sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved
599 registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers. 613 registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers.
600 614
601 Therefore, BPF calling convention is defined as: 615 Therefore, eBPF calling convention is defined as:
602 616
603 * R0 - return value from in-kernel function 617 * R0 - return value from in-kernel function, and exit value for eBPF program
604 * R1 - R5 - arguments from BPF program to in-kernel function 618 * R1 - R5 - arguments from eBPF program to in-kernel function
605 * R6 - R9 - callee saved registers that in-kernel function will preserve 619 * R6 - R9 - callee saved registers that in-kernel function will preserve
606 * R10 - read-only frame pointer to access stack 620 * R10 - read-only frame pointer to access stack
607 621
608 Thus, all BPF registers map one to one to HW registers on x86_64, aarch64, 622 Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64,
609 etc, and BPF calling convention maps directly to ABIs used by the kernel on 623 etc, and eBPF calling convention maps directly to ABIs used by the kernel on
610 64-bit architectures. 624 64-bit architectures.
611 625
612 On 32-bit architectures JIT may map programs that use only 32-bit arithmetic 626 On 32-bit architectures JIT may map programs that use only 32-bit arithmetic
613 and may let more complex programs to be interpreted. 627 and may let more complex programs to be interpreted.
614 628
615 R0 - R5 are scratch registers and BPF program needs spill/fill them if 629 R0 - R5 are scratch registers and eBPF program needs spill/fill them if
616 necessary across calls. Note that there is only one BPF program (== one BPF 630 necessary across calls. Note that there is only one eBPF program (== one
617 main routine) and it cannot call other BPF functions, it can only call 631 eBPF main routine) and it cannot call other eBPF functions, it can only
618 predefined in-kernel functions, though. 632 call predefined in-kernel functions, though.
619 633
620- Register width increases from 32-bit to 64-bit: 634- Register width increases from 32-bit to 64-bit:
621 635
622 Still, the semantics of the original 32-bit ALU operations are preserved 636 Still, the semantics of the original 32-bit ALU operations are preserved
623 via 32-bit subregisters. All BPF registers are 64-bit with 32-bit lower 637 via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower
624 subregisters that zero-extend into 64-bit if they are being written to. 638 subregisters that zero-extend into 64-bit if they are being written to.
625 That behavior maps directly to x86_64 and arm64 subregister definition, but 639 That behavior maps directly to x86_64 and arm64 subregister definition, but
626 makes other JITs more difficult. 640 makes other JITs more difficult.
@@ -631,8 +645,8 @@ Some core changes of the new internal format:
631 645
632 Operation is 64-bit, because on 64-bit architectures, pointers are also 646 Operation is 64-bit, because on 64-bit architectures, pointers are also
633 64-bit wide, and we want to pass 64-bit values in/out of kernel functions, 647 64-bit wide, and we want to pass 64-bit values in/out of kernel functions,
634 so 32-bit BPF registers would otherwise require to define register-pair 648 so 32-bit eBPF registers would otherwise require to define register-pair
635 ABI, thus, there won't be able to use a direct BPF register to HW register 649 ABI, thus, there won't be able to use a direct eBPF register to HW register
636 mapping and JIT would need to do combine/split/move operations for every 650 mapping and JIT would need to do combine/split/move operations for every
637 register in and out of the function, which is complex, bug prone and slow. 651 register in and out of the function, which is complex, bug prone and slow.
638 Another reason is the use of atomic 64-bit counters. 652 Another reason is the use of atomic 64-bit counters.
@@ -646,14 +660,145 @@ Some core changes of the new internal format:
646- Introduces bpf_call insn and register passing convention for zero overhead 660- Introduces bpf_call insn and register passing convention for zero overhead
647 calls from/to other kernel functions: 661 calls from/to other kernel functions:
648 662
649 After a kernel function call, R1 - R5 are reset to unreadable and R0 has a 663 Before an in-kernel function call, the internal BPF program needs to
650 return type of the function. Since R6 - R9 are callee saved, their state is 664 place function arguments into R1 to R5 registers to satisfy calling
651 preserved across the call. 665 convention, then the interpreter will take them from registers and pass
652 666 to in-kernel function. If R1 - R5 registers are mapped to CPU registers
653Also in the new design, BPF is limited to 4096 insns, which means that any 667 that are used for argument passing on given architecture, the JIT compiler
668 doesn't need to emit extra moves. Function arguments will be in the correct
669 registers and the BPF_CALL instruction will be JITed as a single 'call' HW
670 instruction. This calling convention was picked to cover common call
671 situations without performance penalty.
672
673 After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
674 a return value of the function. Since R6 - R9 are callee saved, their state
675 is preserved across the call.
676
677 For example, consider three C functions:
678
679 u64 f1() { return (*_f2)(1); }
680 u64 f2(u64 a) { return f3(a + 1, a); }
681 u64 f3(u64 a, u64 b) { return a - b; }
682
683 GCC can compile f1, f3 into x86_64:
684
685 f1:
686 movl $1, %edi
687 movq _f2(%rip), %rax
688 jmp *%rax
689 f3:
690 movq %rdi, %rax
691 subq %rsi, %rax
692 ret
693
694 Function f2 in eBPF may look like:
695
696 f2:
697 bpf_mov R2, R1
698 bpf_add R1, 1
699 bpf_call f3
700 bpf_exit
701
702 If f2 is JITed and the pointer is stored in '_f2', the calls f1 -> f2 -> f3
703 and returns will be seamless. Without JIT, the __sk_run_filter() interpreter
704 needs to be used to call into f2.
705
706 For practical reasons all eBPF programs have only one argument 'ctx' which is
707 already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
708 can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
709 are currently not supported, but these restrictions can be lifted if necessary
710 in the future.
711
712 On 64-bit architectures all registers map to HW registers one to one. For
713 example, the x86_64 JIT compiler can map them as ...
714
715 R0 - rax
716 R1 - rdi
717 R2 - rsi
718 R3 - rdx
719 R4 - rcx
720 R5 - r8
721 R6 - rbx
722 R7 - r13
723 R8 - r14
724 R9 - r15
725 R10 - rbp
726
727 ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
728 and rbx, r12 - r15 are callee saved.
729
730 Then the following internal BPF pseudo-program:
731
732 bpf_mov R6, R1 /* save ctx */
733 bpf_mov R2, 2
734 bpf_mov R3, 3
735 bpf_mov R4, 4
736 bpf_mov R5, 5
737 bpf_call foo
738 bpf_mov R7, R0 /* save foo() return value */
739 bpf_mov R1, R6 /* restore ctx for next call */
740 bpf_mov R2, 6
741 bpf_mov R3, 7
742 bpf_mov R4, 8
743 bpf_mov R5, 9
744 bpf_call bar
745 bpf_add R0, R7
746 bpf_exit
747
748 After JIT to x86_64 the code may look like:
749
750 push %rbp
751 mov %rsp,%rbp
752 sub $0x228,%rsp
753 mov %rbx,-0x228(%rbp)
754 mov %r13,-0x220(%rbp)
755 mov %rdi,%rbx
756 mov $0x2,%esi
757 mov $0x3,%edx
758 mov $0x4,%ecx
759 mov $0x5,%r8d
760 callq foo
761 mov %rax,%r13
762 mov %rbx,%rdi
763 mov $0x2,%esi
764 mov $0x3,%edx
765 mov $0x4,%ecx
766 mov $0x5,%r8d
767 callq bar
768 add %r13,%rax
769 mov -0x228(%rbp),%rbx
770 mov -0x220(%rbp),%r13
771 leaveq
772 retq
773
774 This is, in this example, equivalent in C to:
775
776 u64 bpf_filter(u64 ctx)
777 {
778 return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
779 }
780
781 In-kernel functions foo() and bar() with prototype u64 (*)(u64 arg1, u64
782 arg2, u64 arg3, u64 arg4, u64 arg5) will receive arguments in the proper
783 registers and place their return value into '%rax', which is R0 in eBPF.
784 Prologue and epilogue are emitted by the JIT and are implicit in the
785 interpreter. R0-R5 are scratch registers, so an eBPF program needs to
786 preserve them across calls itself, as defined by the calling convention.
787
788 For example the following program is invalid:
789
790 bpf_mov R1, 1
791 bpf_call foo
792 bpf_mov R0, R1
793 bpf_exit
794
795 After the call the registers R1-R5 contain junk values and cannot be read.
796 In the future an eBPF verifier can be used to validate internal BPF programs.
797
798Also in the new design, eBPF is limited to 4096 insns, which means that any
654program will terminate quickly and will only call a fixed number of kernel 799program will terminate quickly and will only call a fixed number of kernel
655functions. Original BPF and the new format are two operand instructions, 800functions. Original BPF and the new format are two operand instructions,
656which helps to do one-to-one mapping between BPF insn and x86 insn during JIT. 801which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT.
657 802
658The input context pointer for invoking the interpreter function is generic, 803The input context pointer for invoking the interpreter function is generic,
659its content is defined by a specific use case. For seccomp register R1 points 804its content is defined by a specific use case. For seccomp register R1 points
@@ -661,7 +806,26 @@ to seccomp_data, for converted BPF filters R1 points to a skb.
661 806
662A program that is translated internally consists of the following elements: 807A program that is translated internally consists of the following elements:
663 808
664 op:16, jt:8, jf:8, k:32 ==> op:8, a_reg:4, x_reg:4, off:16, imm:32 809 op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32
810
811So far, 87 internal BPF instructions have been implemented. The 8-bit 'op'
812opcode field leaves room for new instructions, some of which may use 16/24/32
813byte encodings. New instructions must be a multiple of 8 bytes to preserve
backward compatibility.
814
815Internal BPF is a general purpose RISC instruction set. Not every register and
816instruction is used during translation from original BPF to the new format.
817For example, socket filters do not use the 'exclusive add' instruction, but
818tracing filters may, to maintain event counters. Register R9 is not used by
819socket filters either, but more complex filters may run out of registers and
820would have to resort to spill/fill to the stack.
821
822Internal BPF can be used as a generic assembler for last-step performance
823optimizations; socket filters and seccomp already use it that way. Tracing
824filters may use it as an assembler to generate code from within the kernel.
825In-kernel usage may not be bound by security considerations, since generated
826internal BPF code may optimize an internal code path without ever being
827exposed to user space. Safety of internal BPF can come from a verifier (TBD);
828in the use cases described, it may be used as a safe instruction set.
665 829
666Just like the original BPF, the new format runs within a controlled environment, 830Just like the original BPF, the new format runs within a controlled environment,
667is deterministic and the kernel can easily prove that. The safety of the program 831is deterministic and the kernel can easily prove that. The safety of the program
@@ -670,6 +834,181 @@ loops and other CFG validation; second step starts from the first insn and
670descends all possible paths. It simulates execution of every insn and observes 834descends all possible paths. It simulates execution of every insn and observes
671the state change of registers and stack. 835the state change of registers and stack.
672 836
837eBPF opcode encoding
838--------------------
839
840eBPF is reusing most of the opcode encoding from classic to simplify conversion
841of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code'
842field is divided into three parts:
843
844 +----------------+--------+--------------------+
845 | 4 bits | 1 bit | 3 bits |
846 | operation code | source | instruction class |
847 +----------------+--------+--------------------+
848 (MSB) (LSB)
849
850Three LSB bits store instruction class which is one of:
851
852 Classic BPF classes: eBPF classes:
853
854 BPF_LD 0x00 BPF_LD 0x00
855 BPF_LDX 0x01 BPF_LDX 0x01
856 BPF_ST 0x02 BPF_ST 0x02
857 BPF_STX 0x03 BPF_STX 0x03
858 BPF_ALU 0x04 BPF_ALU 0x04
859 BPF_JMP 0x05 BPF_JMP 0x05
860 BPF_RET 0x06 [ class 6 unused, for future if needed ]
861 BPF_MISC 0x07 BPF_ALU64 0x07
862
863When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
864
865 BPF_K 0x00
866 BPF_X 0x08
867
868 * in classic BPF, this means:
869
870 BPF_SRC(code) == BPF_X - use register X as source operand
871 BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
872
873 * in eBPF, this means:
874
875 BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand
876 BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
877
878... and four MSB bits store operation code.
879
880If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
881
882 BPF_ADD 0x00
883 BPF_SUB 0x10
884 BPF_MUL 0x20
885 BPF_DIV 0x30
886 BPF_OR 0x40
887 BPF_AND 0x50
888 BPF_LSH 0x60
889 BPF_RSH 0x70
890 BPF_NEG 0x80
891 BPF_MOD 0x90
892 BPF_XOR 0xa0
893 BPF_MOV 0xb0 /* eBPF only: mov reg to reg */
894 BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
895 BPF_END 0xd0 /* eBPF only: endianness conversion */
896
897If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
898
899 BPF_JA 0x00
900 BPF_JEQ 0x10
901 BPF_JGT 0x20
902 BPF_JGE 0x30
903 BPF_JSET 0x40
904 BPF_JNE 0x50 /* eBPF only: jump != */
905 BPF_JSGT 0x60 /* eBPF only: signed '>' */
906 BPF_JSGE 0x70 /* eBPF only: signed '>=' */
907 BPF_CALL 0x80 /* eBPF only: function call */
908 BPF_EXIT 0x90 /* eBPF only: function return */
909
910So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
911and eBPF. There are only two registers in classic BPF, so it means A += X.
912In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly,
913BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and the analogous
914dst_reg = (u32) dst_reg ^ (u32) imm32 in eBPF.
915
916Classic BPF is using BPF_MISC class to represent A = X and X = A moves.
917eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no
918BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean
919exactly the same operations as BPF_ALU, but with 64-bit wide operands
920instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.:
921dst_reg = dst_reg + src_reg
922
923Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
924operation. Classic BPF_RET | BPF_K means copy imm32 into return register
925and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
926in eBPF means function exit only. The eBPF program needs to store return
927value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
928unused and reserved for future use.
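
For illustration, in the pseudo-asm used earlier in this document, a
classic 'ret #1' therefore corresponds to the eBPF pair:

  bpf_mov R0, 1
  bpf_exit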
929
930For load and store instructions the 8-bit 'code' field is divided as:
931
932 +--------+--------+-------------------+
933 | 3 bits | 2 bits | 3 bits |
934 | mode | size | instruction class |
935 +--------+--------+-------------------+
936 (MSB) (LSB)
937
938Size modifier is one of ...
939
940 BPF_W 0x00 /* word */
941 BPF_H 0x08 /* half word */
942 BPF_B 0x10 /* byte */
943 BPF_DW 0x18 /* eBPF only, double word */
944
945... which encodes size of load/store operation:
946
947 B - 1 byte
948 H - 2 byte
949 W - 4 byte
950 DW - 8 byte (eBPF only)
951
952Mode modifier is one of:
953
954 BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */
955 BPF_ABS 0x20
956 BPF_IND 0x40
957 BPF_MEM 0x60
958 BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
959 BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
960 BPF_XADD 0xc0 /* eBPF only, exclusive add */
961
962eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
963(BPF_IND | <size> | BPF_LD) which are used to access packet data.
964
965They had to be carried over from classic BPF to keep strong performance of
966socket filters running in the eBPF interpreter. These instructions can only
967be used when the interpreter context is a pointer to 'struct sk_buff' and
968have seven implicit operands. Register R6 is an implicit input that must
969contain a pointer to the sk_buff. Register R0 is an implicit output which
970contains the data fetched from the packet. Registers R1-R5 are scratch
971registers and must not be used to store data across BPF_ABS | BPF_LD or
972BPF_IND | BPF_LD instructions.
973
974These instructions have an implicit program exit condition as well. When an
975eBPF program tries to access data beyond the packet boundary, the
976interpreter will abort the execution of the program. JIT compilers
977therefore must preserve this property. The src_reg and imm32 fields are
978explicit inputs to these instructions.
979
980For example:
981
982 BPF_IND | BPF_W | BPF_LD means:
983
984 R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
985 and R1 - R5 are clobbered.
986
987Unlike classic BPF instruction set, eBPF has generic load/store operations:
988
989BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
990BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
991BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
992BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
993BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
994
995Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
9962 byte atomic increments are not supported.
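
As a worked example of the encodings above (an editor's sketch; the struct
name is illustrative, matching the op:8, dst_reg:4, src_reg:4, off:16,
imm:32 layout described earlier), a 64-bit 'R3 += R4' could be laid out as:

  #include <linux/types.h>

  struct ebpf_insn {              /* illustrative name */
          __u8    code;           /* opcode */
          __u8    dst_reg:4;      /* destination register */
          __u8    src_reg:4;      /* source register */
          __s16   off;            /* signed offset */
          __s32   imm;            /* signed immediate */
  };

  /* BPF_ALU64 (0x07) | BPF_ADD (0x00) | BPF_X (0x08) == 0x0f */
  struct ebpf_insn add_r3_r4 = {
          .code    = 0x0f,
          .dst_reg = 3,
          .src_reg = 4,
          .off     = 0,
          .imm     = 0,
  };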
997
998Testing
999-------
1000
1001Next to the BPF toolchain, the kernel also ships a test module that contains
1002various test cases for classic and internal BPF that can be executed against
1003the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
1004enabled via Kconfig:
1005
1006 CONFIG_TEST_BPF=m
1007
1008After the module has been built and installed, the test suite can be executed
1009via insmod or modprobe against the 'test_bpf' module. Results of the test
1010cases, including timings in nsec, can be found in the kernel log (dmesg).
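
For example (a typical invocation; the exact output format may vary by
kernel version):

  modprobe test_bpf
  dmesg | tail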
1011
673Misc 1012Misc
674---- 1013----
675 1014
diff --git a/MAINTAINERS b/MAINTAINERS
index c2297fab77c8..055f95238d88 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -604,6 +604,13 @@ L: amd64-microcode@amd64.org
604S: Maintained 604S: Maintained
605F: arch/x86/kernel/microcode_amd.c 605F: arch/x86/kernel/microcode_amd.c
606 606
607AMD XGBE DRIVER
608M: Tom Lendacky <thomas.lendacky@amd.com>
609L: netdev@vger.kernel.org
610S: Supported
611F: drivers/net/ethernet/amd/xgbe/
612F: drivers/net/phy/amd-xgbe-phy.c
613
607AMS (Apple Motion Sensor) DRIVER 614AMS (Apple Motion Sensor) DRIVER
608M: Michael Hanselmann <linux-kernel@hansmi.ch> 615M: Michael Hanselmann <linux-kernel@hansmi.ch>
609S: Supported 616S: Supported
@@ -1894,7 +1901,7 @@ F: drivers/net/ethernet/broadcom/bnx2.*
1894F: drivers/net/ethernet/broadcom/bnx2_* 1901F: drivers/net/ethernet/broadcom/bnx2_*
1895 1902
1896BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER 1903BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
1897M: Ariel Elior <ariele@broadcom.com> 1904M: Ariel Elior <ariel.elior@qlogic.com>
1898L: netdev@vger.kernel.org 1905L: netdev@vger.kernel.org
1899S: Supported 1906S: Supported
1900F: drivers/net/ethernet/broadcom/bnx2x/ 1907F: drivers/net/ethernet/broadcom/bnx2x/
@@ -1974,6 +1981,12 @@ S: Maintained
1974F: drivers/bcma/ 1981F: drivers/bcma/
1975F: include/linux/bcma/ 1982F: include/linux/bcma/
1976 1983
1984BROADCOM SYSTEMPORT ETHERNET DRIVER
1985M: Florian Fainelli <f.fainelli@gmail.com>
1986L: netdev@vger.kernel.org
1987S: Supported
1988F: drivers/net/ethernet/broadcom/bcmsysport.*
1989
1977BROCADE BFA FC SCSI DRIVER 1990BROCADE BFA FC SCSI DRIVER
1978M: Anil Gurumurthy <anil.gurumurthy@qlogic.com> 1991M: Anil Gurumurthy <anil.gurumurthy@qlogic.com>
1979M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com> 1992M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@ -2230,9 +2243,8 @@ F: drivers/platform/chrome/
2230CISCO VIC ETHERNET NIC DRIVER 2243CISCO VIC ETHERNET NIC DRIVER
2231M: Christian Benvenuti <benve@cisco.com> 2244M: Christian Benvenuti <benve@cisco.com>
2232M: Sujith Sankar <ssujith@cisco.com> 2245M: Sujith Sankar <ssujith@cisco.com>
2233M: Govindarajulu Varadarajan <govindarajulu90@gmail.com> 2246M: Govindarajulu Varadarajan <_govind@gmx.com>
2234M: Neel Patel <neepatel@cisco.com> 2247M: Neel Patel <neepatel@cisco.com>
2235M: Nishank Trivedi <nistrive@cisco.com>
2236S: Supported 2248S: Supported
2237F: drivers/net/ethernet/cisco/enic/ 2249F: drivers/net/ethernet/cisco/enic/
2238 2250
@@ -6168,6 +6180,7 @@ F: include/uapi/linux/netdevice.h
6168F: tools/net/ 6180F: tools/net/
6169F: tools/testing/selftests/net/ 6181F: tools/testing/selftests/net/
6170F: lib/random32.c 6182F: lib/random32.c
6183F: lib/test_bpf.c
6171 6184
6172NETWORKING [IPv4/IPv6] 6185NETWORKING [IPv4/IPv6]
6173M: "David S. Miller" <davem@davemloft.net> 6186M: "David S. Miller" <davem@davemloft.net>
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 9f53e824b037..4a4e02d0ce9e 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -662,6 +662,8 @@
662 mac: ethernet@4a100000 { 662 mac: ethernet@4a100000 {
663 compatible = "ti,cpsw"; 663 compatible = "ti,cpsw";
664 ti,hwmods = "cpgmac0"; 664 ti,hwmods = "cpgmac0";
665 clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
666 clock-names = "fck", "cpts";
665 cpdma_channels = <8>; 667 cpdma_channels = <8>;
666 ale_entries = <1024>; 668 ale_entries = <1024>;
667 bd_ram_size = <0x2000>; 669 bd_ram_size = <0x2000>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index db464d7eaca8..49fa59622254 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -490,6 +490,8 @@
490 #address-cells = <1>; 490 #address-cells = <1>;
491 #size-cells = <1>; 491 #size-cells = <1>;
492 ti,hwmods = "cpgmac0"; 492 ti,hwmods = "cpgmac0";
493 clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
494 clock-names = "fck", "cpts";
493 status = "disabled"; 495 status = "disabled";
494 cpdma_channels = <8>; 496 cpdma_channels = <8>;
495 ale_entries = <1024>; 497 ale_entries = <1024>;
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
index 25674fe81f70..7e291e2ef4b3 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -57,6 +57,10 @@
57 ethernet@30000 { 57 ethernet@30000 {
58 status = "okay"; 58 status = "okay";
59 phy-mode = "sgmii"; 59 phy-mode = "sgmii";
60 fixed-link {
61 speed = <1000>;
62 full-duplex;
63 };
60 }; 64 };
61 65
62 pcie-controller { 66 pcie-controller {
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index 51d0e912c8f5..1929ad390d88 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -165,5 +165,11 @@
165 reg = <0xd8100000 0x10000>; 165 reg = <0xd8100000 0x10000>;
166 interrupts = <48>; 166 interrupts = <48>;
167 }; 167 };
168
169 ethernet@d8004000 {
170 compatible = "via,vt8500-rhine";
171 reg = <0xd8004000 0x100>;
172 interrupts = <10>;
173 };
168 }; 174 };
169}; 175};
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 7525982262ac..b1c59a766a13 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -218,5 +218,11 @@
218 reg = <0xd8100000 0x10000>; 218 reg = <0xd8100000 0x10000>;
219 interrupts = <48>; 219 interrupts = <48>;
220 }; 220 };
221
222 ethernet@d8004000 {
223 compatible = "via,vt8500-rhine";
224 reg = <0xd8004000 0x100>;
225 interrupts = <10>;
226 };
221 }; 227 };
222}; 228};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index d98386dd2882..8fbccfbe75f3 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -298,5 +298,11 @@
298 bus-width = <4>; 298 bus-width = <4>;
299 sdon-inverted; 299 sdon-inverted;
300 }; 300 };
301
302 ethernet@d8004000 {
303 compatible = "via,vt8500-rhine";
304 reg = <0xd8004000 0x100>;
305 interrupts = <10>;
306 };
301 }; 307 };
302}; 308};
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index e4dec9fcb084..9c6029ba526f 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -23,9 +23,7 @@
23#include "board.h" 23#include "board.h"
24 24
25static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = { 25static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
26 .name = "wifi_rfkill", 26 .name = "wifi_rfkill",
27 .reset_gpio = 25, /* PD1 */
28 .shutdown_gpio = 85, /* PK5 */
29 .type = RFKILL_TYPE_WLAN, 27 .type = RFKILL_TYPE_WLAN,
30}; 28};
31 29
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6f879c319a9d..fb5503ce016f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
136 u16 ret = 0; 136 u16 ret = 0;
137 137
138 if ((ctx->skf->len > 1) || 138 if ((ctx->skf->len > 1) ||
139 (ctx->skf->insns[0].code == BPF_S_RET_A)) 139 (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
140 ret |= 1 << r_A; 140 ret |= 1 << r_A;
141 141
142#ifdef CONFIG_FRAME_POINTER 142#ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
164static inline bool is_load_to_a(u16 inst) 164static inline bool is_load_to_a(u16 inst)
165{ 165{
166 switch (inst) { 166 switch (inst) {
167 case BPF_S_LD_W_LEN: 167 case BPF_LD | BPF_W | BPF_LEN:
168 case BPF_S_LD_W_ABS: 168 case BPF_LD | BPF_W | BPF_ABS:
169 case BPF_S_LD_H_ABS: 169 case BPF_LD | BPF_H | BPF_ABS:
170 case BPF_S_LD_B_ABS: 170 case BPF_LD | BPF_B | BPF_ABS:
171 case BPF_S_ANC_CPU:
172 case BPF_S_ANC_IFINDEX:
173 case BPF_S_ANC_MARK:
174 case BPF_S_ANC_PROTOCOL:
175 case BPF_S_ANC_RXHASH:
176 case BPF_S_ANC_VLAN_TAG:
177 case BPF_S_ANC_VLAN_TAG_PRESENT:
178 case BPF_S_ANC_QUEUE:
179 return true; 171 return true;
180 default: 172 default:
181 return false; 173 return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
215 emit(ARM_MOV_I(r_X, 0), ctx); 207 emit(ARM_MOV_I(r_X, 0), ctx);
216 208
217 /* do not leak kernel data to userspace */ 209 /* do not leak kernel data to userspace */
218 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 210 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
219 emit(ARM_MOV_I(r_A, 0), ctx); 211 emit(ARM_MOV_I(r_A, 0), ctx);
220 212
221 /* stack space for the BPF_MEM words */ 213 /* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
480 u32 k; 472 u32 k;
481 473
482 for (i = 0; i < prog->len; i++) { 474 for (i = 0; i < prog->len; i++) {
475 u16 code;
476
483 inst = &(prog->insns[i]); 477 inst = &(prog->insns[i]);
484 /* K as an immediate value operand */ 478 /* K as an immediate value operand */
485 k = inst->k; 479 k = inst->k;
480 code = bpf_anc_helper(inst);
486 481
487 /* compute offsets only in the fake pass */ 482 /* compute offsets only in the fake pass */
488 if (ctx->target == NULL) 483 if (ctx->target == NULL)
489 ctx->offsets[i] = ctx->idx * 4; 484 ctx->offsets[i] = ctx->idx * 4;
490 485
491 switch (inst->code) { 486 switch (code) {
492 case BPF_S_LD_IMM: 487 case BPF_LD | BPF_IMM:
493 emit_mov_i(r_A, k, ctx); 488 emit_mov_i(r_A, k, ctx);
494 break; 489 break;
495 case BPF_S_LD_W_LEN: 490 case BPF_LD | BPF_W | BPF_LEN:
496 ctx->seen |= SEEN_SKB; 491 ctx->seen |= SEEN_SKB;
497 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 492 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
498 emit(ARM_LDR_I(r_A, r_skb, 493 emit(ARM_LDR_I(r_A, r_skb,
499 offsetof(struct sk_buff, len)), ctx); 494 offsetof(struct sk_buff, len)), ctx);
500 break; 495 break;
501 case BPF_S_LD_MEM: 496 case BPF_LD | BPF_MEM:
502 /* A = scratch[k] */ 497 /* A = scratch[k] */
503 ctx->seen |= SEEN_MEM_WORD(k); 498 ctx->seen |= SEEN_MEM_WORD(k);
504 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 499 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
505 break; 500 break;
506 case BPF_S_LD_W_ABS: 501 case BPF_LD | BPF_W | BPF_ABS:
507 load_order = 2; 502 load_order = 2;
508 goto load; 503 goto load;
509 case BPF_S_LD_H_ABS: 504 case BPF_LD | BPF_H | BPF_ABS:
510 load_order = 1; 505 load_order = 1;
511 goto load; 506 goto load;
512 case BPF_S_LD_B_ABS: 507 case BPF_LD | BPF_B | BPF_ABS:
513 load_order = 0; 508 load_order = 0;
514load: 509load:
515 /* the interpreter will deal with the negative K */ 510 /* the interpreter will deal with the negative K */
@@ -552,31 +547,31 @@ load_common:
552 emit_err_ret(ARM_COND_NE, ctx); 547 emit_err_ret(ARM_COND_NE, ctx);
553 emit(ARM_MOV_R(r_A, ARM_R0), ctx); 548 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
554 break; 549 break;
555 case BPF_S_LD_W_IND: 550 case BPF_LD | BPF_W | BPF_IND:
556 load_order = 2; 551 load_order = 2;
557 goto load_ind; 552 goto load_ind;
558 case BPF_S_LD_H_IND: 553 case BPF_LD | BPF_H | BPF_IND:
559 load_order = 1; 554 load_order = 1;
560 goto load_ind; 555 goto load_ind;
561 case BPF_S_LD_B_IND: 556 case BPF_LD | BPF_B | BPF_IND:
562 load_order = 0; 557 load_order = 0;
563load_ind: 558load_ind:
564 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 559 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
565 goto load_common; 560 goto load_common;
566 case BPF_S_LDX_IMM: 561 case BPF_LDX | BPF_IMM:
567 ctx->seen |= SEEN_X; 562 ctx->seen |= SEEN_X;
568 emit_mov_i(r_X, k, ctx); 563 emit_mov_i(r_X, k, ctx);
569 break; 564 break;
570 case BPF_S_LDX_W_LEN: 565 case BPF_LDX | BPF_W | BPF_LEN:
571 ctx->seen |= SEEN_X | SEEN_SKB; 566 ctx->seen |= SEEN_X | SEEN_SKB;
572 emit(ARM_LDR_I(r_X, r_skb, 567 emit(ARM_LDR_I(r_X, r_skb,
573 offsetof(struct sk_buff, len)), ctx); 568 offsetof(struct sk_buff, len)), ctx);
574 break; 569 break;
575 case BPF_S_LDX_MEM: 570 case BPF_LDX | BPF_MEM:
576 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); 571 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
577 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 572 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
578 break; 573 break;
579 case BPF_S_LDX_B_MSH: 574 case BPF_LDX | BPF_B | BPF_MSH:
580 /* x = ((*(frame + k)) & 0xf) << 2; */ 575 /* x = ((*(frame + k)) & 0xf) << 2; */
581 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 576 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
582 /* the interpreter should deal with the negative K */ 577 /* the interpreter should deal with the negative K */
@@ -606,113 +601,113 @@ load_ind:
606 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); 601 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
607 emit(ARM_LSL_I(r_X, r_X, 2), ctx); 602 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
608 break; 603 break;
609 case BPF_S_ST: 604 case BPF_ST:
610 ctx->seen |= SEEN_MEM_WORD(k); 605 ctx->seen |= SEEN_MEM_WORD(k);
611 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 606 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
612 break; 607 break;
613 case BPF_S_STX: 608 case BPF_STX:
614 update_on_xread(ctx); 609 update_on_xread(ctx);
615 ctx->seen |= SEEN_MEM_WORD(k); 610 ctx->seen |= SEEN_MEM_WORD(k);
616 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 611 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
617 break; 612 break;
618 case BPF_S_ALU_ADD_K: 613 case BPF_ALU | BPF_ADD | BPF_K:
619 /* A += K */ 614 /* A += K */
620 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); 615 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
621 break; 616 break;
622 case BPF_S_ALU_ADD_X: 617 case BPF_ALU | BPF_ADD | BPF_X:
623 update_on_xread(ctx); 618 update_on_xread(ctx);
624 emit(ARM_ADD_R(r_A, r_A, r_X), ctx); 619 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
625 break; 620 break;
626 case BPF_S_ALU_SUB_K: 621 case BPF_ALU | BPF_SUB | BPF_K:
627 /* A -= K */ 622 /* A -= K */
628 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); 623 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
629 break; 624 break;
630 case BPF_S_ALU_SUB_X: 625 case BPF_ALU | BPF_SUB | BPF_X:
631 update_on_xread(ctx); 626 update_on_xread(ctx);
632 emit(ARM_SUB_R(r_A, r_A, r_X), ctx); 627 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
633 break; 628 break;
634 case BPF_S_ALU_MUL_K: 629 case BPF_ALU | BPF_MUL | BPF_K:
635 /* A *= K */ 630 /* A *= K */
636 emit_mov_i(r_scratch, k, ctx); 631 emit_mov_i(r_scratch, k, ctx);
637 emit(ARM_MUL(r_A, r_A, r_scratch), ctx); 632 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
638 break; 633 break;
639 case BPF_S_ALU_MUL_X: 634 case BPF_ALU | BPF_MUL | BPF_X:
640 update_on_xread(ctx); 635 update_on_xread(ctx);
641 emit(ARM_MUL(r_A, r_A, r_X), ctx); 636 emit(ARM_MUL(r_A, r_A, r_X), ctx);
642 break; 637 break;
643 case BPF_S_ALU_DIV_K: 638 case BPF_ALU | BPF_DIV | BPF_K:
644 if (k == 1) 639 if (k == 1)
645 break; 640 break;
646 emit_mov_i(r_scratch, k, ctx); 641 emit_mov_i(r_scratch, k, ctx);
647 emit_udiv(r_A, r_A, r_scratch, ctx); 642 emit_udiv(r_A, r_A, r_scratch, ctx);
648 break; 643 break;
649 case BPF_S_ALU_DIV_X: 644 case BPF_ALU | BPF_DIV | BPF_X:
650 update_on_xread(ctx); 645 update_on_xread(ctx);
651 emit(ARM_CMP_I(r_X, 0), ctx); 646 emit(ARM_CMP_I(r_X, 0), ctx);
652 emit_err_ret(ARM_COND_EQ, ctx); 647 emit_err_ret(ARM_COND_EQ, ctx);
653 emit_udiv(r_A, r_A, r_X, ctx); 648 emit_udiv(r_A, r_A, r_X, ctx);
654 break; 649 break;
655 case BPF_S_ALU_OR_K: 650 case BPF_ALU | BPF_OR | BPF_K:
656 /* A |= K */ 651 /* A |= K */
657 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); 652 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
658 break; 653 break;
659 case BPF_S_ALU_OR_X: 654 case BPF_ALU | BPF_OR | BPF_X:
660 update_on_xread(ctx); 655 update_on_xread(ctx);
661 emit(ARM_ORR_R(r_A, r_A, r_X), ctx); 656 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
662 break; 657 break;
663 case BPF_S_ALU_XOR_K: 658 case BPF_ALU | BPF_XOR | BPF_K:
664 /* A ^= K; */ 659 /* A ^= K; */
665 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); 660 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
666 break; 661 break;
667 case BPF_S_ANC_ALU_XOR_X: 662 case BPF_ANC | SKF_AD_ALU_XOR_X:
668 case BPF_S_ALU_XOR_X: 663 case BPF_ALU | BPF_XOR | BPF_X:
669 /* A ^= X */ 664 /* A ^= X */
670 update_on_xread(ctx); 665 update_on_xread(ctx);
671 emit(ARM_EOR_R(r_A, r_A, r_X), ctx); 666 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
672 break; 667 break;
673 case BPF_S_ALU_AND_K: 668 case BPF_ALU | BPF_AND | BPF_K:
674 /* A &= K */ 669 /* A &= K */
675 OP_IMM3(ARM_AND, r_A, r_A, k, ctx); 670 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
676 break; 671 break;
677 case BPF_S_ALU_AND_X: 672 case BPF_ALU | BPF_AND | BPF_X:
678 update_on_xread(ctx); 673 update_on_xread(ctx);
679 emit(ARM_AND_R(r_A, r_A, r_X), ctx); 674 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
680 break; 675 break;
681 case BPF_S_ALU_LSH_K: 676 case BPF_ALU | BPF_LSH | BPF_K:
682 if (unlikely(k > 31)) 677 if (unlikely(k > 31))
683 return -1; 678 return -1;
684 emit(ARM_LSL_I(r_A, r_A, k), ctx); 679 emit(ARM_LSL_I(r_A, r_A, k), ctx);
685 break; 680 break;
686 case BPF_S_ALU_LSH_X: 681 case BPF_ALU | BPF_LSH | BPF_X:
687 update_on_xread(ctx); 682 update_on_xread(ctx);
688 emit(ARM_LSL_R(r_A, r_A, r_X), ctx); 683 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
689 break; 684 break;
690 case BPF_S_ALU_RSH_K: 685 case BPF_ALU | BPF_RSH | BPF_K:
691 if (unlikely(k > 31)) 686 if (unlikely(k > 31))
692 return -1; 687 return -1;
693 emit(ARM_LSR_I(r_A, r_A, k), ctx); 688 emit(ARM_LSR_I(r_A, r_A, k), ctx);
694 break; 689 break;
695 case BPF_S_ALU_RSH_X: 690 case BPF_ALU | BPF_RSH | BPF_X:
696 update_on_xread(ctx); 691 update_on_xread(ctx);
697 emit(ARM_LSR_R(r_A, r_A, r_X), ctx); 692 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
698 break; 693 break;
699 case BPF_S_ALU_NEG: 694 case BPF_ALU | BPF_NEG:
700 /* A = -A */ 695 /* A = -A */
701 emit(ARM_RSB_I(r_A, r_A, 0), ctx); 696 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
702 break; 697 break;
703 case BPF_S_JMP_JA: 698 case BPF_JMP | BPF_JA:
704 /* pc += K */ 699 /* pc += K */
705 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); 700 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
706 break; 701 break;
707 case BPF_S_JMP_JEQ_K: 702 case BPF_JMP | BPF_JEQ | BPF_K:
708 /* pc += (A == K) ? pc->jt : pc->jf */ 703 /* pc += (A == K) ? pc->jt : pc->jf */
709 condt = ARM_COND_EQ; 704 condt = ARM_COND_EQ;
710 goto cmp_imm; 705 goto cmp_imm;
711 case BPF_S_JMP_JGT_K: 706 case BPF_JMP | BPF_JGT | BPF_K:
712 /* pc += (A > K) ? pc->jt : pc->jf */ 707 /* pc += (A > K) ? pc->jt : pc->jf */
713 condt = ARM_COND_HI; 708 condt = ARM_COND_HI;
714 goto cmp_imm; 709 goto cmp_imm;
715 case BPF_S_JMP_JGE_K: 710 case BPF_JMP | BPF_JGE | BPF_K:
716 /* pc += (A >= K) ? pc->jt : pc->jf */ 711 /* pc += (A >= K) ? pc->jt : pc->jf */
717 condt = ARM_COND_HS; 712 condt = ARM_COND_HS;
718cmp_imm: 713cmp_imm:
@@ -731,22 +726,22 @@ cond_jump:
731 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, 726 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
732 ctx)), ctx); 727 ctx)), ctx);
733 break; 728 break;
734 case BPF_S_JMP_JEQ_X: 729 case BPF_JMP | BPF_JEQ | BPF_X:
735 /* pc += (A == X) ? pc->jt : pc->jf */ 730 /* pc += (A == X) ? pc->jt : pc->jf */
736 condt = ARM_COND_EQ; 731 condt = ARM_COND_EQ;
737 goto cmp_x; 732 goto cmp_x;
738 case BPF_S_JMP_JGT_X: 733 case BPF_JMP | BPF_JGT | BPF_X:
739 /* pc += (A > X) ? pc->jt : pc->jf */ 734 /* pc += (A > X) ? pc->jt : pc->jf */
740 condt = ARM_COND_HI; 735 condt = ARM_COND_HI;
741 goto cmp_x; 736 goto cmp_x;
742 case BPF_S_JMP_JGE_X: 737 case BPF_JMP | BPF_JGE | BPF_X:
743 /* pc += (A >= X) ? pc->jt : pc->jf */ 738 /* pc += (A >= X) ? pc->jt : pc->jf */
744 condt = ARM_COND_CS; 739 condt = ARM_COND_CS;
745cmp_x: 740cmp_x:
746 update_on_xread(ctx); 741 update_on_xread(ctx);
747 emit(ARM_CMP_R(r_A, r_X), ctx); 742 emit(ARM_CMP_R(r_A, r_X), ctx);
748 goto cond_jump; 743 goto cond_jump;
749 case BPF_S_JMP_JSET_K: 744 case BPF_JMP | BPF_JSET | BPF_K:
750 /* pc += (A & K) ? pc->jt : pc->jf */ 745 /* pc += (A & K) ? pc->jt : pc->jf */
751 condt = ARM_COND_NE; 746 condt = ARM_COND_NE;
752 /* not set iff all zeroes iff Z==1 iff EQ */ 747 /* not set iff all zeroes iff Z==1 iff EQ */
@@ -759,16 +754,16 @@ cmp_x:
759 emit(ARM_TST_I(r_A, imm12), ctx); 754 emit(ARM_TST_I(r_A, imm12), ctx);
760 } 755 }
761 goto cond_jump; 756 goto cond_jump;
762 case BPF_S_JMP_JSET_X: 757 case BPF_JMP | BPF_JSET | BPF_X:
763 /* pc += (A & X) ? pc->jt : pc->jf */ 758 /* pc += (A & X) ? pc->jt : pc->jf */
764 update_on_xread(ctx); 759 update_on_xread(ctx);
765 condt = ARM_COND_NE; 760 condt = ARM_COND_NE;
766 emit(ARM_TST_R(r_A, r_X), ctx); 761 emit(ARM_TST_R(r_A, r_X), ctx);
767 goto cond_jump; 762 goto cond_jump;
768 case BPF_S_RET_A: 763 case BPF_RET | BPF_A:
769 emit(ARM_MOV_R(ARM_R0, r_A), ctx); 764 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
770 goto b_epilogue; 765 goto b_epilogue;
771 case BPF_S_RET_K: 766 case BPF_RET | BPF_K:
772 if ((k == 0) && (ctx->ret0_fp_idx < 0)) 767 if ((k == 0) && (ctx->ret0_fp_idx < 0))
773 ctx->ret0_fp_idx = i; 768 ctx->ret0_fp_idx = i;
774 emit_mov_i(ARM_R0, k, ctx); 769 emit_mov_i(ARM_R0, k, ctx);
@@ -776,17 +771,17 @@ b_epilogue:
776 if (i != ctx->skf->len - 1) 771 if (i != ctx->skf->len - 1)
777 emit(ARM_B(b_imm(prog->len, ctx)), ctx); 772 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
778 break; 773 break;
779 case BPF_S_MISC_TAX: 774 case BPF_MISC | BPF_TAX:
780 /* X = A */ 775 /* X = A */
781 ctx->seen |= SEEN_X; 776 ctx->seen |= SEEN_X;
782 emit(ARM_MOV_R(r_X, r_A), ctx); 777 emit(ARM_MOV_R(r_X, r_A), ctx);
783 break; 778 break;
784 case BPF_S_MISC_TXA: 779 case BPF_MISC | BPF_TXA:
785 /* A = X */ 780 /* A = X */
786 update_on_xread(ctx); 781 update_on_xread(ctx);
787 emit(ARM_MOV_R(r_A, r_X), ctx); 782 emit(ARM_MOV_R(r_A, r_X), ctx);
788 break; 783 break;
789 case BPF_S_ANC_PROTOCOL: 784 case BPF_ANC | SKF_AD_PROTOCOL:
790 /* A = ntohs(skb->protocol) */ 785 /* A = ntohs(skb->protocol) */
791 ctx->seen |= SEEN_SKB; 786 ctx->seen |= SEEN_SKB;
792 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 787 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -795,7 +790,7 @@ b_epilogue:
795 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); 790 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
796 emit_swap16(r_A, r_scratch, ctx); 791 emit_swap16(r_A, r_scratch, ctx);
797 break; 792 break;
798 case BPF_S_ANC_CPU: 793 case BPF_ANC | SKF_AD_CPU:
799 /* r_scratch = current_thread_info() */ 794 /* r_scratch = current_thread_info() */
800 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); 795 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
801 /* A = current_thread_info()->cpu */ 796 /* A = current_thread_info()->cpu */
@@ -803,7 +798,7 @@ b_epilogue:
803 off = offsetof(struct thread_info, cpu); 798 off = offsetof(struct thread_info, cpu);
804 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 799 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
805 break; 800 break;
806 case BPF_S_ANC_IFINDEX: 801 case BPF_ANC | SKF_AD_IFINDEX:
807 /* A = skb->dev->ifindex */ 802 /* A = skb->dev->ifindex */
808 ctx->seen |= SEEN_SKB; 803 ctx->seen |= SEEN_SKB;
809 off = offsetof(struct sk_buff, dev); 804 off = offsetof(struct sk_buff, dev);
@@ -817,30 +812,30 @@ b_epilogue:
817 off = offsetof(struct net_device, ifindex); 812 off = offsetof(struct net_device, ifindex);
818 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 813 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
819 break; 814 break;
820 case BPF_S_ANC_MARK: 815 case BPF_ANC | SKF_AD_MARK:
821 ctx->seen |= SEEN_SKB; 816 ctx->seen |= SEEN_SKB;
822 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 817 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
823 off = offsetof(struct sk_buff, mark); 818 off = offsetof(struct sk_buff, mark);
824 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 819 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
825 break; 820 break;
826 case BPF_S_ANC_RXHASH: 821 case BPF_ANC | SKF_AD_RXHASH:
827 ctx->seen |= SEEN_SKB; 822 ctx->seen |= SEEN_SKB;
828 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 823 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
829 off = offsetof(struct sk_buff, hash); 824 off = offsetof(struct sk_buff, hash);
830 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 825 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
831 break; 826 break;
832 case BPF_S_ANC_VLAN_TAG: 827 case BPF_ANC | SKF_AD_VLAN_TAG:
833 case BPF_S_ANC_VLAN_TAG_PRESENT: 828 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
834 ctx->seen |= SEEN_SKB; 829 ctx->seen |= SEEN_SKB;
835 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 830 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
836 off = offsetof(struct sk_buff, vlan_tci); 831 off = offsetof(struct sk_buff, vlan_tci);
837 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 832 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
838 if (inst->code == BPF_S_ANC_VLAN_TAG) 833 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
839 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 834 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
840 else 835 else
841 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 836 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
842 break; 837 break;
843 case BPF_S_ANC_QUEUE: 838 case BPF_ANC | SKF_AD_QUEUE:
844 ctx->seen |= SEEN_SKB; 839 ctx->seen |= SEEN_SKB;
845 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 840 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
846 queue_mapping) != 2); 841 queue_mapping) != 2);
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index a8b5408dd349..da4cdb16844e 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -168,6 +168,7 @@ static void nvram_read_alpha2(const char *prefix, const char *name,
168static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom, 168static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
169 const char *prefix, bool fallback) 169 const char *prefix, bool fallback)
170{ 170{
171 nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback);
171 nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback); 172 nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
172 nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback); 173 nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
173 nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback); 174 nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index e76eba74d9da..8f87d9217122 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset:
78 blr 78 blr
79 79
80/* 80/*
81 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) 81 * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
82 * r_addr is the offset value 82 * r_addr is the offset value
83 */ 83 */
84 .globl sk_load_byte_msh 84 .globl sk_load_byte_msh
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 808ce1cae21a..6dcdadefd8d0 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 	}
 
 	switch (filter[0].code) {
-	case BPF_S_RET_K:
-	case BPF_S_LD_W_LEN:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_QUEUE:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
+	case BPF_RET | BPF_K:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		/* first instruction sets A register (or is RET 'constant') */
 		break;
 	default:
@@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 
 	for (i = 0; i < flen; i++) {
 		unsigned int K = filter[i].k;
+		u16 code = bpf_anc_helper(&filter[i]);
 
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		 */
 		addrs[i] = ctx->idx * 4;
 
-		switch (filter[i].code) {
+		switch (code) {
 		/*** ALU ops ***/
-		case BPF_S_ALU_ADD_X: /* A += X; */
+		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_ADD(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_ADD_K: /* A += K; */
+		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
 			if (!K)
 				break;
 			PPC_ADDI(r_A, r_A, IMM_L(K));
 			if (K >= 32768)
 				PPC_ADDIS(r_A, r_A, IMM_HA(K));
 			break;
-		case BPF_S_ALU_SUB_X: /* A -= X; */
+		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SUB(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_SUB_K: /* A -= K */
+		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
 			if (!K)
 				break;
 			PPC_ADDI(r_A, r_A, IMM_L(-K));
 			if (K >= 32768)
 				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
 			break;
-		case BPF_S_ALU_MUL_X: /* A *= X; */
+		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_MUL(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_MUL_K: /* A *= K */
+		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
 			if (K < 32768)
 				PPC_MULI(r_A, r_A, K);
 			else {
@@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_MUL(r_A, r_A, r_scratch1);
 			}
 			break;
-		case BPF_S_ALU_MOD_X: /* A %= X; */
+		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_MUL(r_scratch1, r_X, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_MOD_K: /* A %= K; */
+		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
 			PPC_LI32(r_scratch2, K);
 			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
 			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_DIV_X: /* A /= X; */
+		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			}
 			PPC_DIVWU(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_DIV_K: /* A /= K */
+		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
 			if (K == 1)
 				break;
 			PPC_LI32(r_scratch1, K);
 			PPC_DIVWU(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_AND_X:
+		case BPF_ALU | BPF_AND | BPF_X:
 			ctx->seen |= SEEN_XREG;
 			PPC_AND(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_AND_K:
+		case BPF_ALU | BPF_AND | BPF_K:
 			if (!IMM_H(K))
 				PPC_ANDI(r_A, r_A, K);
 			else {
@@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_AND(r_A, r_A, r_scratch1);
 			}
 			break;
-		case BPF_S_ALU_OR_X:
+		case BPF_ALU | BPF_OR | BPF_X:
 			ctx->seen |= SEEN_XREG;
 			PPC_OR(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_OR_K:
+		case BPF_ALU | BPF_OR | BPF_K:
 			if (IMM_L(K))
 				PPC_ORI(r_A, r_A, IMM_L(K));
 			if (K >= 65536)
 				PPC_ORIS(r_A, r_A, IMM_H(K));
 			break;
-		case BPF_S_ANC_ALU_XOR_X:
-		case BPF_S_ALU_XOR_X: /* A ^= X */
+		case BPF_ANC | SKF_AD_ALU_XOR_X:
+		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
 			ctx->seen |= SEEN_XREG;
 			PPC_XOR(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_XOR_K: /* A ^= K */
+		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
 			if (IMM_L(K))
 				PPC_XORI(r_A, r_A, IMM_L(K));
 			if (K >= 65536)
 				PPC_XORIS(r_A, r_A, IMM_H(K));
 			break;
-		case BPF_S_ALU_LSH_X: /* A <<= X; */
+		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SLW(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_LSH_K:
+		case BPF_ALU | BPF_LSH | BPF_K:
 			if (K == 0)
 				break;
 			else
 				PPC_SLWI(r_A, r_A, K);
 			break;
-		case BPF_S_ALU_RSH_X: /* A >>= X; */
+		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SRW(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_RSH_K: /* A >>= K; */
+		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
 			if (K == 0)
 				break;
 			else
 				PPC_SRWI(r_A, r_A, K);
 			break;
-		case BPF_S_ALU_NEG:
+		case BPF_ALU | BPF_NEG:
 			PPC_NEG(r_A, r_A);
 			break;
-		case BPF_S_RET_K:
+		case BPF_RET | BPF_K:
 			PPC_LI32(r_ret, K);
 			if (!K) {
 				if (ctx->pc_ret0 == -1)
@@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_BLR();
 			}
 			break;
-		case BPF_S_RET_A:
+		case BPF_RET | BPF_A:
 			PPC_MR(r_ret, r_A);
 			if (i != flen - 1) {
 				if (ctx->seen)
@@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_BLR();
 			}
 			break;
-		case BPF_S_MISC_TAX: /* X = A */
+		case BPF_MISC | BPF_TAX: /* X = A */
 			PPC_MR(r_X, r_A);
 			break;
-		case BPF_S_MISC_TXA: /* A = X */
+		case BPF_MISC | BPF_TXA: /* A = X */
 			ctx->seen |= SEEN_XREG;
 			PPC_MR(r_A, r_X);
 			break;
 
 		/*** Constant loads/M[] access ***/
-		case BPF_S_LD_IMM: /* A = K */
+		case BPF_LD | BPF_IMM: /* A = K */
 			PPC_LI32(r_A, K);
 			break;
-		case BPF_S_LDX_IMM: /* X = K */
+		case BPF_LDX | BPF_IMM: /* X = K */
 			PPC_LI32(r_X, K);
 			break;
-		case BPF_S_LD_MEM: /* A = mem[K] */
+		case BPF_LD | BPF_MEM: /* A = mem[K] */
 			PPC_MR(r_A, r_M + (K & 0xf));
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_LDX_MEM: /* X = mem[K] */
+		case BPF_LDX | BPF_MEM: /* X = mem[K] */
 			PPC_MR(r_X, r_M + (K & 0xf));
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_ST: /* mem[K] = A */
+		case BPF_ST: /* mem[K] = A */
 			PPC_MR(r_M + (K & 0xf), r_A);
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_STX: /* mem[K] = X */
+		case BPF_STX: /* mem[K] = X */
 			PPC_MR(r_M + (K & 0xf), r_X);
 			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_LD_W_LEN: /* A = skb->len; */
+		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
 			break;
-		case BPF_S_LDX_W_LEN: /* X = skb->len; */
+		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
 			break;
 
 		/*** Ancillary info loads ***/
-		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  protocol) != 2);
 			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							    protocol));
 			break;
-		case BPF_S_ANC_IFINDEX:
+		case BPF_ANC | SKF_AD_IFINDEX:
 			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 								dev));
 			PPC_CMPDI(r_scratch1, 0);
@@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_LWZ_OFFS(r_A, r_scratch1,
 				     offsetof(struct net_device, ifindex));
 			break;
-		case BPF_S_ANC_MARK:
+		case BPF_ANC | SKF_AD_MARK:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  mark));
 			break;
-		case BPF_S_ANC_RXHASH:
+		case BPF_ANC | SKF_AD_RXHASH:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  hash));
 			break;
-		case BPF_S_ANC_VLAN_TAG:
-		case BPF_S_ANC_VLAN_TAG_PRESENT:
+		case BPF_ANC | SKF_AD_VLAN_TAG:
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  vlan_tci));
-			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
+			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
 				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
 			else
 				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
 			break;
-		case BPF_S_ANC_QUEUE:
+		case BPF_ANC | SKF_AD_QUEUE:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  queue_mapping) != 2);
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  queue_mapping));
 			break;
-		case BPF_S_ANC_CPU:
+		case BPF_ANC | SKF_AD_CPU:
 #ifdef CONFIG_SMP
 			/*
 			 * PACA ptr is r13:
@@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		/*** Absolute loads from packet header/data ***/
-		case BPF_S_LD_W_ABS:
+		case BPF_LD | BPF_W | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
-		case BPF_S_LD_H_ABS:
+		case BPF_LD | BPF_H | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
-		case BPF_S_LD_B_ABS:
+		case BPF_LD | BPF_B | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
 			/* Load from [K]. */
@@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		/*** Indirect loads from packet header/data ***/
-		case BPF_S_LD_W_IND:
+		case BPF_LD | BPF_W | BPF_IND:
 			func = sk_load_word;
 			goto common_load_ind;
-		case BPF_S_LD_H_IND:
+		case BPF_LD | BPF_H | BPF_IND:
 			func = sk_load_half;
 			goto common_load_ind;
-		case BPF_S_LD_B_IND:
+		case BPF_LD | BPF_B | BPF_IND:
 			func = sk_load_byte;
 		common_load_ind:
 			/*
@@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_BCC(COND_LT, exit_addr);
 			break;
 
-		case BPF_S_LDX_B_MSH:
+		case BPF_LDX | BPF_B | BPF_MSH:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
 
 		/*** Jump and branches ***/
-		case BPF_S_JMP_JA:
+		case BPF_JMP | BPF_JA:
 			if (K != 0)
 				PPC_JMP(addrs[i + 1 + K]);
 			break;
 
-		case BPF_S_JMP_JGT_K:
-		case BPF_S_JMP_JGT_X:
+		case BPF_JMP | BPF_JGT | BPF_K:
+		case BPF_JMP | BPF_JGT | BPF_X:
 			true_cond = COND_GT;
 			goto cond_branch;
-		case BPF_S_JMP_JGE_K:
-		case BPF_S_JMP_JGE_X:
+		case BPF_JMP | BPF_JGE | BPF_K:
+		case BPF_JMP | BPF_JGE | BPF_X:
 			true_cond = COND_GE;
 			goto cond_branch;
-		case BPF_S_JMP_JEQ_K:
-		case BPF_S_JMP_JEQ_X:
+		case BPF_JMP | BPF_JEQ | BPF_K:
+		case BPF_JMP | BPF_JEQ | BPF_X:
 			true_cond = COND_EQ;
 			goto cond_branch;
-		case BPF_S_JMP_JSET_K:
-		case BPF_S_JMP_JSET_X:
+		case BPF_JMP | BPF_JSET | BPF_K:
+		case BPF_JMP | BPF_JSET | BPF_X:
 			true_cond = COND_NE;
 			/* Fall through */
 		cond_branch:
@@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				break;
 			}
 
-			switch (filter[i].code) {
-			case BPF_S_JMP_JGT_X:
-			case BPF_S_JMP_JGE_X:
-			case BPF_S_JMP_JEQ_X:
+			switch (code) {
+			case BPF_JMP | BPF_JGT | BPF_X:
+			case BPF_JMP | BPF_JGE | BPF_X:
+			case BPF_JMP | BPF_JEQ | BPF_X:
 				ctx->seen |= SEEN_XREG;
 				PPC_CMPLW(r_A, r_X);
 				break;
-			case BPF_S_JMP_JSET_X:
+			case BPF_JMP | BPF_JSET | BPF_X:
 				ctx->seen |= SEEN_XREG;
 				PPC_AND_DOT(r_scratch1, r_A, r_X);
 				break;
-			case BPF_S_JMP_JEQ_K:
-			case BPF_S_JMP_JGT_K:
-			case BPF_S_JMP_JGE_K:
+			case BPF_JMP | BPF_JEQ | BPF_K:
+			case BPF_JMP | BPF_JGT | BPF_K:
+			case BPF_JMP | BPF_JGE | BPF_K:
 				if (K < 32768)
 					PPC_CMPLWI(r_A, K);
 				else {
@@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 					PPC_CMPLW(r_A, r_scratch1);
 				}
 				break;
-			case BPF_S_JMP_JSET_K:
+			case BPF_JMP | BPF_JSET | BPF_K:
 				if (K < 32768)
 					/* PPC_ANDI is /only/ dot-form */
 					PPC_ANDI(r_scratch1, r_A, K);
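The BPF_CLASS | BPF_SIZE | BPF_MODE spellings used throughout this file are exactly what user space writes when handing a classic filter to SO_ATTACH_FILTER, which is the point of dropping the kernel-private BPF_S_* names. A minimal illustrative example (constants from <linux/filter.h> and <linux/if_ether.h>; error handling omitted):

	#include <linux/filter.h>
	#include <linux/if_ether.h>
	#include <sys/socket.h>

	/* Accept only IPv4 frames; everything else is dropped. */
	static struct sock_filter ipv4_only[] = {
		{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12 },	/* A = ethertype */
		{ BPF_JMP | BPF_JEQ | BPF_K, 0, 1, ETH_P_IP },	/* A == ETH_P_IP ? */
		{ BPF_RET | BPF_K, 0, 0, 0xffff },		/* yes: keep packet */
		{ BPF_RET | BPF_K, 0, 0, 0 },			/* no: drop packet */
	};

	static struct sock_fprog prog = {
		.len	= sizeof(ipv4_only) / sizeof(ipv4_only[0]),
		.filter	= ipv4_only,
	};
	/* ... setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)); */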
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 228cf91b91c1..ffd1169ebaab 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -25,7 +25,6 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
 #include <linux/spi/spi.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
@@ -178,37 +177,6 @@ u32 get_baudrate(void)
 EXPORT_SYMBOL(get_baudrate);
 #endif /* CONFIG_CPM2 */
 
-#ifdef CONFIG_FIXED_PHY
-static int __init of_add_fixed_phys(void)
-{
-	int ret;
-	struct device_node *np;
-	u32 *fixed_link;
-	struct fixed_phy_status status = {};
-
-	for_each_node_by_name(np, "ethernet") {
-		fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
-		if (!fixed_link)
-			continue;
-
-		status.link = 1;
-		status.duplex = fixed_link[1];
-		status.speed = fixed_link[2];
-		status.pause = fixed_link[3];
-		status.asym_pause = fixed_link[4];
-
-		ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
-		if (ret) {
-			of_node_put(np);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-arch_initcall(of_add_fixed_phys);
-#endif /* CONFIG_FIXED_PHY */
-
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static __be32 __iomem *rstcr;
 
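This removal pairs with the fixed-link.txt binding update visible in the diffstat: instead of an arch initcall scanning every "ethernet" node at boot, fixed-link parsing moves into the MAC drivers via the generic OF MDIO helpers. A driver-side sketch, assuming the of_phy_is_fixed_link()/of_phy_register_fixed_link() helpers from <linux/of_mdio.h> (this fragment is illustrative and not part of this diff):

	#include <linux/of_mdio.h>

	static int hypothetical_mac_probe(struct device_node *np)
	{
		if (of_phy_is_fixed_link(np)) {
			int err = of_phy_register_fixed_link(np);

			if (err)
				return err;
			/* the fixed PHY is now reachable via the usual phy_connect path */
		}
		return 0;
	}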
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e9f8fa9337fe..a2cbd875543a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
 	EMIT4(0xa7c80000);
 	/* Clear A if the first register does not set it. */
 	switch (filter[0].code) {
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
-	case BPF_S_LD_W_LEN:
-	case BPF_S_LD_W_IND:
-	case BPF_S_LD_H_IND:
-	case BPF_S_LD_B_IND:
-	case BPF_S_LD_IMM:
-	case BPF_S_LD_MEM:
-	case BPF_S_MISC_TXA:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_PKTTYPE:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_QUEUE:
-	case BPF_S_ANC_HATYPE:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_RET_K:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_IND:
+	case BPF_LD | BPF_H | BPF_IND:
+	case BPF_LD | BPF_B | BPF_IND:
+	case BPF_LD | BPF_IMM:
+	case BPF_LD | BPF_MEM:
+	case BPF_MISC | BPF_TXA:
+	case BPF_RET | BPF_K:
 		/* first instruction sets A register */
 		break;
 	default: /* A = 0 */
@@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 	unsigned int K;
 	int offset;
 	unsigned int mask;
+	u16 code;
 
 	K = filter->k;
-	switch (filter->code) {
-	case BPF_S_ALU_ADD_X: /* A += X */
+	code = bpf_anc_helper(filter);
+
+	switch (code) {
+	case BPF_ALU | BPF_ADD | BPF_X: /* A += X */
 		jit->seen |= SEEN_XREG;
 		/* ar %r5,%r12 */
 		EMIT2(0x1a5c);
 		break;
-	case BPF_S_ALU_ADD_K: /* A += K */
+	case BPF_ALU | BPF_ADD | BPF_K: /* A += K */
 		if (!K)
 			break;
 		if (K <= 16383)
@@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* a %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_SUB_X: /* A -= X */
+	case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */
 		jit->seen |= SEEN_XREG;
 		/* sr %r5,%r12 */
 		EMIT2(0x1b5c);
 		break;
-	case BPF_S_ALU_SUB_K: /* A -= K */
+	case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
 		if (!K)
 			break;
 		if (K <= 16384)
@@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* s %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_MUL_X: /* A *= X */
+	case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */
 		jit->seen |= SEEN_XREG;
 		/* msr %r5,%r12 */
 		EMIT4(0xb252005c);
 		break;
-	case BPF_S_ALU_MUL_K: /* A *= K */
+	case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
 		if (K <= 16383)
 			/* mhi %r5,K */
 			EMIT4_IMM(0xa75c0000, K);
@@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* ms %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x7150d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_DIV_X: /* A /= X */
+	case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */
 		jit->seen |= SEEN_XREG | SEEN_RET0;
 		/* ltr %r12,%r12 */
 		EMIT2(0x12cc);
@@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* dlr %r4,%r12 */
 		EMIT4(0xb997004c);
 		break;
-	case BPF_S_ALU_DIV_K: /* A /= K */
+	case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
 		if (K == 1)
 			break;
 		/* lhi %r4,0 */
@@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* dl %r4,<d(K)>(%r13) */
 		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_MOD_X: /* A %= X */
+	case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */
 		jit->seen |= SEEN_XREG | SEEN_RET0;
 		/* ltr %r12,%r12 */
 		EMIT2(0x12cc);
@@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;
-	case BPF_S_ALU_MOD_K: /* A %= K */
+	case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */
 		if (K == 1) {
 			/* lhi %r5,0 */
 			EMIT4(0xa7580000);
@@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;
-	case BPF_S_ALU_AND_X: /* A &= X */
+	case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
 		jit->seen |= SEEN_XREG;
 		/* nr %r5,%r12 */
 		EMIT2(0x145c);
 		break;
-	case BPF_S_ALU_AND_K: /* A &= K */
+	case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
 		if (test_facility(21))
 			/* nilf %r5,<K> */
 			EMIT6_IMM(0xc05b0000, K);
@@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* n %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5450d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_OR_X: /* A |= X */
+	case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
 		jit->seen |= SEEN_XREG;
 		/* or %r5,%r12 */
 		EMIT2(0x165c);
 		break;
-	case BPF_S_ALU_OR_K: /* A |= K */
+	case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
 		if (test_facility(21))
 			/* oilf %r5,<K> */
 			EMIT6_IMM(0xc05d0000, K);
@@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		/* o %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5650d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
-	case BPF_S_ALU_XOR_X:
+	case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
+	case BPF_ALU | BPF_XOR | BPF_X:
 		jit->seen |= SEEN_XREG;
 		/* xr %r5,%r12 */
 		EMIT2(0x175c);
 		break;
-	case BPF_S_ALU_XOR_K: /* A ^= K */
+	case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
 		if (!K)
 			break;
 		/* x %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5750d000, EMIT_CONST(K));
 		break;
-	case BPF_S_ALU_LSH_X: /* A <<= X; */
+	case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
 		jit->seen |= SEEN_XREG;
 		/* sll %r5,0(%r12) */
 		EMIT4(0x8950c000);
 		break;
-	case BPF_S_ALU_LSH_K: /* A <<= K */
+	case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
 		if (K == 0)
 			break;
 		/* sll %r5,K */
 		EMIT4_DISP(0x89500000, K);
 		break;
-	case BPF_S_ALU_RSH_X: /* A >>= X; */
+	case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
 		jit->seen |= SEEN_XREG;
 		/* srl %r5,0(%r12) */
 		EMIT4(0x8850c000);
 		break;
-	case BPF_S_ALU_RSH_K: /* A >>= K; */
+	case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
 		if (K == 0)
 			break;
 		/* srl %r5,K */
 		EMIT4_DISP(0x88500000, K);
 		break;
-	case BPF_S_ALU_NEG: /* A = -A */
+	case BPF_ALU | BPF_NEG: /* A = -A */
 		/* lnr %r5,%r5 */
 		EMIT2(0x1155);
 		break;
-	case BPF_S_JMP_JA: /* ip += K */
+	case BPF_JMP | BPF_JA: /* ip += K */
 		offset = addrs[i + K] + jit->start - jit->prg;
 		EMIT4_PCREL(0xa7f40000, offset);
 		break;
-	case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
+	case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */
 		mask = 0x200000; /* jh */
 		goto kbranch;
-	case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
+	case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */
 		mask = 0xa00000; /* jhe */
 		goto kbranch;
-	case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
+	case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */
 		mask = 0x800000; /* je */
 kbranch:	/* Emit compare if the branch targets are different */
 		if (filter->jt != filter->jf) {
@@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) {
 			EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
 		}
 		break;
-	case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
+	case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */
 		mask = 0x700000; /* jnz */
 		/* Emit test if the branch targets are different */
 		if (filter->jt != filter->jf) {
@@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) {
 			EMIT4_IMM(0xa7510000, K);
 		}
 		goto branch;
-	case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
+	case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */
 		mask = 0x200000; /* jh */
 		goto xbranch;
-	case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
+	case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */
 		mask = 0xa00000; /* jhe */
 		goto xbranch;
-	case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
+	case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */
 		mask = 0x800000; /* je */
 xbranch:	/* Emit compare if the branch targets are different */
 		if (filter->jt != filter->jf) {
@@ -540,7 +533,7 @@ xbranch: /* Emit compare if the branch targets are different */
 			EMIT2(0x195c);
 		}
 		goto branch;
-	case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
+	case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
 		mask = 0x700000; /* jnz */
 		/* Emit test if the branch targets are different */
 		if (filter->jt != filter->jf) {
@@ -551,15 +544,15 @@ xbranch: /* Emit compare if the branch targets are different */
 			EMIT2(0x144c);
 		}
 		goto branch;
-	case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
+	case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
 		offset = jit->off_load_word;
 		goto load_abs;
-	case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
+	case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
 		offset = jit->off_load_half;
 		goto load_abs;
-	case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
+	case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
 		offset = jit->off_load_byte;
load_abs:	if ((int) K < 0)
@@ -573,19 +566,19 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* jnz <ret0> */
 		EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
 		break;
-	case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
+	case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
 		offset = jit->off_load_iword;
 		goto call_fn;
-	case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
+	case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
 		offset = jit->off_load_ihalf;
 		goto call_fn;
-	case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
+	case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */
 		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
 		offset = jit->off_load_ibyte;
 		goto call_fn;
-	case BPF_S_LDX_B_MSH:
+	case BPF_LDX | BPF_B | BPF_MSH:
 		/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
 		jit->seen |= SEEN_RET0;
 		if ((int) K < 0) {
@@ -596,17 +589,17 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
 		offset = jit->off_load_bmsh;
 		goto call_fn;
-	case BPF_S_LD_W_LEN: /* A = skb->len; */
+	case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 		/* l %r5,<d(len)>(%r2) */
 		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
 		break;
-	case BPF_S_LDX_W_LEN: /* X = skb->len; */
+	case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 		jit->seen |= SEEN_XREG;
 		/* l %r12,<d(len)>(%r2) */
 		EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
 		break;
-	case BPF_S_LD_IMM: /* A = K */
+	case BPF_LD | BPF_IMM: /* A = K */
 		if (K <= 16383)
 			/* lhi %r5,K */
 			EMIT4_IMM(0xa7580000, K);
@@ -617,7 +610,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* l %r5,<d(K)>(%r13) */
 		EMIT4_DISP(0x5850d000, EMIT_CONST(K));
 		break;
-	case BPF_S_LDX_IMM: /* X = K */
+	case BPF_LDX | BPF_IMM: /* X = K */
 		jit->seen |= SEEN_XREG;
 		if (K <= 16383)
 			/* lhi %r12,<K> */
@@ -629,29 +622,29 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* l %r12,<d(K)>(%r13) */
 		EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
 		break;
-	case BPF_S_LD_MEM: /* A = mem[K] */
+	case BPF_LD | BPF_MEM: /* A = mem[K] */
 		jit->seen |= SEEN_MEM;
 		/* l %r5,<K>(%r15) */
 		EMIT4_DISP(0x5850f000,
 			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
 		break;
-	case BPF_S_LDX_MEM: /* X = mem[K] */
+	case BPF_LDX | BPF_MEM: /* X = mem[K] */
 		jit->seen |= SEEN_XREG | SEEN_MEM;
 		/* l %r12,<K>(%r15) */
 		EMIT4_DISP(0x58c0f000,
 			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
 		break;
-	case BPF_S_MISC_TAX: /* X = A */
+	case BPF_MISC | BPF_TAX: /* X = A */
 		jit->seen |= SEEN_XREG;
 		/* lr %r12,%r5 */
 		EMIT2(0x18c5);
 		break;
-	case BPF_S_MISC_TXA: /* A = X */
+	case BPF_MISC | BPF_TXA: /* A = X */
 		jit->seen |= SEEN_XREG;
 		/* lr %r5,%r12 */
 		EMIT2(0x185c);
 		break;
-	case BPF_S_RET_K:
+	case BPF_RET | BPF_K:
 		if (K == 0) {
 			jit->seen |= SEEN_RET0;
 			if (last)
@@ -671,33 +664,33 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 			EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
 		}
 		break;
-	case BPF_S_RET_A:
+	case BPF_RET | BPF_A:
 		/* llgfr %r2,%r5 */
 		EMIT4(0xb9160025);
 		/* j <exit> */
 		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
 		break;
-	case BPF_S_ST: /* mem[K] = A */
+	case BPF_ST: /* mem[K] = A */
 		jit->seen |= SEEN_MEM;
 		/* st %r5,<K>(%r15) */
 		EMIT4_DISP(0x5050f000,
 			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
 		break;
-	case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
+	case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
 		jit->seen |= SEEN_XREG | SEEN_MEM;
 		/* st %r12,<K>(%r15) */
 		EMIT4_DISP(0x50c0f000,
 			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
 		break;
-	case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+	case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 		/* lhi %r5,0 */
 		EMIT4(0xa7580000);
 		/* icm %r5,3,<d(protocol)>(%r2) */
 		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
 		break;
-	case BPF_S_ANC_IFINDEX:	/* if (!skb->dev) return 0;
+	case BPF_ANC | SKF_AD_IFINDEX:	/* if (!skb->dev) return 0;
 				 * A = skb->dev->ifindex */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 		jit->seen |= SEEN_RET0;
 		/* lg %r1,<d(dev)>(%r2) */
@@ -709,20 +702,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* l %r5,<d(ifindex)>(%r1) */
 		EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
 		break;
-	case BPF_S_ANC_MARK: /* A = skb->mark */
+	case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 		/* l %r5,<d(mark)>(%r2) */
 		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
 		break;
-	case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
+	case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 		/* lhi %r5,0 */
 		EMIT4(0xa7580000);
 		/* icm %r5,3,<d(queue_mapping)>(%r2) */
 		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
 		break;
-	case BPF_S_ANC_HATYPE:	/* if (!skb->dev) return 0;
+	case BPF_ANC | SKF_AD_HATYPE:	/* if (!skb->dev) return 0;
 				 * A = skb->dev->type */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
 		jit->seen |= SEEN_RET0;
 		/* lg %r1,<d(dev)>(%r2) */
@@ -736,20 +729,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* icm %r5,3,<d(type)>(%r1) */
 		EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
 		break;
-	case BPF_S_ANC_RXHASH: /* A = skb->hash */
+	case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 		/* l %r5,<d(hash)>(%r2) */
 		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
 		break;
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
+	case BPF_ANC | SKF_AD_VLAN_TAG:
+	case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 		/* lhi %r5,0 */
 		EMIT4(0xa7580000);
 		/* icm %r5,3,<d(vlan_tci)>(%r2) */
 		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
-		if (filter->code == BPF_S_ANC_VLAN_TAG) {
+		if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
 			/* nill %r5,0xefff */
 			EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
 		} else {
@@ -759,7 +752,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 			EMIT4_DISP(0x88500000, 12);
 		}
 		break;
-	case BPF_S_ANC_PKTTYPE:
+	case BPF_ANC | SKF_AD_PKTTYPE:
 		if (pkt_type_offset < 0)
 			goto out;
 		/* lhi %r5,0 */
@@ -769,7 +762,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		/* srl %r5,5 */
 		EMIT4_DISP(0x88500000, 5);
 		break;
-	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
+	case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
 		/* l %r5,<d(cpu_nr)> */
 		EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
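The vlan_tci handling above leans on an arithmetic identity rather than a second load: at this point the kernel keeps the "tag present" flag in bit 12 of vlan_tci (VLAN_TAG_PRESENT == 0x1000, enforced by the BUILD_BUG_ON), so A & ~VLAN_TAG_PRESENT is the same as A & 0xefff, which is exactly what the s390 nill %r5,0xefff computes. The two ancillary VLAN loads reduce to roughly this C (illustrative; the JITs for other arches mask with VLAN_VID_MASK instead, which keeps only the VID bits):

	#include <linux/if_vlan.h>

	static u16 anc_vlan_tag(u16 vlan_tci)
	{
		return vlan_tci & ~VLAN_TAG_PRESENT;	/* tag without the present bit */
	}

	static u16 anc_vlan_tag_present(u16 vlan_tci)
	{
		return vlan_tci & VLAN_TAG_PRESENT;	/* nonzero iff frame was tagged */
	}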
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index bdbda1453aa9..04471dc64847 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+	__asm__ __volatile__(
+		"addcc   %0, %1, %0\n"
+		"addx    %0, %%g0, %0"
+		: "=r" (csum)
+		: "r" (addend), "0" (csum));
+
+	return csum;
+}
+
 #endif /* !(__SPARC_CHECKSUM_H) */
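csum_add() folds one partial one's-complement checksum into another, so any carry out of bit 31 has to be added back into the low bits; the sparc version does that end-around carry in two instructions (addcc sets the carry, addx folds it in via %g0). For comparison, the generic C fallback in include/net/checksum.h, which the HAVE_ARCH_CSUM_ADD define now overrides on sparc, achieves the same thing like this:

	static inline __wsum csum_add(__wsum csum, __wsum addend)
	{
		u32 res = (__force u32)csum;

		res += (__force u32)addend;
		/* if the add wrapped, res < addend; fold the carry back in */
		return (__force __wsum)(res + (res < (__force u32)addend));
	}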
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43c..2ff81ae8f3af 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+	__asm__ __volatile__(
+		"addcc   %0, %1, %0\n"
+		"addx    %0, %%g0, %0"
+		: "=r" (csum)
+		: "r" (addend), "0" (csum));
+
+	return csum;
+}
+
 #endif /* !(__SPARC64_CHECKSUM_H) */
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index a82c6b2a9780..892a102671ad 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
83#define BNE (F2(0, 2) | CONDNE) 83#define BNE (F2(0, 2) | CONDNE)
84 84
85#ifdef CONFIG_SPARC64 85#ifdef CONFIG_SPARC64
86#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) 86#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
87#else 87#else
88#define BNE_PTR BNE 88#define BE_PTR BE
89#endif 89#endif
90 90
91#define SETHI(K, REG) \ 91#define SETHI(K, REG) \
@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)
415 emit_reg_move(O7, r_saved_O7); 415 emit_reg_move(O7, r_saved_O7);
416 416
417 switch (filter[0].code) { 417 switch (filter[0].code) {
418 case BPF_S_RET_K: 418 case BPF_RET | BPF_K:
419 case BPF_S_LD_W_LEN: 419 case BPF_LD | BPF_W | BPF_LEN:
420 case BPF_S_ANC_PROTOCOL: 420 case BPF_LD | BPF_W | BPF_ABS:
421 case BPF_S_ANC_PKTTYPE: 421 case BPF_LD | BPF_H | BPF_ABS:
422 case BPF_S_ANC_IFINDEX: 422 case BPF_LD | BPF_B | BPF_ABS:
423 case BPF_S_ANC_MARK:
424 case BPF_S_ANC_RXHASH:
425 case BPF_S_ANC_VLAN_TAG:
426 case BPF_S_ANC_VLAN_TAG_PRESENT:
427 case BPF_S_ANC_CPU:
428 case BPF_S_ANC_QUEUE:
429 case BPF_S_LD_W_ABS:
430 case BPF_S_LD_H_ABS:
431 case BPF_S_LD_B_ABS:
432 /* The first instruction sets the A register (or is 423 /* The first instruction sets the A register (or is
433 * a "RET 'constant'") 424 * a "RET 'constant'")
434 */ 425 */
@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp)
445 unsigned int t_offset; 436 unsigned int t_offset;
446 unsigned int f_offset; 437 unsigned int f_offset;
447 u32 t_op, f_op; 438 u32 t_op, f_op;
439 u16 code = bpf_anc_helper(&filter[i]);
448 int ilen; 440 int ilen;
449 441
450 switch (filter[i].code) { 442 switch (code) {
451 case BPF_S_ALU_ADD_X: /* A += X; */ 443 case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
452 emit_alu_X(ADD); 444 emit_alu_X(ADD);
453 break; 445 break;
454 case BPF_S_ALU_ADD_K: /* A += K; */ 446 case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
455 emit_alu_K(ADD, K); 447 emit_alu_K(ADD, K);
456 break; 448 break;
457 case BPF_S_ALU_SUB_X: /* A -= X; */ 449 case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
458 emit_alu_X(SUB); 450 emit_alu_X(SUB);
459 break; 451 break;
460 case BPF_S_ALU_SUB_K: /* A -= K */ 452 case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
461 emit_alu_K(SUB, K); 453 emit_alu_K(SUB, K);
462 break; 454 break;
463 case BPF_S_ALU_AND_X: /* A &= X */ 455 case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
464 emit_alu_X(AND); 456 emit_alu_X(AND);
465 break; 457 break;
466 case BPF_S_ALU_AND_K: /* A &= K */ 458 case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
467 emit_alu_K(AND, K); 459 emit_alu_K(AND, K);
468 break; 460 break;
469 case BPF_S_ALU_OR_X: /* A |= X */ 461 case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
470 emit_alu_X(OR); 462 emit_alu_X(OR);
471 break; 463 break;
472 case BPF_S_ALU_OR_K: /* A |= K */ 464 case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
473 emit_alu_K(OR, K); 465 emit_alu_K(OR, K);
474 break; 466 break;
475 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 467 case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
476 case BPF_S_ALU_XOR_X: 468 case BPF_ALU | BPF_XOR | BPF_X:
477 emit_alu_X(XOR); 469 emit_alu_X(XOR);
478 break; 470 break;
479 case BPF_S_ALU_XOR_K: /* A ^= K */ 471 case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
480 emit_alu_K(XOR, K); 472 emit_alu_K(XOR, K);
481 break; 473 break;
482 case BPF_S_ALU_LSH_X: /* A <<= X */ 474 case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
483 emit_alu_X(SLL); 475 emit_alu_X(SLL);
484 break; 476 break;
485 case BPF_S_ALU_LSH_K: /* A <<= K */ 477 case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
486 emit_alu_K(SLL, K); 478 emit_alu_K(SLL, K);
487 break; 479 break;
488 case BPF_S_ALU_RSH_X: /* A >>= X */ 480 case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
489 emit_alu_X(SRL); 481 emit_alu_X(SRL);
490 break; 482 break;
491 case BPF_S_ALU_RSH_K: /* A >>= K */ 483 case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
492 emit_alu_K(SRL, K); 484 emit_alu_K(SRL, K);
493 break; 485 break;
494 case BPF_S_ALU_MUL_X: /* A *= X; */ 486 case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
495 emit_alu_X(MUL); 487 emit_alu_X(MUL);
496 break; 488 break;
497 case BPF_S_ALU_MUL_K: /* A *= K */ 489 case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
498 emit_alu_K(MUL, K); 490 emit_alu_K(MUL, K);
499 break; 491 break;
500 case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ 492 case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/
501 if (K == 1) 493 if (K == 1)
502 break; 494 break;
503 emit_write_y(G0); 495 emit_write_y(G0);
@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
512#endif 504#endif
513 emit_alu_K(DIV, K); 505 emit_alu_K(DIV, K);
514 break; 506 break;
515 case BPF_S_ALU_DIV_X: /* A /= X; */ 507 case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
516 emit_cmpi(r_X, 0); 508 emit_cmpi(r_X, 0);
517 if (pc_ret0 > 0) { 509 if (pc_ret0 > 0) {
518 t_offset = addrs[pc_ret0 - 1]; 510 t_offset = addrs[pc_ret0 - 1];
@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)
544#endif 536#endif
545 emit_alu_X(DIV); 537 emit_alu_X(DIV);
546 break; 538 break;
547 case BPF_S_ALU_NEG: 539 case BPF_ALU | BPF_NEG:
548 emit_neg(); 540 emit_neg();
549 break; 541 break;
550 case BPF_S_RET_K: 542 case BPF_RET | BPF_K:
551 if (!K) { 543 if (!K) {
552 if (pc_ret0 == -1) 544 if (pc_ret0 == -1)
553 pc_ret0 = i; 545 pc_ret0 = i;
@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)
556 emit_loadimm(K, r_A); 548 emit_loadimm(K, r_A);
557 } 549 }
558 /* Fallthrough */ 550 /* Fallthrough */
559 case BPF_S_RET_A: 551 case BPF_RET | BPF_A:
560 if (seen_or_pass0) { 552 if (seen_or_pass0) {
561 if (i != flen - 1) { 553 if (i != flen - 1) {
562 emit_jump(cleanup_addr); 554 emit_jump(cleanup_addr);
@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)
573 emit_jmpl(r_saved_O7, 8, G0); 565 emit_jmpl(r_saved_O7, 8, G0);
574 emit_reg_move(r_A, O0); /* delay slot */ 566 emit_reg_move(r_A, O0); /* delay slot */
575 break; 567 break;
576 case BPF_S_MISC_TAX: 568 case BPF_MISC | BPF_TAX:
577 seen |= SEEN_XREG; 569 seen |= SEEN_XREG;
578 emit_reg_move(r_A, r_X); 570 emit_reg_move(r_A, r_X);
579 break; 571 break;
580 case BPF_S_MISC_TXA: 572 case BPF_MISC | BPF_TXA:
581 seen |= SEEN_XREG; 573 seen |= SEEN_XREG;
582 emit_reg_move(r_X, r_A); 574 emit_reg_move(r_X, r_A);
583 break; 575 break;
584 case BPF_S_ANC_CPU: 576 case BPF_ANC | SKF_AD_CPU:
585 emit_load_cpu(r_A); 577 emit_load_cpu(r_A);
586 break; 578 break;
587 case BPF_S_ANC_PROTOCOL: 579 case BPF_ANC | SKF_AD_PROTOCOL:
588 emit_skb_load16(protocol, r_A); 580 emit_skb_load16(protocol, r_A);
589 break; 581 break;
590#if 0 582#if 0
@@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp)
592 * a bit field even though we very much 584 * a bit field even though we very much
593 * know what we are doing here. 585 * know what we are doing here.
594 */ 586 */
595 case BPF_S_ANC_PKTTYPE: 587 case BPF_ANC | SKF_AD_PKTTYPE:
596 __emit_skb_load8(pkt_type, r_A); 588 __emit_skb_load8(pkt_type, r_A);
597 emit_alu_K(SRL, 5); 589 emit_alu_K(SRL, 5);
598 break; 590 break;
599#endif 591#endif
600 case BPF_S_ANC_IFINDEX: 592 case BPF_ANC | SKF_AD_IFINDEX:
601 emit_skb_loadptr(dev, r_A); 593 emit_skb_loadptr(dev, r_A);
602 emit_cmpi(r_A, 0); 594 emit_cmpi(r_A, 0);
603 emit_branch(BNE_PTR, cleanup_addr + 4); 595 emit_branch(BE_PTR, cleanup_addr + 4);
604 emit_nop(); 596 emit_nop();
605 emit_load32(r_A, struct net_device, ifindex, r_A); 597 emit_load32(r_A, struct net_device, ifindex, r_A);
606 break; 598 break;
607 case BPF_S_ANC_MARK: 599 case BPF_ANC | SKF_AD_MARK:
608 emit_skb_load32(mark, r_A); 600 emit_skb_load32(mark, r_A);
609 break; 601 break;
610 case BPF_S_ANC_QUEUE: 602 case BPF_ANC | SKF_AD_QUEUE:
611 emit_skb_load16(queue_mapping, r_A); 603 emit_skb_load16(queue_mapping, r_A);
612 break; 604 break;
613 case BPF_S_ANC_HATYPE: 605 case BPF_ANC | SKF_AD_HATYPE:
614 emit_skb_loadptr(dev, r_A); 606 emit_skb_loadptr(dev, r_A);
615 emit_cmpi(r_A, 0); 607 emit_cmpi(r_A, 0);
616 emit_branch(BNE_PTR, cleanup_addr + 4); 608 emit_branch(BE_PTR, cleanup_addr + 4);
617 emit_nop(); 609 emit_nop();
618 emit_load16(r_A, struct net_device, type, r_A); 610 emit_load16(r_A, struct net_device, type, r_A);
619 break; 611 break;
620 case BPF_S_ANC_RXHASH: 612 case BPF_ANC | SKF_AD_RXHASH:
621 emit_skb_load32(hash, r_A); 613 emit_skb_load32(hash, r_A);
622 break; 614 break;
623 case BPF_S_ANC_VLAN_TAG: 615 case BPF_ANC | SKF_AD_VLAN_TAG:
624 case BPF_S_ANC_VLAN_TAG_PRESENT: 616 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
625 emit_skb_load16(vlan_tci, r_A); 617 emit_skb_load16(vlan_tci, r_A);
626 if (filter[i].code == BPF_S_ANC_VLAN_TAG) { 618 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
627 emit_andi(r_A, VLAN_VID_MASK, r_A); 619 emit_andi(r_A, VLAN_VID_MASK, r_A);
628 } else { 620 } else {
629 emit_loadimm(VLAN_TAG_PRESENT, r_TMP); 621 emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
@@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp)
631 } 623 }
632 break; 624 break;
633 625
634 case BPF_S_LD_IMM: 626 case BPF_LD | BPF_IMM:
635 emit_loadimm(K, r_A); 627 emit_loadimm(K, r_A);
636 break; 628 break;
637 case BPF_S_LDX_IMM: 629 case BPF_LDX | BPF_IMM:
638 emit_loadimm(K, r_X); 630 emit_loadimm(K, r_X);
639 break; 631 break;
640 case BPF_S_LD_MEM: 632 case BPF_LD | BPF_MEM:
641 emit_ldmem(K * 4, r_A); 633 emit_ldmem(K * 4, r_A);
642 break; 634 break;
643 case BPF_S_LDX_MEM: 635 case BPF_LDX | BPF_MEM:
644 emit_ldmem(K * 4, r_X); 636 emit_ldmem(K * 4, r_X);
645 break; 637 break;
646 case BPF_S_ST: 638 case BPF_ST:
647 emit_stmem(K * 4, r_A); 639 emit_stmem(K * 4, r_A);
648 break; 640 break;
649 case BPF_S_STX: 641 case BPF_STX:
650 emit_stmem(K * 4, r_X); 642 emit_stmem(K * 4, r_X);
651 break; 643 break;
652 644
653#define CHOOSE_LOAD_FUNC(K, func) \ 645#define CHOOSE_LOAD_FUNC(K, func) \
654 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 646 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
655 647
656 case BPF_S_LD_W_ABS: 648 case BPF_LD | BPF_W | BPF_ABS:
657 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); 649 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
658common_load: seen |= SEEN_DATAREF; 650common_load: seen |= SEEN_DATAREF;
659 emit_loadimm(K, r_OFF); 651 emit_loadimm(K, r_OFF);
660 emit_call(func); 652 emit_call(func);
661 break; 653 break;
662 case BPF_S_LD_H_ABS: 654 case BPF_LD | BPF_H | BPF_ABS:
663 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); 655 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
664 goto common_load; 656 goto common_load;
665 case BPF_S_LD_B_ABS: 657 case BPF_LD | BPF_B | BPF_ABS:
666 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); 658 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
667 goto common_load; 659 goto common_load;
668 case BPF_S_LDX_B_MSH: 660 case BPF_LDX | BPF_B | BPF_MSH:
669 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); 661 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
670 goto common_load; 662 goto common_load;
671 case BPF_S_LD_W_IND: 663 case BPF_LD | BPF_W | BPF_IND:
672 func = bpf_jit_load_word; 664 func = bpf_jit_load_word;
673common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; 665common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
674 if (K) { 666 if (K) {
@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
683 } 675 }
684 emit_call(func); 676 emit_call(func);
685 break; 677 break;
686 case BPF_S_LD_H_IND: 678 case BPF_LD | BPF_H | BPF_IND:
687 func = bpf_jit_load_half; 679 func = bpf_jit_load_half;
688 goto common_load_ind; 680 goto common_load_ind;
689 case BPF_S_LD_B_IND: 681 case BPF_LD | BPF_B | BPF_IND:
690 func = bpf_jit_load_byte; 682 func = bpf_jit_load_byte;
691 goto common_load_ind; 683 goto common_load_ind;
692 case BPF_S_JMP_JA: 684 case BPF_JMP | BPF_JA:
693 emit_jump(addrs[i + K]); 685 emit_jump(addrs[i + K]);
694 emit_nop(); 686 emit_nop();
695 break; 687 break;
@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
700 f_op = FOP; \ 692 f_op = FOP; \
701 goto cond_branch 693 goto cond_branch
702 694
703 COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); 695 COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
704 COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); 696 COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
705 COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); 697 COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
706 COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); 698 COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
707 COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); 699 COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
708 COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); 700 COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
709 COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); 701 COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
710 COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); 702 COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
711 703
712cond_branch: f_offset = addrs[i + filter[i].jf]; 704cond_branch: f_offset = addrs[i + filter[i].jf];
713 t_offset = addrs[i + filter[i].jt]; 705 t_offset = addrs[i + filter[i].jt];
@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
719 break; 711 break;
720 } 712 }
721 713
722 switch (filter[i].code) { 714 switch (code) {
723 case BPF_S_JMP_JGT_X: 715 case BPF_JMP | BPF_JGT | BPF_X:
724 case BPF_S_JMP_JGE_X: 716 case BPF_JMP | BPF_JGE | BPF_X:
725 case BPF_S_JMP_JEQ_X: 717 case BPF_JMP | BPF_JEQ | BPF_X:
726 seen |= SEEN_XREG; 718 seen |= SEEN_XREG;
727 emit_cmp(r_A, r_X); 719 emit_cmp(r_A, r_X);
728 break; 720 break;
729 case BPF_S_JMP_JSET_X: 721 case BPF_JMP | BPF_JSET | BPF_X:
730 seen |= SEEN_XREG; 722 seen |= SEEN_XREG;
731 emit_btst(r_A, r_X); 723 emit_btst(r_A, r_X);
732 break; 724 break;
733 case BPF_S_JMP_JEQ_K: 725 case BPF_JMP | BPF_JEQ | BPF_K:
734 case BPF_S_JMP_JGT_K: 726 case BPF_JMP | BPF_JGT | BPF_K:
735 case BPF_S_JMP_JGE_K: 727 case BPF_JMP | BPF_JGE | BPF_K:
736 if (is_simm13(K)) { 728 if (is_simm13(K)) {
737 emit_cmpi(r_A, K); 729 emit_cmpi(r_A, K);
738 } else { 730 } else {
@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
740 emit_cmp(r_A, r_TMP); 732 emit_cmp(r_A, r_TMP);
741 } 733 }
742 break; 734 break;
743 case BPF_S_JMP_JSET_K: 735 case BPF_JMP | BPF_JSET | BPF_K:
744 if (is_simm13(K)) { 736 if (is_simm13(K)) {
745 emit_btsti(r_A, K); 737 emit_btsti(r_A, K);
746 } else { 738 } else {
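The sparc hunks above repeatedly guard immediates with is_simm13(K), since sparc ALU instructions carry only a 13-bit signed immediate; larger constants are first materialized in r_TMP. A sketch of that predicate (reproduced from the same file's helpers; treat the exact formulation as illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* true iff value fits a 13-bit signed immediate: -4096..4095 */
	static bool is_simm13(unsigned int value)
	{
		return value + 0x1000 < 0x2000;
	}

	int main(void)
	{
		printf("4095  -> %d\n", is_simm13(4095));		/* fits */
		printf("4096  -> %d\n", is_simm13(4096));		/* needs r_TMP */
		printf("-4096 -> %d\n", is_simm13((unsigned int)-4096));/* fits */
		return 0;
	}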
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index e6fd8a026c7b..cd00e1774491 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
184 asm("addl %2,%0\n\t" 184 asm("addl %2,%0\n\t"
185 "adcl $0,%0" 185 "adcl $0,%0"
186 : "=r" (a) 186 : "=r" (a)
187 : "0" (a), "r" (b)); 187 : "0" (a), "rm" (b));
188 return a; 188 return a;
189} 189}
190 190
191#define HAVE_ARCH_CSUM_ADD
192static inline __wsum csum_add(__wsum csum, __wsum addend)
193{
194 return (__force __wsum)add32_with_carry((__force unsigned)csum,
195 (__force unsigned)addend);
196}
197
191#endif /* _ASM_X86_CHECKSUM_64_H */ 198#endif /* _ASM_X86_CHECKSUM_64_H */
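The new csum_add() is end-around-carry addition: the 'adcl $0' folds the carry of the first add back into bit 0, which is what one's-complement checksum arithmetic requires. A portable C sketch of the same operation (illustrative only; the kernel relies on the inline asm above):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t add32_with_carry(uint32_t a, uint32_t b)
	{
		uint64_t sum = (uint64_t)a + b;			/* addl %2,%0 */
		return (uint32_t)sum + (uint32_t)(sum >> 32);	/* adcl $0,%0 */
	}

	int main(void)
	{
		/* 0xffffffff + 1 overflows; the carry wraps around to bit 0 */
		printf("%#x\n", add32_with_carry(0xffffffff, 1));	/* 0x1 */
		return 0;
	}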
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 01495755701b..6440221ced0d 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -12,13 +12,16 @@
12 12
13/* 13/*
14 * Calling convention : 14 * Calling convention :
15 * rdi : skb pointer 15 * rbx : skb pointer (callee saved)
16 * esi : offset of byte(s) to fetch in skb (can be scratched) 16 * esi : offset of byte(s) to fetch in skb (can be scratched)
17 * r8 : copy of skb->data 17 * r10 : copy of skb->data
18 * r9d : hlen = skb->len - skb->data_len 18 * r9d : hlen = skb->len - skb->data_len
19 */ 19 */
20#define SKBDATA %r8 20#define SKBDATA %r10
21#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ 21#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
22#define MAX_BPF_STACK (512 /* from filter.h */ + \
23 32 /* space for rbx,r13,r14,r15 */ + \
24 8 /* space for skb_copy_bits */)
22 25
23sk_load_word: 26sk_load_word:
24 .globl sk_load_word 27 .globl sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
68 movzbl (SKBDATA,%rsi),%eax 71 movzbl (SKBDATA,%rsi),%eax
69 ret 72 ret
70 73
71/**
72 * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
73 *
74 * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
75 * Must preserve A accumulator (%eax)
76 * Inputs : %esi is the offset value
77 */
78sk_load_byte_msh:
79 .globl sk_load_byte_msh
80 test %esi,%esi
81 js bpf_slow_path_byte_msh_neg
82
83sk_load_byte_msh_positive_offset:
84 .globl sk_load_byte_msh_positive_offset
85 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
86 jle bpf_slow_path_byte_msh
87 movzbl (SKBDATA,%rsi),%ebx
88 and $15,%bl
89 shl $2,%bl
90 ret
91
92/* rsi contains offset and can be scratched */ 74/* rsi contains offset and can be scratched */
93#define bpf_slow_path_common(LEN) \ 75#define bpf_slow_path_common(LEN) \
94 push %rdi; /* save skb */ \ 76 mov %rbx, %rdi; /* arg1 == skb */ \
95 push %r9; \ 77 push %r9; \
96 push SKBDATA; \ 78 push SKBDATA; \
97/* rsi already has offset */ \ 79/* rsi already has offset */ \
98 mov $LEN,%ecx; /* len */ \ 80 mov $LEN,%ecx; /* len */ \
99 lea -12(%rbp),%rdx; \ 81 lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
100 call skb_copy_bits; \ 82 call skb_copy_bits; \
101 test %eax,%eax; \ 83 test %eax,%eax; \
102 pop SKBDATA; \ 84 pop SKBDATA; \
103 pop %r9; \ 85 pop %r9;
104 pop %rdi
105 86
106 87
107bpf_slow_path_word: 88bpf_slow_path_word:
108 bpf_slow_path_common(4) 89 bpf_slow_path_common(4)
109 js bpf_error 90 js bpf_error
110 mov -12(%rbp),%eax 91 mov - MAX_BPF_STACK + 32(%rbp),%eax
111 bswap %eax 92 bswap %eax
112 ret 93 ret
113 94
114bpf_slow_path_half: 95bpf_slow_path_half:
115 bpf_slow_path_common(2) 96 bpf_slow_path_common(2)
116 js bpf_error 97 js bpf_error
117 mov -12(%rbp),%ax 98 mov - MAX_BPF_STACK + 32(%rbp),%ax
118 rol $8,%ax 99 rol $8,%ax
119 movzwl %ax,%eax 100 movzwl %ax,%eax
120 ret 101 ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
122bpf_slow_path_byte: 103bpf_slow_path_byte:
123 bpf_slow_path_common(1) 104 bpf_slow_path_common(1)
124 js bpf_error 105 js bpf_error
125 movzbl -12(%rbp),%eax 106 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
126 ret
127
128bpf_slow_path_byte_msh:
129 xchg %eax,%ebx /* dont lose A , X is about to be scratched */
130 bpf_slow_path_common(1)
131 js bpf_error
132 movzbl -12(%rbp),%eax
133 and $15,%al
134 shl $2,%al
135 xchg %eax,%ebx
136 ret 107 ret
137 108
138#define sk_negative_common(SIZE) \ 109#define sk_negative_common(SIZE) \
139 push %rdi; /* save skb */ \ 110 mov %rbx, %rdi; /* arg1 == skb */ \
140 push %r9; \ 111 push %r9; \
141 push SKBDATA; \ 112 push SKBDATA; \
142/* rsi already has offset */ \ 113/* rsi already has offset */ \
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
145 test %rax,%rax; \ 116 test %rax,%rax; \
146 pop SKBDATA; \ 117 pop SKBDATA; \
147 pop %r9; \ 118 pop %r9; \
148 pop %rdi; \
149 jz bpf_error 119 jz bpf_error
150 120
151
152bpf_slow_path_word_neg: 121bpf_slow_path_word_neg:
153 cmp SKF_MAX_NEG_OFF, %esi /* test range */ 122 cmp SKF_MAX_NEG_OFF, %esi /* test range */
154 jl bpf_error /* offset lower -> error */ 123 jl bpf_error /* offset lower -> error */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
179 movzbl (%rax), %eax 148 movzbl (%rax), %eax
180 ret 149 ret
181 150
182bpf_slow_path_byte_msh_neg:
183 cmp SKF_MAX_NEG_OFF, %esi
184 jl bpf_error
185sk_load_byte_msh_negative_offset:
186 .globl sk_load_byte_msh_negative_offset
187 xchg %eax,%ebx /* dont lose A , X is about to be scratched */
188 sk_negative_common(1)
189 movzbl (%rax),%eax
190 and $15,%al
191 shl $2,%al
192 xchg %eax,%ebx
193 ret
194
195bpf_error: 151bpf_error:
196# force a return 0 from jit handler 152# force a return 0 from jit handler
197 xor %eax,%eax 153 xor %eax,%eax
198 mov -8(%rbp),%rbx 154 mov - MAX_BPF_STACK(%rbp),%rbx
155 mov - MAX_BPF_STACK + 8(%rbp),%r13
156 mov - MAX_BPF_STACK + 16(%rbp),%r14
157 mov - MAX_BPF_STACK + 24(%rbp),%r15
199 leaveq 158 leaveq
200 ret 159 ret
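With %rdi no longer saved and restored by the helpers, the slow paths depend on the frame that the new prologue in bpf_jit_comp.c (below) establishes once: the four callee-saved registers sit at the bottom of the MAX_BPF_STACK area, the scratch buffer for skb_copy_bits() sits 32 bytes above them, and the 512-byte BPF stack occupies the top. A sketch of the offsets implied by the macro at the top of this file (illustrative arithmetic only):

	#include <stdio.h>

	#define MAX_BPF_STACK (512 + 32 + 8)	/* local redefinition in bpf_jit.S */

	int main(void)
	{
		printf("rbx at rbp-%d\n", MAX_BPF_STACK);	/* rbp-552 */
		printf("r13 at rbp-%d\n", MAX_BPF_STACK - 8);
		printf("r14 at rbp-%d\n", MAX_BPF_STACK - 16);
		printf("r15 at rbp-%d\n", MAX_BPF_STACK - 24);
		printf("skb_copy_bits() buffer at rbp-%d\n", MAX_BPF_STACK - 32);
		return 0;
	}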
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 6d5663a599a7..99bef86ed6df 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1,6 +1,7 @@
1/* bpf_jit_comp.c : BPF JIT compiler 1/* bpf_jit_comp.c : BPF JIT compiler
2 * 2 *
3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
4 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
4 * 5 *
5 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -14,28 +15,16 @@
14#include <linux/if_vlan.h> 15#include <linux/if_vlan.h>
15#include <linux/random.h> 16#include <linux/random.h>
16 17
17/*
18 * Conventions :
19 * EAX : BPF A accumulator
20 * EBX : BPF X accumulator
21 * RDI : pointer to skb (first argument given to JIT function)
22 * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
23 * ECX,EDX,ESI : scratch registers
24 * r9d : skb->len - skb->data_len (headlen)
25 * r8 : skb->data
26 * -8(RBP) : saved RBX value
27 * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
28 */
29int bpf_jit_enable __read_mostly; 18int bpf_jit_enable __read_mostly;
30 19
31/* 20/*
32 * assembly code in arch/x86/net/bpf_jit.S 21 * assembly code in arch/x86/net/bpf_jit.S
33 */ 22 */
34extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; 23extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
35extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; 24extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
36extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[]; 25extern u8 sk_load_byte_positive_offset[];
37extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; 26extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
38extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[]; 27extern u8 sk_load_byte_negative_offset[];
39 28
40static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 29static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
41{ 30{
@@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
56#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 45#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
57#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 46#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
58#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 47#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
59#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0) 48#define EMIT1_off32(b1, off) \
60 49 do {EMIT1(b1); EMIT(off, 4); } while (0)
61#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */ 50#define EMIT2_off32(b1, b2, off) \
62#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */ 51 do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
52#define EMIT3_off32(b1, b2, b3, off) \
53 do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
54#define EMIT4_off32(b1, b2, b3, b4, off) \
55 do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
63 56
64static inline bool is_imm8(int value) 57static inline bool is_imm8(int value)
65{ 58{
66 return value <= 127 && value >= -128; 59 return value <= 127 && value >= -128;
67} 60}
68 61
69static inline bool is_near(int offset) 62static inline bool is_simm32(s64 value)
70{ 63{
71 return offset <= 127 && offset >= -128; 64 return value == (s64) (s32) value;
72} 65}
73 66
74#define EMIT_JMP(offset) \ 67/* mov dst, src */
75do { \ 68#define EMIT_mov(DST, SRC) \
76 if (offset) { \ 69 do {if (DST != SRC) \
77 if (is_near(offset)) \ 70 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
78 EMIT2(0xeb, offset); /* jmp .+off8 */ \ 71 } while (0)
79 else \ 72
80 EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \ 73static int bpf_size_to_x86_bytes(int bpf_size)
81 } \ 74{
82} while (0) 75 if (bpf_size == BPF_W)
76 return 4;
77 else if (bpf_size == BPF_H)
78 return 2;
79 else if (bpf_size == BPF_B)
80 return 1;
81 else if (bpf_size == BPF_DW)
82 return 4; /* imm32 */
83 else
84 return 0;
85}
83 86
84/* list of x86 cond jumps opcodes (. + s8) 87/* list of x86 cond jumps opcodes (. + s8)
85 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@ -90,27 +93,8 @@ do { \
90#define X86_JNE 0x75 93#define X86_JNE 0x75
91#define X86_JBE 0x76 94#define X86_JBE 0x76
92#define X86_JA 0x77 95#define X86_JA 0x77
93 96#define X86_JGE 0x7D
94#define EMIT_COND_JMP(op, offset) \ 97#define X86_JG 0x7F
95do { \
96 if (is_near(offset)) \
97 EMIT2(op, offset); /* jxx .+off8 */ \
98 else { \
99 EMIT2(0x0f, op + 0x10); \
100 EMIT(offset, 4); /* jxx .+off32 */ \
101 } \
102} while (0)
103
104#define COND_SEL(CODE, TOP, FOP) \
105 case CODE: \
106 t_op = TOP; \
107 f_op = FOP; \
108 goto cond_branch
109
110
111#define SEEN_DATAREF 1 /* might call external helpers */
112#define SEEN_XREG 2 /* ebx is used */
113#define SEEN_MEM 4 /* use mem[] for temporary storage */
114 98
115static inline void bpf_flush_icache(void *start, void *end) 99static inline void bpf_flush_icache(void *start, void *end)
116{ 100{
@@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end)
125#define CHOOSE_LOAD_FUNC(K, func) \ 109#define CHOOSE_LOAD_FUNC(K, func) \
126 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 110 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
127 111
128/* Helper to find the offset of pkt_type in sk_buff
129 * We want to make sure its still a 3bit field starting at a byte boundary.
130 */
131#define PKT_TYPE_MAX 7
132static int pkt_type_offset(void)
133{
134 struct sk_buff skb_probe = {
135 .pkt_type = ~0,
136 };
137 char *ct = (char *)&skb_probe;
138 unsigned int off;
139
140 for (off = 0; off < sizeof(struct sk_buff); off++) {
141 if (ct[off] == PKT_TYPE_MAX)
142 return off;
143 }
144 pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
145 return -1;
146}
147
148struct bpf_binary_header { 112struct bpf_binary_header {
149 unsigned int pages; 113 unsigned int pages;
150 /* Note : for security reasons, bpf code will follow a randomly 114 /* Note : for security reasons, bpf code will follow a randomly
@@ -178,583 +142,771 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
178 return header; 142 return header;
179} 143}
180 144
181void bpf_jit_compile(struct sk_filter *fp) 145/* pick a register outside of BPF range for JIT internal work */
146#define AUX_REG (MAX_BPF_REG + 1)
147
148/* the following table maps BPF registers to x64 registers.
149 * x64 register r12 is unused, since if used as base address register
150 * in load/store instructions, it always needs an extra byte of encoding
151 */
152static const int reg2hex[] = {
153 [BPF_REG_0] = 0, /* rax */
154 [BPF_REG_1] = 7, /* rdi */
155 [BPF_REG_2] = 6, /* rsi */
156 [BPF_REG_3] = 2, /* rdx */
157 [BPF_REG_4] = 1, /* rcx */
158 [BPF_REG_5] = 0, /* r8 */
159 [BPF_REG_6] = 3, /* rbx callee saved */
160 [BPF_REG_7] = 5, /* r13 callee saved */
161 [BPF_REG_8] = 6, /* r14 callee saved */
162 [BPF_REG_9] = 7, /* r15 callee saved */
163 [BPF_REG_FP] = 5, /* rbp readonly */
164 [AUX_REG] = 3, /* r11 temp register */
165};
166
167/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
168 * which need extra byte of encoding.
169 * rax,rcx,...,rbp have simpler encoding
170 */
171static inline bool is_ereg(u32 reg)
182{ 172{
183 u8 temp[64]; 173 if (reg == BPF_REG_5 || reg == AUX_REG ||
184 u8 *prog; 174 (reg >= BPF_REG_7 && reg <= BPF_REG_9))
185 unsigned int proglen, oldproglen = 0; 175 return true;
186 int ilen, i; 176 else
187 int t_offset, f_offset; 177 return false;
188 u8 t_op, f_op, seen = 0, pass; 178}
189 u8 *image = NULL;
190 struct bpf_binary_header *header = NULL;
191 u8 *func;
192 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
193 unsigned int cleanup_addr; /* epilogue code offset */
194 unsigned int *addrs;
195 const struct sock_filter *filter = fp->insns;
196 int flen = fp->len;
197 179
198 if (!bpf_jit_enable) 180/* add modifiers if 'reg' maps to x64 registers r8..r15 */
199 return; 181static inline u8 add_1mod(u8 byte, u32 reg)
182{
183 if (is_ereg(reg))
184 byte |= 1;
185 return byte;
186}
200 187
201 addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); 188static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
202 if (addrs == NULL) 189{
203 return; 190 if (is_ereg(r1))
191 byte |= 1;
192 if (is_ereg(r2))
193 byte |= 4;
194 return byte;
195}
204 196
205 /* Before first pass, make a rough estimation of addrs[] 197/* encode 'dst_reg' register into x64 opcode 'byte' */
206 * each bpf instruction is translated to less than 64 bytes 198static inline u8 add_1reg(u8 byte, u32 dst_reg)
199{
200 return byte + reg2hex[dst_reg];
201}
202
203/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
204static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
205{
206 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
207}
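To see how these helpers compose, take BPF_ALU64 | BPF_ADD | BPF_X with dst_reg = BPF_REG_6 (rbx) and src_reg = BPF_REG_7 (r13): add_2mod(0x48, ...) yields the REX prefix 0x4C (W=1, plus R=1 for r13) and add_2reg(0xC0, ...) yields the ModRM byte 0xEB, so the emitted bytes 4c 01 eb decode as 'add rbx,r13'. A standalone sketch of that arithmetic (tables trimmed to the two registers involved):

	#include <stdio.h>

	static const int reg2hex_demo[] = { [6] = 3 /* rbx */, [7] = 5 /* r13 */ };
	static int is_ereg_demo(int reg) { return reg == 7; }	/* r13 is r8..r15 */

	int main(void)
	{
		int dst = 6, src = 7;			/* BPF_REG_6 += BPF_REG_7 */
		unsigned char rex = 0x48 | (is_ereg_demo(dst) ? 1 : 0)
					 | (is_ereg_demo(src) ? 4 : 0);	/* add_2mod() */
		unsigned char modrm = 0xC0 + reg2hex_demo[dst]
				    + (reg2hex_demo[src] << 3);		/* add_2reg() */

		printf("%02x 01 %02x\n", rex, modrm);	/* 4c 01 eb = add rbx,r13 */
		return 0;
	}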
208
209struct jit_context {
210 unsigned int cleanup_addr; /* epilogue code offset */
211 bool seen_ld_abs;
212};
213
214static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
215 int oldproglen, struct jit_context *ctx)
216{
217 struct sock_filter_int *insn = bpf_prog->insnsi;
218 int insn_cnt = bpf_prog->len;
219 u8 temp[64];
220 int i;
221 int proglen = 0;
222 u8 *prog = temp;
223 int stacksize = MAX_BPF_STACK +
224 32 /* space for rbx, r13, r14, r15 */ +
225 8 /* space for skb_copy_bits() buffer */;
226
227 EMIT1(0x55); /* push rbp */
228 EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
229
230 /* sub rsp, stacksize */
231 EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
232
233 /* all classic BPF filters use R6(rbx) save it */
234
235 /* mov qword ptr [rbp-X],rbx */
236 EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
237
238 /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
239 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
240 * R8(r14). R9(r15) spill could be made conditional, but there is only
241 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
242 * The overhead of extra spill is negligible for any filter other
243 * than synthetic ones. Therefore not worth adding complexity.
207 */ 244 */
208 for (proglen = 0, i = 0; i < flen; i++) { 245
209 proglen += 64; 246 /* mov qword ptr [rbp-X],r13 */
210 addrs[i] = proglen; 247 EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
248 /* mov qword ptr [rbp-X],r14 */
249 EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
250 /* mov qword ptr [rbp-X],r15 */
251 EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
252
253 /* clear A and X registers */
254 EMIT2(0x31, 0xc0); /* xor eax, eax */
255 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
256
257 if (ctx->seen_ld_abs) {
258 /* r9d : skb->len - skb->data_len (headlen)
259 * r10 : skb->data
260 */
261 if (is_imm8(offsetof(struct sk_buff, len)))
262 /* mov %r9d, off8(%rdi) */
263 EMIT4(0x44, 0x8b, 0x4f,
264 offsetof(struct sk_buff, len));
265 else
266 /* mov %r9d, off32(%rdi) */
267 EMIT3_off32(0x44, 0x8b, 0x8f,
268 offsetof(struct sk_buff, len));
269
270 if (is_imm8(offsetof(struct sk_buff, data_len)))
271 /* sub %r9d, off8(%rdi) */
272 EMIT4(0x44, 0x2b, 0x4f,
273 offsetof(struct sk_buff, data_len));
274 else
275 EMIT3_off32(0x44, 0x2b, 0x8f,
276 offsetof(struct sk_buff, data_len));
277
278 if (is_imm8(offsetof(struct sk_buff, data)))
279 /* mov %r10, off8(%rdi) */
280 EMIT4(0x4c, 0x8b, 0x57,
281 offsetof(struct sk_buff, data));
282 else
283 /* mov %r10, off32(%rdi) */
284 EMIT3_off32(0x4c, 0x8b, 0x97,
285 offsetof(struct sk_buff, data));
211 } 286 }
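The prologue caches hlen = skb->len - skb->data_len because skb->len counts all payload while data_len counts only the paged, non-linear part; the difference is how many bytes are directly readable at skb->data. Offsets within that bound take the fast path in bpf_jit.S, everything else goes through skb_copy_bits(). Illustrative numbers only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 1500, data_len = 448;	/* hypothetical skb */
		unsigned int hlen = len - data_len;		/* 1052 linear bytes */
		unsigned int off = 34;				/* one-byte load offset */

		printf("%s\n", off < hlen ? "direct load from skb->data"
					  : "skb_copy_bits() slow path");
		return 0;
	}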
212 cleanup_addr = proglen; /* epilogue address */
213 287
214 for (pass = 0; pass < 10; pass++) { 288 for (i = 0; i < insn_cnt; i++, insn++) {
215 u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; 289 const s32 imm32 = insn->imm;
216 /* no prologue/epilogue for trivial filters (RET something) */ 290 u32 dst_reg = insn->dst_reg;
217 proglen = 0; 291 u32 src_reg = insn->src_reg;
218 prog = temp; 292 u8 b1 = 0, b2 = 0, b3 = 0;
293 s64 jmp_offset;
294 u8 jmp_cond;
295 int ilen;
296 u8 *func;
297
298 switch (insn->code) {
299 /* ALU */
300 case BPF_ALU | BPF_ADD | BPF_X:
301 case BPF_ALU | BPF_SUB | BPF_X:
302 case BPF_ALU | BPF_AND | BPF_X:
303 case BPF_ALU | BPF_OR | BPF_X:
304 case BPF_ALU | BPF_XOR | BPF_X:
305 case BPF_ALU64 | BPF_ADD | BPF_X:
306 case BPF_ALU64 | BPF_SUB | BPF_X:
307 case BPF_ALU64 | BPF_AND | BPF_X:
308 case BPF_ALU64 | BPF_OR | BPF_X:
309 case BPF_ALU64 | BPF_XOR | BPF_X:
310 switch (BPF_OP(insn->code)) {
311 case BPF_ADD: b2 = 0x01; break;
312 case BPF_SUB: b2 = 0x29; break;
313 case BPF_AND: b2 = 0x21; break;
314 case BPF_OR: b2 = 0x09; break;
315 case BPF_XOR: b2 = 0x31; break;
316 }
317 if (BPF_CLASS(insn->code) == BPF_ALU64)
318 EMIT1(add_2mod(0x48, dst_reg, src_reg));
319 else if (is_ereg(dst_reg) || is_ereg(src_reg))
320 EMIT1(add_2mod(0x40, dst_reg, src_reg));
321 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
322 break;
219 323
220 if (seen_or_pass0) { 324 /* mov dst, src */
221 EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ 325 case BPF_ALU64 | BPF_MOV | BPF_X:
222 EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ 326 EMIT_mov(dst_reg, src_reg);
223 /* note : must save %rbx in case bpf_error is hit */ 327 break;
224 if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) 328
225 EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ 329 /* mov32 dst, src */
226 if (seen_or_pass0 & SEEN_XREG) 330 case BPF_ALU | BPF_MOV | BPF_X:
227 CLEAR_X(); /* make sure we dont leek kernel memory */ 331 if (is_ereg(dst_reg) || is_ereg(src_reg))
228 332 EMIT1(add_2mod(0x40, dst_reg, src_reg));
229 /* 333 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
230 * If this filter needs to access skb data, 334 break;
231 * loads r9 and r8 with : 335
232 * r9 = skb->len - skb->data_len 336 /* neg dst */
233 * r8 = skb->data 337 case BPF_ALU | BPF_NEG:
338 case BPF_ALU64 | BPF_NEG:
339 if (BPF_CLASS(insn->code) == BPF_ALU64)
340 EMIT1(add_1mod(0x48, dst_reg));
341 else if (is_ereg(dst_reg))
342 EMIT1(add_1mod(0x40, dst_reg));
343 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
344 break;
345
346 case BPF_ALU | BPF_ADD | BPF_K:
347 case BPF_ALU | BPF_SUB | BPF_K:
348 case BPF_ALU | BPF_AND | BPF_K:
349 case BPF_ALU | BPF_OR | BPF_K:
350 case BPF_ALU | BPF_XOR | BPF_K:
351 case BPF_ALU64 | BPF_ADD | BPF_K:
352 case BPF_ALU64 | BPF_SUB | BPF_K:
353 case BPF_ALU64 | BPF_AND | BPF_K:
354 case BPF_ALU64 | BPF_OR | BPF_K:
355 case BPF_ALU64 | BPF_XOR | BPF_K:
356 if (BPF_CLASS(insn->code) == BPF_ALU64)
357 EMIT1(add_1mod(0x48, dst_reg));
358 else if (is_ereg(dst_reg))
359 EMIT1(add_1mod(0x40, dst_reg));
360
361 switch (BPF_OP(insn->code)) {
362 case BPF_ADD: b3 = 0xC0; break;
363 case BPF_SUB: b3 = 0xE8; break;
364 case BPF_AND: b3 = 0xE0; break;
365 case BPF_OR: b3 = 0xC8; break;
366 case BPF_XOR: b3 = 0xF0; break;
367 }
368
369 if (is_imm8(imm32))
370 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
371 else
372 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
373 break;
374
375 case BPF_ALU64 | BPF_MOV | BPF_K:
376 /* optimization: if imm32 is positive,
377 * use 'mov eax, imm32' (which zero-extends imm32)
378 * to save 2 bytes
234 */ 379 */
235 if (seen_or_pass0 & SEEN_DATAREF) { 380 if (imm32 < 0) {
236 if (offsetof(struct sk_buff, len) <= 127) 381 /* 'mov rax, imm32' sign extends imm32 */
237 /* mov off8(%rdi),%r9d */ 382 b1 = add_1mod(0x48, dst_reg);
238 EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); 383 b2 = 0xC7;
239 else { 384 b3 = 0xC0;
240 /* mov off32(%rdi),%r9d */ 385 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
241 EMIT3(0x44, 0x8b, 0x8f); 386 break;
242 EMIT(offsetof(struct sk_buff, len), 4); 387 }
243 }
244 if (is_imm8(offsetof(struct sk_buff, data_len)))
245 /* sub off8(%rdi),%r9d */
246 EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
247 else {
248 EMIT3(0x44, 0x2b, 0x8f);
249 EMIT(offsetof(struct sk_buff, data_len), 4);
250 }
251 388
252 if (is_imm8(offsetof(struct sk_buff, data))) 389 case BPF_ALU | BPF_MOV | BPF_K:
253 /* mov off8(%rdi),%r8 */ 390 /* mov %eax, imm32 */
254 EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); 391 if (is_ereg(dst_reg))
255 else { 392 EMIT1(add_1mod(0x40, dst_reg));
256 /* mov off32(%rdi),%r8 */ 393 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
257 EMIT3(0x4c, 0x8b, 0x87); 394 break;
258 EMIT(offsetof(struct sk_buff, data), 4); 395
259 } 396 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
397 case BPF_ALU | BPF_MOD | BPF_X:
398 case BPF_ALU | BPF_DIV | BPF_X:
399 case BPF_ALU | BPF_MOD | BPF_K:
400 case BPF_ALU | BPF_DIV | BPF_K:
401 case BPF_ALU64 | BPF_MOD | BPF_X:
402 case BPF_ALU64 | BPF_DIV | BPF_X:
403 case BPF_ALU64 | BPF_MOD | BPF_K:
404 case BPF_ALU64 | BPF_DIV | BPF_K:
405 EMIT1(0x50); /* push rax */
406 EMIT1(0x52); /* push rdx */
407
408 if (BPF_SRC(insn->code) == BPF_X)
409 /* mov r11, src_reg */
410 EMIT_mov(AUX_REG, src_reg);
411 else
412 /* mov r11, imm32 */
413 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
414
415 /* mov rax, dst_reg */
416 EMIT_mov(BPF_REG_0, dst_reg);
417
418 /* xor edx, edx
419 * equivalent to 'xor rdx, rdx', but one byte less
420 */
421 EMIT2(0x31, 0xd2);
422
423 if (BPF_SRC(insn->code) == BPF_X) {
424 /* if (src_reg == 0) return 0 */
425
426 /* cmp r11, 0 */
427 EMIT4(0x49, 0x83, 0xFB, 0x00);
428
429 /* jne .+9 (skip over pop, pop, xor and jmp) */
430 EMIT2(X86_JNE, 1 + 1 + 2 + 5);
431 EMIT1(0x5A); /* pop rdx */
432 EMIT1(0x58); /* pop rax */
433 EMIT2(0x31, 0xc0); /* xor eax, eax */
434
435 /* jmp cleanup_addr
436 * addrs[i] - 11, because there are 11 bytes
437 * after this insn: div, mov, pop, pop, mov
438 */
439 jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
440 EMIT1_off32(0xE9, jmp_offset);
260 } 441 }
261 }
262 442
263 switch (filter[0].code) { 443 if (BPF_CLASS(insn->code) == BPF_ALU64)
264 case BPF_S_RET_K: 444 /* div r11 */
265 case BPF_S_LD_W_LEN: 445 EMIT3(0x49, 0xF7, 0xF3);
266 case BPF_S_ANC_PROTOCOL: 446 else
267 case BPF_S_ANC_IFINDEX: 447 /* div r11d */
268 case BPF_S_ANC_MARK: 448 EMIT3(0x41, 0xF7, 0xF3);
269 case BPF_S_ANC_RXHASH: 449
270 case BPF_S_ANC_CPU: 450 if (BPF_OP(insn->code) == BPF_MOD)
271 case BPF_S_ANC_VLAN_TAG: 451 /* mov r11, rdx */
272 case BPF_S_ANC_VLAN_TAG_PRESENT: 452 EMIT3(0x49, 0x89, 0xD3);
273 case BPF_S_ANC_QUEUE: 453 else
274 case BPF_S_ANC_PKTTYPE: 454 /* mov r11, rax */
275 case BPF_S_LD_W_ABS: 455 EMIT3(0x49, 0x89, 0xC3);
276 case BPF_S_LD_H_ABS: 456
277 case BPF_S_LD_B_ABS: 457 EMIT1(0x5A); /* pop rdx */
278 /* first instruction sets A register (or is RET 'constant') */ 458 EMIT1(0x58); /* pop rax */
459
460 /* mov dst_reg, r11 */
461 EMIT_mov(dst_reg, AUX_REG);
 279 break; 462 break;
280 default:
281 /* make sure we dont leak kernel information to user */
282 CLEAR_A(); /* A = 0 */
283 }
284 463
285 for (i = 0; i < flen; i++) { 464 case BPF_ALU | BPF_MUL | BPF_K:
286 unsigned int K = filter[i].k; 465 case BPF_ALU | BPF_MUL | BPF_X:
466 case BPF_ALU64 | BPF_MUL | BPF_K:
467 case BPF_ALU64 | BPF_MUL | BPF_X:
468 EMIT1(0x50); /* push rax */
469 EMIT1(0x52); /* push rdx */
470
471 /* mov r11, dst_reg */
472 EMIT_mov(AUX_REG, dst_reg);
473
474 if (BPF_SRC(insn->code) == BPF_X)
475 /* mov rax, src_reg */
476 EMIT_mov(BPF_REG_0, src_reg);
477 else
478 /* mov rax, imm32 */
479 EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
480
481 if (BPF_CLASS(insn->code) == BPF_ALU64)
482 EMIT1(add_1mod(0x48, AUX_REG));
483 else if (is_ereg(AUX_REG))
484 EMIT1(add_1mod(0x40, AUX_REG));
485 /* mul(q) r11 */
486 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
487
488 /* mov r11, rax */
489 EMIT_mov(AUX_REG, BPF_REG_0);
490
491 EMIT1(0x5A); /* pop rdx */
492 EMIT1(0x58); /* pop rax */
493
494 /* mov dst_reg, r11 */
495 EMIT_mov(dst_reg, AUX_REG);
496 break;
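The push/pop pairs around the divide and multiply sequences exist because x86 mul and div implicitly use rax and rdx, which are live BPF registers (R0 and R3 in reg2hex). A small user-space illustration of that implicit register contract, assuming gcc-style inline asm on x86-64:

	#include <stdio.h>

	int main(void)
	{
		unsigned long a = 7, b = 3, q, r;

		/* divq: dividend in rdx:rax, quotient to rax, remainder to rdx */
		__asm__ ("divq %4"
			 : "=a" (q), "=d" (r)
			 : "a" (a), "d" (0UL), "r" (b));

		printf("7 / 3 = %lu rem %lu\n", q, r);
		return 0;
	}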
287 497
288 switch (filter[i].code) { 498 /* shifts */
289 case BPF_S_ALU_ADD_X: /* A += X; */ 499 case BPF_ALU | BPF_LSH | BPF_K:
290 seen |= SEEN_XREG; 500 case BPF_ALU | BPF_RSH | BPF_K:
291 EMIT2(0x01, 0xd8); /* add %ebx,%eax */ 501 case BPF_ALU | BPF_ARSH | BPF_K:
292 break; 502 case BPF_ALU64 | BPF_LSH | BPF_K:
293 case BPF_S_ALU_ADD_K: /* A += K; */ 503 case BPF_ALU64 | BPF_RSH | BPF_K:
294 if (!K) 504 case BPF_ALU64 | BPF_ARSH | BPF_K:
295 break; 505 if (BPF_CLASS(insn->code) == BPF_ALU64)
296 if (is_imm8(K)) 506 EMIT1(add_1mod(0x48, dst_reg));
297 EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ 507 else if (is_ereg(dst_reg))
298 else 508 EMIT1(add_1mod(0x40, dst_reg));
299 EMIT1_off32(0x05, K); /* add imm32,%eax */ 509
300 break; 510 switch (BPF_OP(insn->code)) {
301 case BPF_S_ALU_SUB_X: /* A -= X; */ 511 case BPF_LSH: b3 = 0xE0; break;
302 seen |= SEEN_XREG; 512 case BPF_RSH: b3 = 0xE8; break;
303 EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ 513 case BPF_ARSH: b3 = 0xF8; break;
304 break; 514 }
305 case BPF_S_ALU_SUB_K: /* A -= K */ 515 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
306 if (!K) 516 break;
307 break; 517
308 if (is_imm8(K)) 518 case BPF_ALU | BPF_END | BPF_FROM_BE:
309 EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ 519 switch (imm32) {
310 else 520 case 16:
311 EMIT1_off32(0x2d, K); /* sub imm32,%eax */ 521 /* emit 'ror %ax, 8' to swap lower 2 bytes */
312 break; 522 EMIT1(0x66);
313 case BPF_S_ALU_MUL_X: /* A *= X; */ 523 if (is_ereg(dst_reg))
314 seen |= SEEN_XREG; 524 EMIT1(0x41);
315 EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ 525 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
316 break; 526 break;
317 case BPF_S_ALU_MUL_K: /* A *= K */ 527 case 32:
318 if (is_imm8(K)) 528 /* emit 'bswap eax' to swap lower 4 bytes */
319 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ 529 if (is_ereg(dst_reg))
320 else { 530 EMIT2(0x41, 0x0F);
321 EMIT2(0x69, 0xc0); /* imul imm32,%eax */
322 EMIT(K, 4);
323 }
324 break;
325 case BPF_S_ALU_DIV_X: /* A /= X; */
326 seen |= SEEN_XREG;
327 EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
328 if (pc_ret0 > 0) {
329 /* addrs[pc_ret0 - 1] is start address of target
330 * (addrs[i] - 4) is the address following this jmp
331 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
332 */
333 EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
334 (addrs[i] - 4));
335 } else {
336 EMIT_COND_JMP(X86_JNE, 2 + 5);
337 CLEAR_A();
338 EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
339 }
340 EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
341 break;
342 case BPF_S_ALU_MOD_X: /* A %= X; */
343 seen |= SEEN_XREG;
344 EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
345 if (pc_ret0 > 0) {
346 /* addrs[pc_ret0 - 1] is start address of target
347 * (addrs[i] - 6) is the address following this jmp
348 * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
349 */
350 EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
351 (addrs[i] - 6));
352 } else {
353 EMIT_COND_JMP(X86_JNE, 2 + 5);
354 CLEAR_A();
355 EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
356 }
357 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
358 EMIT2(0xf7, 0xf3); /* div %ebx */
359 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
360 break;
361 case BPF_S_ALU_MOD_K: /* A %= K; */
362 if (K == 1) {
363 CLEAR_A();
364 break;
365 }
366 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
367 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
368 EMIT2(0xf7, 0xf1); /* div %ecx */
369 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
370 break;
371 case BPF_S_ALU_DIV_K: /* A /= K */
372 if (K == 1)
373 break;
374 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
375 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
376 EMIT2(0xf7, 0xf1); /* div %ecx */
377 break;
378 case BPF_S_ALU_AND_X:
379 seen |= SEEN_XREG;
380 EMIT2(0x21, 0xd8); /* and %ebx,%eax */
381 break;
382 case BPF_S_ALU_AND_K:
383 if (K >= 0xFFFFFF00) {
384 EMIT2(0x24, K & 0xFF); /* and imm8,%al */
385 } else if (K >= 0xFFFF0000) {
386 EMIT2(0x66, 0x25); /* and imm16,%ax */
387 EMIT(K, 2);
388 } else {
389 EMIT1_off32(0x25, K); /* and imm32,%eax */
390 }
391 break;
392 case BPF_S_ALU_OR_X:
393 seen |= SEEN_XREG;
394 EMIT2(0x09, 0xd8); /* or %ebx,%eax */
395 break;
396 case BPF_S_ALU_OR_K:
397 if (is_imm8(K))
398 EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
399 else
400 EMIT1_off32(0x0d, K); /* or imm32,%eax */
401 break;
402 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
403 case BPF_S_ALU_XOR_X:
404 seen |= SEEN_XREG;
405 EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
406 break;
407 case BPF_S_ALU_XOR_K: /* A ^= K; */
408 if (K == 0)
409 break;
410 if (is_imm8(K))
411 EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
412 else
413 EMIT1_off32(0x35, K); /* xor imm32,%eax */
414 break;
415 case BPF_S_ALU_LSH_X: /* A <<= X; */
416 seen |= SEEN_XREG;
417 EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
418 break;
419 case BPF_S_ALU_LSH_K:
420 if (K == 0)
421 break;
422 else if (K == 1)
423 EMIT2(0xd1, 0xe0); /* shl %eax */
424 else
425 EMIT3(0xc1, 0xe0, K);
426 break;
427 case BPF_S_ALU_RSH_X: /* A >>= X; */
428 seen |= SEEN_XREG;
429 EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
430 break;
431 case BPF_S_ALU_RSH_K: /* A >>= K; */
432 if (K == 0)
433 break;
434 else if (K == 1)
435 EMIT2(0xd1, 0xe8); /* shr %eax */
436 else
437 EMIT3(0xc1, 0xe8, K);
438 break;
439 case BPF_S_ALU_NEG:
440 EMIT2(0xf7, 0xd8); /* neg %eax */
441 break;
442 case BPF_S_RET_K:
443 if (!K) {
444 if (pc_ret0 == -1)
445 pc_ret0 = i;
446 CLEAR_A();
447 } else {
448 EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
449 }
450 /* fallinto */
451 case BPF_S_RET_A:
452 if (seen_or_pass0) {
453 if (i != flen - 1) {
454 EMIT_JMP(cleanup_addr - addrs[i]);
455 break;
456 }
457 if (seen_or_pass0 & SEEN_XREG)
458 EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
459 EMIT1(0xc9); /* leaveq */
460 }
461 EMIT1(0xc3); /* ret */
462 break;
463 case BPF_S_MISC_TAX: /* X = A */
464 seen |= SEEN_XREG;
465 EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
466 break;
467 case BPF_S_MISC_TXA: /* A = X */
468 seen |= SEEN_XREG;
469 EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
470 break;
471 case BPF_S_LD_IMM: /* A = K */
472 if (!K)
473 CLEAR_A();
474 else
475 EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
476 break;
477 case BPF_S_LDX_IMM: /* X = K */
478 seen |= SEEN_XREG;
479 if (!K)
480 CLEAR_X();
481 else 531 else
482 EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ 532 EMIT1(0x0F);
483 break; 533 EMIT1(add_1reg(0xC8, dst_reg));
484 case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
485 seen |= SEEN_MEM;
486 EMIT3(0x8b, 0x45, 0xf0 - K*4);
487 break;
488 case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
489 seen |= SEEN_XREG | SEEN_MEM;
490 EMIT3(0x8b, 0x5d, 0xf0 - K*4);
491 break;
492 case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
493 seen |= SEEN_MEM;
494 EMIT3(0x89, 0x45, 0xf0 - K*4);
495 break;
496 case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
497 seen |= SEEN_XREG | SEEN_MEM;
498 EMIT3(0x89, 0x5d, 0xf0 - K*4);
499 break;
500 case BPF_S_LD_W_LEN: /* A = skb->len; */
501 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
502 if (is_imm8(offsetof(struct sk_buff, len)))
503 /* mov off8(%rdi),%eax */
504 EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
505 else {
506 EMIT2(0x8b, 0x87);
507 EMIT(offsetof(struct sk_buff, len), 4);
508 }
509 break;
510 case BPF_S_LDX_W_LEN: /* X = skb->len; */
511 seen |= SEEN_XREG;
512 if (is_imm8(offsetof(struct sk_buff, len)))
513 /* mov off8(%rdi),%ebx */
514 EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
515 else {
516 EMIT2(0x8b, 0x9f);
517 EMIT(offsetof(struct sk_buff, len), 4);
518 }
519 break;
520 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
521 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
522 if (is_imm8(offsetof(struct sk_buff, protocol))) {
523 /* movzwl off8(%rdi),%eax */
524 EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
525 } else {
526 EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
527 EMIT(offsetof(struct sk_buff, protocol), 4);
528 }
529 EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
530 break;
531 case BPF_S_ANC_IFINDEX:
532 if (is_imm8(offsetof(struct sk_buff, dev))) {
533 /* movq off8(%rdi),%rax */
534 EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
535 } else {
536 EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
537 EMIT(offsetof(struct sk_buff, dev), 4);
538 }
539 EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
540 EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
541 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
542 EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
543 EMIT(offsetof(struct net_device, ifindex), 4);
544 break;
545 case BPF_S_ANC_MARK:
546 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
547 if (is_imm8(offsetof(struct sk_buff, mark))) {
548 /* mov off8(%rdi),%eax */
549 EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
550 } else {
551 EMIT2(0x8b, 0x87);
552 EMIT(offsetof(struct sk_buff, mark), 4);
553 }
554 break;
555 case BPF_S_ANC_RXHASH:
556 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
557 if (is_imm8(offsetof(struct sk_buff, hash))) {
558 /* mov off8(%rdi),%eax */
559 EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
560 } else {
561 EMIT2(0x8b, 0x87);
562 EMIT(offsetof(struct sk_buff, hash), 4);
563 }
564 break;
565 case BPF_S_ANC_QUEUE:
566 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
567 if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
568 /* movzwl off8(%rdi),%eax */
569 EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
570 } else {
571 EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
572 EMIT(offsetof(struct sk_buff, queue_mapping), 4);
573 }
574 break;
575 case BPF_S_ANC_CPU:
576#ifdef CONFIG_SMP
577 EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
578 EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
579#else
580 CLEAR_A();
581#endif
582 break; 534 break;
583 case BPF_S_ANC_VLAN_TAG: 535 case 64:
584 case BPF_S_ANC_VLAN_TAG_PRESENT: 536 /* emit 'bswap rax' to swap 8 bytes */
585 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 537 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
586 if (is_imm8(offsetof(struct sk_buff, vlan_tci))) { 538 add_1reg(0xC8, dst_reg));
587 /* movzwl off8(%rdi),%eax */
588 EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
589 } else {
590 EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
591 EMIT(offsetof(struct sk_buff, vlan_tci), 4);
592 }
593 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
594 if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
595 EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
596 } else {
597 EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
598 EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
599 }
600 break;
601 case BPF_S_ANC_PKTTYPE:
602 {
603 int off = pkt_type_offset();
604
605 if (off < 0)
606 goto out;
607 if (is_imm8(off)) {
608 /* movzbl off8(%rdi),%eax */
609 EMIT4(0x0f, 0xb6, 0x47, off);
610 } else {
611 /* movbl off32(%rdi),%eax */
612 EMIT3(0x0f, 0xb6, 0x87);
613 EMIT(off, 4);
614 }
615 EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
616 break; 539 break;
617 } 540 }
618 case BPF_S_LD_W_ABS: 541 break;
619 func = CHOOSE_LOAD_FUNC(K, sk_load_word); 542
620common_load: seen |= SEEN_DATAREF; 543 case BPF_ALU | BPF_END | BPF_FROM_LE:
621 t_offset = func - (image + addrs[i]); 544 break;
622 EMIT1_off32(0xbe, K); /* mov imm32,%esi */ 545
623 EMIT1_off32(0xe8, t_offset); /* call */ 546 /* ST: *(u8*)(dst_reg + off) = imm */
624 break; 547 case BPF_ST | BPF_MEM | BPF_B:
625 case BPF_S_LD_H_ABS: 548 if (is_ereg(dst_reg))
626 func = CHOOSE_LOAD_FUNC(K, sk_load_half); 549 EMIT2(0x41, 0xC6);
627 goto common_load; 550 else
628 case BPF_S_LD_B_ABS: 551 EMIT1(0xC6);
629 func = CHOOSE_LOAD_FUNC(K, sk_load_byte); 552 goto st;
630 goto common_load; 553 case BPF_ST | BPF_MEM | BPF_H:
631 case BPF_S_LDX_B_MSH: 554 if (is_ereg(dst_reg))
632 func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); 555 EMIT3(0x66, 0x41, 0xC7);
633 seen |= SEEN_DATAREF | SEEN_XREG; 556 else
634 t_offset = func - (image + addrs[i]); 557 EMIT2(0x66, 0xC7);
635 EMIT1_off32(0xbe, K); /* mov imm32,%esi */ 558 goto st;
636 EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ 559 case BPF_ST | BPF_MEM | BPF_W:
637 break; 560 if (is_ereg(dst_reg))
638 case BPF_S_LD_W_IND: 561 EMIT2(0x41, 0xC7);
639 func = sk_load_word; 562 else
640common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; 563 EMIT1(0xC7);
641 t_offset = func - (image + addrs[i]); 564 goto st;
642 if (K) { 565 case BPF_ST | BPF_MEM | BPF_DW:
643 if (is_imm8(K)) { 566 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
644 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ 567
645 } else { 568st: if (is_imm8(insn->off))
646 EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ 569 EMIT2(add_1reg(0x40, dst_reg), insn->off);
647 EMIT(K, 4); 570 else
648 } 571 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
649 } else { 572
650 EMIT2(0x89,0xde); /* mov %ebx,%esi */ 573 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
651 } 574 break;
652 EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ 575
653 break; 576 /* STX: *(u8*)(dst_reg + off) = src_reg */
654 case BPF_S_LD_H_IND: 577 case BPF_STX | BPF_MEM | BPF_B:
655 func = sk_load_half; 578 /* emit 'mov byte ptr [rax + off], al' */
656 goto common_load_ind; 579 if (is_ereg(dst_reg) || is_ereg(src_reg) ||
657 case BPF_S_LD_B_IND: 580 /* have to add extra byte for x86 SIL, DIL regs */
658 func = sk_load_byte; 581 src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
659 goto common_load_ind; 582 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
660 case BPF_S_JMP_JA: 583 else
661 t_offset = addrs[i + K] - addrs[i]; 584 EMIT1(0x88);
662 EMIT_JMP(t_offset); 585 goto stx;
663 break; 586 case BPF_STX | BPF_MEM | BPF_H:
664 COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); 587 if (is_ereg(dst_reg) || is_ereg(src_reg))
665 COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); 588 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
666 COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); 589 else
667 COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); 590 EMIT2(0x66, 0x89);
668 COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); 591 goto stx;
669 COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); 592 case BPF_STX | BPF_MEM | BPF_W:
670 COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); 593 if (is_ereg(dst_reg) || is_ereg(src_reg))
671 COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); 594 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
672 595 else
673cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; 596 EMIT1(0x89);
674 t_offset = addrs[i + filter[i].jt] - addrs[i]; 597 goto stx;
675 598 case BPF_STX | BPF_MEM | BPF_DW:
676 /* same targets, can avoid doing the test :) */ 599 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
677 if (filter[i].jt == filter[i].jf) { 600stx: if (is_imm8(insn->off))
678 EMIT_JMP(t_offset); 601 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
679 break; 602 else
680 } 603 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
604 insn->off);
605 break;
606
607 /* LDX: dst_reg = *(u8*)(src_reg + off) */
608 case BPF_LDX | BPF_MEM | BPF_B:
609 /* emit 'movzx rax, byte ptr [rax + off]' */
610 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
611 goto ldx;
612 case BPF_LDX | BPF_MEM | BPF_H:
613 /* emit 'movzx rax, word ptr [rax + off]' */
614 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
615 goto ldx;
616 case BPF_LDX | BPF_MEM | BPF_W:
617 /* emit 'mov eax, dword ptr [rax+0x14]' */
618 if (is_ereg(dst_reg) || is_ereg(src_reg))
619 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
620 else
621 EMIT1(0x8B);
622 goto ldx;
623 case BPF_LDX | BPF_MEM | BPF_DW:
624 /* emit 'mov rax, qword ptr [rax+0x14]' */
625 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
626ldx: /* if insn->off == 0 we can save one extra byte, but
627 * special case of x86 r13 which always needs an offset
628 * is not worth the hassle
629 */
630 if (is_imm8(insn->off))
631 EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
632 else
633 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
634 insn->off);
635 break;
636
637 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
638 case BPF_STX | BPF_XADD | BPF_W:
639 /* emit 'lock add dword ptr [rax + off], eax' */
640 if (is_ereg(dst_reg) || is_ereg(src_reg))
641 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
642 else
643 EMIT2(0xF0, 0x01);
644 goto xadd;
645 case BPF_STX | BPF_XADD | BPF_DW:
646 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
647xadd: if (is_imm8(insn->off))
648 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
649 else
650 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
651 insn->off);
652 break;
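BPF_XADD is an atomic add to memory, hence the 0xF0 LOCK prefix on the emitted add. The portable C equivalent is an atomic fetch-add with the result discarded (illustrative; compilers commonly lower this to 'lock add' on x86):

	#include <stdio.h>

	int main(void)
	{
		unsigned int counter = 0;

		/* BPF_STX | BPF_XADD | BPF_W: *(u32 *)&counter += 1, atomically */
		__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);

		printf("%u\n", counter);	/* 1 */
		return 0;
	}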
653
654 /* call */
655 case BPF_JMP | BPF_CALL:
656 func = (u8 *) __bpf_call_base + imm32;
657 jmp_offset = func - (image + addrs[i]);
658 if (ctx->seen_ld_abs) {
659 EMIT2(0x41, 0x52); /* push %r10 */
660 EMIT2(0x41, 0x51); /* push %r9 */
661 /* need to adjust jmp offset, since
662 * pop %r9, pop %r10 take 4 bytes after call insn
663 */
664 jmp_offset += 4;
665 }
666 if (!imm32 || !is_simm32(jmp_offset)) {
667 pr_err("unsupported bpf func %d addr %p image %p\n",
668 imm32, func, image);
669 return -EINVAL;
670 }
671 EMIT1_off32(0xE8, jmp_offset);
672 if (ctx->seen_ld_abs) {
673 EMIT2(0x41, 0x59); /* pop %r9 */
674 EMIT2(0x41, 0x5A); /* pop %r10 */
675 }
676 break;
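For BPF_CALL the instruction's imm32 is not an absolute address but a 32-bit offset from __bpf_call_base, so the JIT recovers the helper with base + imm32 and emits a pc-relative call. A sketch of the arithmetic with made-up addresses:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical addresses for __bpf_call_base and one helper */
		unsigned long base   = 0xffffffff81000000UL;
		unsigned long helper = 0xffffffff81002340UL;

		int imm32 = (int)(helper - base);	/* what the instruction stores */

		printf("imm32=%#x -> func=%#lx\n", imm32, base + imm32);
		return 0;
	}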
677
678 /* cond jump */
679 case BPF_JMP | BPF_JEQ | BPF_X:
680 case BPF_JMP | BPF_JNE | BPF_X:
681 case BPF_JMP | BPF_JGT | BPF_X:
682 case BPF_JMP | BPF_JGE | BPF_X:
683 case BPF_JMP | BPF_JSGT | BPF_X:
684 case BPF_JMP | BPF_JSGE | BPF_X:
685 /* cmp dst_reg, src_reg */
686 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
687 add_2reg(0xC0, dst_reg, src_reg));
688 goto emit_cond_jmp;
689
690 case BPF_JMP | BPF_JSET | BPF_X:
691 /* test dst_reg, src_reg */
692 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
693 add_2reg(0xC0, dst_reg, src_reg));
694 goto emit_cond_jmp;
695
696 case BPF_JMP | BPF_JSET | BPF_K:
697 /* test dst_reg, imm32 */
698 EMIT1(add_1mod(0x48, dst_reg));
699 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
700 goto emit_cond_jmp;
701
702 case BPF_JMP | BPF_JEQ | BPF_K:
703 case BPF_JMP | BPF_JNE | BPF_K:
704 case BPF_JMP | BPF_JGT | BPF_K:
705 case BPF_JMP | BPF_JGE | BPF_K:
706 case BPF_JMP | BPF_JSGT | BPF_K:
707 case BPF_JMP | BPF_JSGE | BPF_K:
708 /* cmp dst_reg, imm8/32 */
709 EMIT1(add_1mod(0x48, dst_reg));
710
711 if (is_imm8(imm32))
712 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
713 else
714 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
715
716emit_cond_jmp: /* convert BPF opcode to x86 */
717 switch (BPF_OP(insn->code)) {
718 case BPF_JEQ:
719 jmp_cond = X86_JE;
720 break;
721 case BPF_JSET:
722 case BPF_JNE:
723 jmp_cond = X86_JNE;
724 break;
725 case BPF_JGT:
726 /* GT is unsigned '>', JA in x86 */
727 jmp_cond = X86_JA;
728 break;
729 case BPF_JGE:
730 /* GE is unsigned '>=', JAE in x86 */
731 jmp_cond = X86_JAE;
732 break;
733 case BPF_JSGT:
734 /* signed '>', GT in x86 */
735 jmp_cond = X86_JG;
736 break;
737 case BPF_JSGE:
738 /* signed '>=', GE in x86 */
739 jmp_cond = X86_JGE;
740 break;
741 default: /* to silence gcc warning */
742 return -EFAULT;
743 }
744 jmp_offset = addrs[i + insn->off] - addrs[i];
745 if (is_imm8(jmp_offset)) {
746 EMIT2(jmp_cond, jmp_offset);
747 } else if (is_simm32(jmp_offset)) {
748 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
749 } else {
750 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
751 return -EFAULT;
752 }
753
754 break;
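The jmp_cond + 0x10 trick relies on x86 opcode layout: each short conditional jump 'jcc rel8' has a near form '0F (opcode+0x10) rel32'. A sketch of the encoding decision made above, with X86_JA as the example:

	#include <stdio.h>

	#define X86_JA 0x77	/* unsigned '>' */

	static void emit_cond_jmp(long off)
	{
		if (off >= -128 && off <= 127)		/* is_imm8() */
			printf("%02x %02lx\n", X86_JA, off & 0xff);
		else					/* is_simm32() assumed */
			printf("0f %02x rel32=%ld\n", X86_JA + 0x10, off);
	}

	int main(void)
	{
		emit_cond_jmp(5);	/* 77 05 */
		emit_cond_jmp(300);	/* 0f 87 rel32=300 */
		return 0;
	}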
681 755
682 switch (filter[i].code) { 756 case BPF_JMP | BPF_JA:
683 case BPF_S_JMP_JGT_X: 757 jmp_offset = addrs[i + insn->off] - addrs[i];
684 case BPF_S_JMP_JGE_X: 758 if (!jmp_offset)
685 case BPF_S_JMP_JEQ_X: 759 /* optimize out nop jumps */
686 seen |= SEEN_XREG; 760 break;
687 EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ 761emit_jmp:
688 break; 762 if (is_imm8(jmp_offset)) {
689 case BPF_S_JMP_JSET_X: 763 EMIT2(0xEB, jmp_offset);
690 seen |= SEEN_XREG; 764 } else if (is_simm32(jmp_offset)) {
691 EMIT2(0x85, 0xd8); /* test %ebx,%eax */ 765 EMIT1_off32(0xE9, jmp_offset);
692 break; 766 } else {
693 case BPF_S_JMP_JEQ_K: 767 pr_err("jmp gen bug %llx\n", jmp_offset);
694 if (K == 0) { 768 return -EFAULT;
695 EMIT2(0x85, 0xc0); /* test %eax,%eax */ 769 }
696 break; 770 break;
697 } 771
698 case BPF_S_JMP_JGT_K: 772 case BPF_LD | BPF_IND | BPF_W:
699 case BPF_S_JMP_JGE_K: 773 func = sk_load_word;
700 if (K <= 127) 774 goto common_load;
701 EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ 775 case BPF_LD | BPF_ABS | BPF_W:
776 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
777common_load: ctx->seen_ld_abs = true;
778 jmp_offset = func - (image + addrs[i]);
779 if (!func || !is_simm32(jmp_offset)) {
780 pr_err("unsupported bpf func %d addr %p image %p\n",
781 imm32, func, image);
782 return -EINVAL;
783 }
784 if (BPF_MODE(insn->code) == BPF_ABS) {
785 /* mov %esi, imm32 */
786 EMIT1_off32(0xBE, imm32);
787 } else {
788 /* mov %rsi, src_reg */
789 EMIT_mov(BPF_REG_2, src_reg);
790 if (imm32) {
791 if (is_imm8(imm32))
792 /* add %esi, imm8 */
793 EMIT3(0x83, 0xC6, imm32);
702 else 794 else
703 EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ 795 /* add %esi, imm32 */
704 break; 796 EMIT2_off32(0x81, 0xC6, imm32);
705 case BPF_S_JMP_JSET_K:
706 if (K <= 0xFF)
707 EMIT2(0xa8, K); /* test imm8,%al */
708 else if (!(K & 0xFFFF00FF))
709 EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
710 else if (K <= 0xFFFF) {
711 EMIT2(0x66, 0xa9); /* test imm16,%ax */
712 EMIT(K, 2);
713 } else {
714 EMIT1_off32(0xa9, K); /* test imm32,%eax */
715 }
716 break;
717 } 797 }
718 if (filter[i].jt != 0) {
719 if (filter[i].jf && f_offset)
720 t_offset += is_near(f_offset) ? 2 : 5;
721 EMIT_COND_JMP(t_op, t_offset);
722 if (filter[i].jf)
723 EMIT_JMP(f_offset);
724 break;
725 }
726 EMIT_COND_JMP(f_op, f_offset);
727 break;
728 default:
729 /* hmm, too complex filter, give up with jit compiler */
730 goto out;
731 } 798 }
732 ilen = prog - temp; 799 /* skb pointer is in R6 (%rbx), it will be copied into
733 if (image) { 800 * %rdi if skb_copy_bits() call is necessary.
734 if (unlikely(proglen + ilen > oldproglen)) { 801 * sk_load_* helpers also use %r10 and %r9d.
735 pr_err("bpb_jit_compile fatal error\n"); 802 * See bpf_jit.S
736 kfree(addrs); 803 */
737 module_free(NULL, header); 804 EMIT1_off32(0xE8, jmp_offset); /* call */
738 return; 805 break;
739 } 806
740 memcpy(image + proglen, temp, ilen); 807 case BPF_LD | BPF_IND | BPF_H:
808 func = sk_load_half;
809 goto common_load;
810 case BPF_LD | BPF_ABS | BPF_H:
811 func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
812 goto common_load;
813 case BPF_LD | BPF_IND | BPF_B:
814 func = sk_load_byte;
815 goto common_load;
816 case BPF_LD | BPF_ABS | BPF_B:
817 func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
818 goto common_load;
819
820 case BPF_JMP | BPF_EXIT:
821 if (i != insn_cnt - 1) {
822 jmp_offset = ctx->cleanup_addr - addrs[i];
823 goto emit_jmp;
741 } 824 }
742 proglen += ilen; 825 /* update cleanup_addr */
743 addrs[i] = proglen; 826 ctx->cleanup_addr = proglen;
744 prog = temp; 827 /* mov rbx, qword ptr [rbp-X] */
828 EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
829 /* mov r13, qword ptr [rbp-X] */
830 EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
831 /* mov r14, qword ptr [rbp-X] */
832 EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
833 /* mov r15, qword ptr [rbp-X] */
834 EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
835
836 EMIT1(0xC9); /* leave */
837 EMIT1(0xC3); /* ret */
838 break;
839
840 default:
841 /* By design x64 JIT should support all BPF instructions
842 * This error will be seen if new instruction was added
843 * to interpreter, but not to JIT
844 * or if there is junk in sk_filter
845 */
846 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
847 return -EINVAL;
745 } 848 }
746 /* last bpf instruction is always a RET :
747 * use it to give the cleanup instruction(s) addr
748 */
749 cleanup_addr = proglen - 1; /* ret */
750 if (seen_or_pass0)
751 cleanup_addr -= 1; /* leaveq */
752 if (seen_or_pass0 & SEEN_XREG)
753 cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
754 849
850 ilen = prog - temp;
851 if (image) {
852 if (unlikely(proglen + ilen > oldproglen)) {
853 pr_err("bpf_jit_compile fatal error\n");
854 return -EFAULT;
855 }
856 memcpy(image + proglen, temp, ilen);
857 }
858 proglen += ilen;
859 addrs[i] = proglen;
860 prog = temp;
861 }
862 return proglen;
863}
864
865void bpf_jit_compile(struct sk_filter *prog)
866{
867}
868
869void bpf_int_jit_compile(struct sk_filter *prog)
870{
871 struct bpf_binary_header *header = NULL;
872 int proglen, oldproglen = 0;
873 struct jit_context ctx = {};
874 u8 *image = NULL;
875 int *addrs;
876 int pass;
877 int i;
878
879 if (!bpf_jit_enable)
880 return;
881
882 if (!prog || !prog->len)
883 return;
884
885 addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
886 if (!addrs)
887 return;
888
889 /* Before first pass, make a rough estimation of addrs[]
890 * each bpf instruction is translated to less than 64 bytes
891 */
892 for (proglen = 0, i = 0; i < prog->len; i++) {
893 proglen += 64;
894 addrs[i] = proglen;
895 }
896 ctx.cleanup_addr = proglen;
897
898 for (pass = 0; pass < 10; pass++) {
899 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
900 if (proglen <= 0) {
901 image = NULL;
902 if (header)
903 module_free(NULL, header);
904 goto out;
905 }
755 if (image) { 906 if (image) {
756 if (proglen != oldproglen) 907 if (proglen != oldproglen)
757 pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); 908 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
909 proglen, oldproglen);
758 break; 910 break;
759 } 911 }
760 if (proglen == oldproglen) { 912 if (proglen == oldproglen) {
@@ -766,17 +918,16 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
766 } 918 }
767 919
768 if (bpf_jit_enable > 1) 920 if (bpf_jit_enable > 1)
769 bpf_jit_dump(flen, proglen, pass, image); 921 bpf_jit_dump(prog->len, proglen, 0, image);
770 922
771 if (image) { 923 if (image) {
772 bpf_flush_icache(header, image + proglen); 924 bpf_flush_icache(header, image + proglen);
773 set_memory_ro((unsigned long)header, header->pages); 925 set_memory_ro((unsigned long)header, header->pages);
774 fp->bpf_func = (void *)image; 926 prog->bpf_func = (void *)image;
775 fp->jited = 1; 927 prog->jited = 1;
776 } 928 }
777out: 929out:
778 kfree(addrs); 930 kfree(addrs);
779 return;
780} 931}
781 932
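bpf_int_jit_compile() is a fixed-point iteration: each do_jit() pass re-emits against the previous pass's addrs[], so branches that now fit in rel8 displacements shrink the image until proglen stops changing, after which the binary image is allocated and filled on the final pass. A toy model of the convergence (sizes are made up):

	#include <stdio.h>

	/* stand-in for do_jit(): emitted size shrinks as offsets tighten */
	static int fake_jit_pass(int assumed_len)
	{
		return assumed_len > 100 ? assumed_len - 30 : assumed_len;
	}

	int main(void)
	{
		int proglen = 64 * 3;	/* pessimistic 64 bytes per insn */
		int oldproglen = 0;
		int pass;

		for (pass = 0; pass < 10; pass++) {
			proglen = fake_jit_pass(proglen);
			printf("pass %d: proglen=%d\n", pass, proglen);
			if (proglen == oldproglen)	/* converged */
				break;
			oldproglen = proglen;
		}
		return 0;
	}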
782static void bpf_jit_free_deferred(struct work_struct *work) 933static void bpf_jit_free_deferred(struct work_struct *work)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 204814e88e46..d4725fc0395d 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2780,7 +2780,7 @@ static struct pci_driver fore200e_pca_driver = {
2780 2780
2781static int __init fore200e_module_init(void) 2781static int __init fore200e_module_init(void)
2782{ 2782{
2783 int err; 2783 int err = 0;
2784 2784
2785 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2785 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2786 2786
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1bdf104e90bb..b621f56a36be 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2551,12 +2551,12 @@ done:
2551 timeout = 5 * 1000; 2551 timeout = 5 * 1000;
2552 while (atomic_read(&vc->scq->used) > 0) { 2552 while (atomic_read(&vc->scq->used) > 0) {
2553 timeout = msleep_interruptible(timeout); 2553 timeout = msleep_interruptible(timeout);
2554 if (!timeout) 2554 if (!timeout) {
2555 pr_warn("%s: SCQ drain timeout: %u used\n",
2556 card->name, atomic_read(&vc->scq->used));
2555 break; 2557 break;
2558 }
2556 } 2559 }
2557 if (!timeout)
2558 printk("%s: SCQ drain timeout: %u used\n",
2559 card->name, atomic_read(&vc->scq->used));
2560 2560
2561 writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ); 2561 writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ);
2562 clear_scd(card, vc->scq, vc->class); 2562 clear_scd(card, vc->scq, vc->class);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a83b57e57b63..f98380648cb3 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -193,9 +193,10 @@ static int ath3k_load_firmware(struct usb_device *udev,
193 sent += 20; 193 sent += 20;
194 count -= 20; 194 count -= 20;
195 195
196 pipe = usb_sndbulkpipe(udev, 0x02);
197
196 while (count) { 198 while (count) {
197 size = min_t(uint, count, BULK_SIZE); 199 size = min_t(uint, count, BULK_SIZE);
198 pipe = usb_sndbulkpipe(udev, 0x02);
199 memcpy(send_buf, firmware->data + sent, size); 200 memcpy(send_buf, firmware->data + sent, size);
200 201
201 err = usb_bulk_msg(udev, pipe, send_buf, size, 202 err = usb_bulk_msg(udev, pipe, send_buf, size,
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 7399303d7d99..dc79f88f8717 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -59,6 +59,8 @@ struct btmrvl_device {
59}; 59};
60 60
61struct btmrvl_adapter { 61struct btmrvl_adapter {
62 void *hw_regs_buf;
63 u8 *hw_regs;
62 u32 int_count; 64 u32 int_count;
63 struct sk_buff_head tx_queue; 65 struct sk_buff_head tx_queue;
64 u8 psmode; 66 u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
140bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb); 142bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
141int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb); 143int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
142 144
143int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd); 145int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
144int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv); 146int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
145int btmrvl_enable_ps(struct btmrvl_private *priv); 147int btmrvl_enable_ps(struct btmrvl_private *priv);
146int btmrvl_prepare_command(struct btmrvl_private *priv); 148int btmrvl_prepare_command(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 2c4997ce2484..e9dbddb0b8f1 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -24,6 +24,7 @@
24#include <net/bluetooth/hci_core.h> 24#include <net/bluetooth/hci_core.h>
25 25
26#include "btmrvl_drv.h" 26#include "btmrvl_drv.h"
27#include "btmrvl_sdio.h"
27 28
28#define VERSION "1.0" 29#define VERSION "1.0"
29 30
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
201 return 0; 202 return 0;
202} 203}
203 204
204int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd) 205int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
205{ 206{
206 int ret; 207 int ret;
207 208
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
337 338
338static void btmrvl_init_adapter(struct btmrvl_private *priv) 339static void btmrvl_init_adapter(struct btmrvl_private *priv)
339{ 340{
341 int buf_size;
342
340 skb_queue_head_init(&priv->adapter->tx_queue); 343 skb_queue_head_init(&priv->adapter->tx_queue);
341 344
342 priv->adapter->ps_state = PS_AWAKE; 345 priv->adapter->ps_state = PS_AWAKE;
343 346
347 buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
348 priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
349 if (!priv->adapter->hw_regs_buf) {
350 priv->adapter->hw_regs = NULL;
351 BT_ERR("Unable to allocate buffer for hw_regs.");
352 } else {
353 priv->adapter->hw_regs =
354 (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
355 BTSDIO_DMA_ALIGN);
356 BT_DBG("hw_regs_buf=%p hw_regs=%p",
357 priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
358 }
359
344 init_waitqueue_head(&priv->adapter->cmd_wait_q); 360 init_waitqueue_head(&priv->adapter->cmd_wait_q);
345} 361}
346 362
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
348{ 364{
349 skb_queue_purge(&priv->adapter->tx_queue); 365 skb_queue_purge(&priv->adapter->tx_queue);
350 366
367 kfree(priv->adapter->hw_regs_buf);
351 kfree(priv->adapter); 368 kfree(priv->adapter);
352 369
353 priv->adapter = NULL; 370 priv->adapter = NULL;
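The hw_regs_buf/hw_regs pair added in btmrvl_init_adapter() is the usual over-allocate-and-round-up trick for carving a DMA-aligned window out of a plain allocation: allocate alignment - 1 extra bytes, round the pointer up, and later free the original pointer, not the aligned one. A userspace sketch with ALIGN_SZ/ALIGN_ADDR expanded to their conventional power-of-two definitions (assumed here, not copied from the btmrvl headers):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DMA_ALIGN   8    /* stand-in for BTSDIO_DMA_ALIGN */
    #define BLOCK_SIZE  64   /* stand-in for SDIO_BLOCK_SIZE */

    /* worst case the base pointer is misaligned by DMA_ALIGN - 1 bytes */
    #define ALIGN_SZ(sz, a)  ((sz) + (a) - 1)
    /* round the address up to the next multiple of a (a a power of 2) */
    #define ALIGN_ADDR(p, a) \
            ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

    int main(void)
    {
            void *buf = malloc(ALIGN_SZ(BLOCK_SIZE, DMA_ALIGN));
            uint8_t *regs;

            if (!buf)
                    return 1;
            regs = ALIGN_ADDR(buf, DMA_ALIGN);
            printf("buf=%p regs=%p\n", buf, (void *)regs);
            /* use regs as the DMA-able window; free the original pointer */
            free(buf);
            return 0;
    }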
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 1b52c9f5230d..9dedca516ff5 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
64 .io_port_0 = 0x00, 64 .io_port_0 = 0x00,
65 .io_port_1 = 0x01, 65 .io_port_1 = 0x01,
66 .io_port_2 = 0x02, 66 .io_port_2 = 0x02,
67 .int_read_to_clear = false,
67}; 68};
68static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { 69static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
69 .cfg = 0x00, 70 .cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
80 .io_port_0 = 0x78, 81 .io_port_0 = 0x78,
81 .io_port_1 = 0x79, 82 .io_port_1 = 0x79,
82 .io_port_2 = 0x7a, 83 .io_port_2 = 0x7a,
84 .int_read_to_clear = false,
83}; 85};
84 86
85static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = { 87static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
97 .io_port_0 = 0xd8, 99 .io_port_0 = 0xd8,
98 .io_port_1 = 0xd9, 100 .io_port_1 = 0xd9,
99 .io_port_2 = 0xda, 101 .io_port_2 = 0xda,
102 .int_read_to_clear = true,
103 .host_int_rsr = 0x01,
104 .card_misc_cfg = 0xcc,
100}; 105};
101 106
102static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 107static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
667 return 0; 672 return 0;
668} 673}
669 674
670static void btmrvl_sdio_interrupt(struct sdio_func *func) 675static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
671{ 676{
672 struct btmrvl_private *priv; 677 struct btmrvl_adapter *adapter = card->priv->adapter;
673 struct btmrvl_sdio_card *card;
674 ulong flags;
675 u8 ireg = 0;
676 int ret; 678 int ret;
677 679
678 card = sdio_get_drvdata(func); 680 ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
679 if (!card || !card->priv) { 681 if (ret) {
680 BT_ERR("sbi_interrupt(%p) card or priv is " 682 BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
681 "NULL, card=%p\n", func, card); 683 return ret;
682 return;
683 } 684 }
684 685
685 priv = card->priv; 686 *ireg = adapter->hw_regs[card->reg->host_intstatus];
687 BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
688
689 return 0;
690}
686 691
687 ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret); 692static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
693{
694 int ret;
695
696 *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
688 if (ret) { 697 if (ret) {
689 BT_ERR("sdio_readb: read int status register failed"); 698 BT_ERR("sdio_readb: read int status failed: %d", ret);
690 return; 699 return ret;
691 } 700 }
692 701
693 if (ireg != 0) { 702 if (*ireg) {
694 /* 703 /*
695 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS 704 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
696 * Clear the interrupt status register and re-enable the 705 * Clear the interrupt status register and re-enable the
697 * interrupt. 706 * interrupt.
698 */ 707 */
699 BT_DBG("ireg = 0x%x", ireg); 708 BT_DBG("int_status = 0x%x", *ireg);
700 709
701 sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS | 710 sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
702 UP_LD_HOST_INT_STATUS), 711 UP_LD_HOST_INT_STATUS),
703 card->reg->host_intstatus, &ret); 712 card->reg->host_intstatus, &ret);
704 if (ret) { 713 if (ret) {
705 BT_ERR("sdio_writeb: clear int status register failed"); 714 BT_ERR("sdio_writeb: clear int status failed: %d", ret);
706 return; 715 return ret;
707 } 716 }
708 } 717 }
709 718
719 return 0;
720}
721
722static void btmrvl_sdio_interrupt(struct sdio_func *func)
723{
724 struct btmrvl_private *priv;
725 struct btmrvl_sdio_card *card;
726 ulong flags;
727 u8 ireg = 0;
728 int ret;
729
730 card = sdio_get_drvdata(func);
731 if (!card || !card->priv) {
732 BT_ERR("sbi_interrupt(%p) card or priv is "
733 "NULL, card=%p\n", func, card);
734 return;
735 }
736
737 priv = card->priv;
738
739 if (card->reg->int_read_to_clear)
740 ret = btmrvl_sdio_read_to_clear(card, &ireg);
741 else
742 ret = btmrvl_sdio_write_to_clear(card, &ireg);
743
744 if (ret)
745 return;
746
710 spin_lock_irqsave(&priv->driver_lock, flags); 747 spin_lock_irqsave(&priv->driver_lock, flags);
711 sdio_ireg |= ireg; 748 sdio_ireg |= ireg;
712 spin_unlock_irqrestore(&priv->driver_lock, flags); 749 spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
777 814
778 BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport); 815 BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
779 816
817 if (card->reg->int_read_to_clear) {
818 reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
819 if (ret < 0) {
820 ret = -EIO;
821 goto release_irq;
822 }
823 sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
824 if (ret < 0) {
825 ret = -EIO;
826 goto release_irq;
827 }
828
829 reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
830 if (ret < 0) {
831 ret = -EIO;
832 goto release_irq;
833 }
834 sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
835 if (ret < 0) {
836 ret = -EIO;
837 goto release_irq;
838 }
839 }
840
780 sdio_set_drvdata(func, card); 841 sdio_set_drvdata(func, card);
781 842
782 sdio_release_host(func); 843 sdio_release_host(func);
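Two acknowledgement models are split out above: btmrvl_sdio_read_to_clear() pulls a whole register block with sdio_readsb(), which on the newer parts clears the interrupt as a side effect of the read, while btmrvl_sdio_write_to_clear() must write an acknowledgement back. The write-back value ~ireg & (DN_LD | UP_LD) reads as write-zero-to-clear semantics: bits observed set are written as 0 (cleared), bits not observed are written as 1 (left pending), so an interrupt arriving between the read and the write is not lost. The masking in isolation, with bit values assumed for illustration:

    #include <stdio.h>

    #define DN_LD_HOST_INT_STATUS 0x01  /* bit values assumed for illustration */
    #define UP_LD_HOST_INT_STATUS 0x02

    int main(void)
    {
            unsigned char ireg = DN_LD_HOST_INT_STATUS;  /* saw DN_LD pending */
            /* clear only the bits we observed; leave unseen bits pending */
            unsigned char ack = ~ireg &
                    (DN_LD_HOST_INT_STATUS | UP_LD_HOST_INT_STATUS);

            printf("status=0x%02x -> ack write 0x%02x\n", ireg, ack);
            return 0;
    }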
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca9..d4dd3b0fa53d 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
78 u8 io_port_0; 78 u8 io_port_0;
79 u8 io_port_1; 79 u8 io_port_1;
80 u8 io_port_2; 80 u8 io_port_2;
81 bool int_read_to_clear;
82 u8 host_int_rsr;
83 u8 card_misc_cfg;
81}; 84};
82 85
83struct btmrvl_sdio_card { 86struct btmrvl_sdio_card {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a7dfbf9a3afb..a1c80b0c7663 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
49#define BTUSB_WRONG_SCO_MTU 0x40 49#define BTUSB_WRONG_SCO_MTU 0x40
50#define BTUSB_ATH3012 0x80 50#define BTUSB_ATH3012 0x80
51#define BTUSB_INTEL 0x100 51#define BTUSB_INTEL 0x100
52#define BTUSB_BCM_PATCHRAM 0x200
52 53
53static const struct usb_device_id btusb_table[] = { 54static const struct usb_device_id btusb_table[] = {
54 /* Generic Bluetooth USB device */ 55 /* Generic Bluetooth USB device */
@@ -111,7 +112,8 @@ static const struct usb_device_id btusb_table[] = {
111 { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) }, 112 { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
112 113
113 /* Broadcom devices with vendor specific id */ 114 /* Broadcom devices with vendor specific id */
114 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 115 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
116 .driver_info = BTUSB_BCM_PATCHRAM },
115 117
116 /* Belkin F8065bf - Broadcom based */ 118 /* Belkin F8065bf - Broadcom based */
117 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, 119 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@@ -1381,6 +1383,154 @@ exit_mfg_deactivate:
1381 return 0; 1383 return 0;
1382} 1384}
1383 1385
1386static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
1387{
1388 struct btusb_data *data = hci_get_drvdata(hdev);
1389 struct usb_device *udev = data->udev;
1390 char fw_name[64];
1391 const struct firmware *fw;
1392 const u8 *fw_ptr;
1393 size_t fw_size;
1394 const struct hci_command_hdr *cmd;
1395 const u8 *cmd_param;
1396 u16 opcode;
1397 struct sk_buff *skb;
1398 struct hci_rp_read_local_version *ver;
1399 long ret;
1400
1401 snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd",
1402 udev->product ? udev->product : "BCM",
1403 le16_to_cpu(udev->descriptor.idVendor),
1404 le16_to_cpu(udev->descriptor.idProduct));
1405
1406 ret = request_firmware(&fw, fw_name, &hdev->dev);
1407 if (ret < 0) {
1408 BT_INFO("%s: BCM: patch %s not found", hdev->name,
1409 fw_name);
1410 return 0;
1411 }
1412
1413 /* Reset */
1414 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
1415 if (IS_ERR(skb)) {
1416 ret = PTR_ERR(skb);
1417 BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
1418 goto done;
1419 }
1420 kfree_skb(skb);
1421
1422 /* Read Local Version Info */
1423 skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
1424 HCI_INIT_TIMEOUT);
1425 if (IS_ERR(skb)) {
1426 ret = PTR_ERR(skb);
1427 BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
1428 hdev->name, ret);
1429 goto done;
1430 }
1431
1432 if (skb->len != sizeof(*ver)) {
1433 BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
1434 hdev->name);
1435 kfree_skb(skb);
1436 ret = -EIO;
1437 goto done;
1438 }
1439
1440 ver = (struct hci_rp_read_local_version *) skb->data;
1441 BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
1442 "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
1443 ver->lmp_ver, ver->lmp_subver);
1444 kfree_skb(skb);
1445
1446 /* Start Download */
1447 skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
1448 if (IS_ERR(skb)) {
1449 ret = PTR_ERR(skb);
1450 BT_ERR("%s: BCM: Download Minidrv command failed (%ld)",
1451 hdev->name, ret);
1452 goto reset_fw;
1453 }
1454 kfree_skb(skb);
1455
1456 /* 50 msec delay after Download Minidrv completes */
1457 msleep(50);
1458
1459 fw_ptr = fw->data;
1460 fw_size = fw->size;
1461
1462 while (fw_size >= sizeof(*cmd)) {
1463 cmd = (struct hci_command_hdr *) fw_ptr;
1464 fw_ptr += sizeof(*cmd);
1465 fw_size -= sizeof(*cmd);
1466
1467 if (fw_size < cmd->plen) {
1468 BT_ERR("%s: BCM: patch %s is corrupted",
1469 hdev->name, fw_name);
1470 ret = -EINVAL;
1471 goto reset_fw;
1472 }
1473
1474 cmd_param = fw_ptr;
1475 fw_ptr += cmd->plen;
1476 fw_size -= cmd->plen;
1477
1478 opcode = le16_to_cpu(cmd->opcode);
1479
1480 skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
1481 HCI_INIT_TIMEOUT);
1482 if (IS_ERR(skb)) {
1483 ret = PTR_ERR(skb);
1484 BT_ERR("%s: BCM: patch command %04x failed (%ld)",
1485 hdev->name, opcode, ret);
1486 goto reset_fw;
1487 }
1488 kfree_skb(skb);
1489 }
1490
1491 /* 250 msec delay after Launch Ram completes */
1492 msleep(250);
1493
1494reset_fw:
1495 /* Reset */
1496 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
1497 if (IS_ERR(skb)) {
1498 ret = PTR_ERR(skb);
1499 BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
1500 goto done;
1501 }
1502 kfree_skb(skb);
1503
1504 /* Read Local Version Info */
1505 skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
1506 HCI_INIT_TIMEOUT);
1507 if (IS_ERR(skb)) {
1508 ret = PTR_ERR(skb);
1509 BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
1510 hdev->name, ret);
1511 goto done;
1512 }
1513
1514 if (skb->len != sizeof(*ver)) {
1515 BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
1516 hdev->name);
1517 kfree_skb(skb);
1518 ret = -EIO;
1519 goto done;
1520 }
1521
1522 ver = (struct hci_rp_read_local_version *) skb->data;
1523 BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
1524 "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
1525 ver->lmp_ver, ver->lmp_subver);
1526 kfree_skb(skb);
1527
1528done:
1529 release_firmware(fw);
1530
1531 return ret;
1532}
1533
1384static int btusb_probe(struct usb_interface *intf, 1534static int btusb_probe(struct usb_interface *intf,
1385 const struct usb_device_id *id) 1535 const struct usb_device_id *id)
1386{ 1536{
@@ -1486,6 +1636,9 @@ static int btusb_probe(struct usb_interface *intf,
1486 if (id->driver_info & BTUSB_BCM92035) 1636 if (id->driver_info & BTUSB_BCM92035)
1487 hdev->setup = btusb_setup_bcm92035; 1637 hdev->setup = btusb_setup_bcm92035;
1488 1638
1639 if (id->driver_info & BTUSB_BCM_PATCHRAM)
1640 hdev->setup = btusb_setup_bcm_patchram;
1641
1489 if (id->driver_info & BTUSB_INTEL) 1642 if (id->driver_info & BTUSB_INTEL)
1490 hdev->setup = btusb_setup_intel; 1643 hdev->setup = btusb_setup_intel;
1491 1644
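btusb_setup_bcm_patchram() treats the .hcd patch file as a flat concatenation of HCI commands: a 3-byte header (little-endian 16-bit opcode plus an 8-bit parameter length) followed by that many parameter bytes, repeated until the buffer is exhausted, with a short command signalling a corrupt patch. A standalone parser for that framing, assuming the same wire layout (the second sample opcode below is made up; 0xfc2e is the Download Minidrv command seen above):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_LEN 3   /* sizeof(struct hci_command_hdr): opcode + plen */

    static int parse_hcd(const uint8_t *fw, size_t size)
    {
            while (size >= HDR_LEN) {
                    uint16_t opcode = fw[0] | (fw[1] << 8);
                    unsigned plen = fw[2];

                    fw += HDR_LEN;
                    size -= HDR_LEN;
                    if (size < plen)    /* truncated command: corrupt patch */
                            return -1;
                    printf("cmd 0x%04x, %u parameter bytes\n", opcode, plen);
                    fw += plen;
                    size -= plen;
            }
            return 0;
    }

    int main(void)
    {
            /* Download Minidrv (0xfc2e, no params), then a 2-byte command */
            const uint8_t fw[] = { 0x2e, 0xfc, 0x00,
                                   0x4c, 0xfc, 0x02, 0xaa, 0xbb };
            return parse_hcd(fw, sizeof(fw)) ? 1 : 0;
    }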
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7048a583fe51..66db9a803373 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -55,13 +55,6 @@ struct h4_struct {
55 struct sk_buff_head txq; 55 struct sk_buff_head txq;
56}; 56};
57 57
58/* H4 receiver States */
59#define H4_W4_PACKET_TYPE 0
60#define H4_W4_EVENT_HDR 1
61#define H4_W4_ACL_HDR 2
62#define H4_W4_SCO_HDR 3
63#define H4_W4_DATA 4
64
65/* Initialize protocol */ 58/* Initialize protocol */
66static int h4_open(struct hci_uart *hu) 59static int h4_open(struct hci_uart *hu)
67{ 60{
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 527a43da3d33..3795fce8a830 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -116,9 +116,25 @@ static struct ti_dt_clk am43xx_clks[] = {
116 116
117int __init am43xx_dt_clk_init(void) 117int __init am43xx_dt_clk_init(void)
118{ 118{
119 struct clk *clk1, *clk2;
120
119 ti_dt_clocks_register(am43xx_clks); 121 ti_dt_clocks_register(am43xx_clks);
120 122
121 omap2_clk_disable_autoidle_all(); 123 omap2_clk_disable_autoidle_all();
122 124
125 /*
 126 * cpsw_cpts_rft_clk has a choice of 3 clocksources:
 127 * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
 128 * By default dpll_core_m4_ck is selected; with this as the clock
 129 * source the CPTS does not work properly. It gives clockcheck errors
 130 * while running PTP:
 131 * clockcheck: clock jumped backward or running slower than expected!
 132 * Selecting dpll_core_m5_ck as the clocksource fixes this issue.
 133 * In AM335x, dpll_core_m5_ck is the default clocksource.
134 */
135 clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
136 clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
137 clk_set_parent(clk1, clk2);
138
123 return 0; 139 return 0;
124} 140}
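The reparenting added to am43xx_dt_clk_init() is the minimal form; clk_get_sys() returns an ERR_PTR on failure and clk_set_parent() returns a negative errno, so a more defensive variant of the same call sequence would look like the sketch below (function name hypothetical):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/printk.h>

    static void am43xx_cpts_clksrc_fixup(void)   /* hypothetical name */
    {
            struct clk *cpts, *m5;

            cpts = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
            m5 = clk_get_sys(NULL, "dpll_core_m5_ck");
            if (IS_ERR(cpts) || IS_ERR(m5)) {
                    pr_warn("cpts: clock lookup failed\n");
                    return;
            }
            if (clk_set_parent(cpts, m5))
                    pr_warn("cpts: switch to dpll_core_m5_ck failed\n");
    }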
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6c8b032cacba..ed9350d42764 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -404,7 +404,7 @@ static u32 next_vp;
404 * performance critical channels (IDE, SCSI and Network) will be uniformly 404 * performance critical channels (IDE, SCSI and Network) will be uniformly
405 * distributed across all available CPUs. 405 * distributed across all available CPUs.
406 */ 406 */
407static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid) 407static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
408{ 408{
409 u32 cur_cpu; 409 u32 cur_cpu;
410 int i; 410 int i;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 18d1a8404cbc..22b750749a39 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -649,9 +649,9 @@ extern struct vmbus_connection vmbus_connection;
649 649
650/* General vmbus interface */ 650/* General vmbus interface */
651 651
652struct hv_device *vmbus_device_create(uuid_le *type, 652struct hv_device *vmbus_device_create(const uuid_le *type,
653 uuid_le *instance, 653 const uuid_le *instance,
654 struct vmbus_channel *channel); 654 struct vmbus_channel *channel);
655 655
656int vmbus_device_register(struct hv_device *child_device_obj); 656int vmbus_device_register(struct hv_device *child_device_obj);
657void vmbus_device_unregister(struct hv_device *device_obj); 657void vmbus_device_unregister(struct hv_device *device_obj);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e53a3c2607e..4d6b26979fbd 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -435,7 +435,7 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
435 return ret; 435 return ret;
436} 436}
437 437
438static uuid_le null_guid; 438static const uuid_le null_guid;
439 439
440static inline bool is_null_guid(const __u8 *guid) 440static inline bool is_null_guid(const __u8 *guid)
441{ 441{
@@ -450,7 +450,7 @@ static inline bool is_null_guid(const __u8 *guid)
450 */ 450 */
451static const struct hv_vmbus_device_id *hv_vmbus_get_id( 451static const struct hv_vmbus_device_id *hv_vmbus_get_id(
452 const struct hv_vmbus_device_id *id, 452 const struct hv_vmbus_device_id *id,
453 __u8 *guid) 453 const __u8 *guid)
454{ 454{
455 for (; !is_null_guid(id->guid); id++) 455 for (; !is_null_guid(id->guid); id++)
456 if (!memcmp(&id->guid, guid, sizeof(uuid_le))) 456 if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
@@ -779,9 +779,9 @@ EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
779 * vmbus_device_create - Creates and registers a new child device 779 * vmbus_device_create - Creates and registers a new child device
780 * on the vmbus. 780 * on the vmbus.
781 */ 781 */
782struct hv_device *vmbus_device_create(uuid_le *type, 782struct hv_device *vmbus_device_create(const uuid_le *type,
783 uuid_le *instance, 783 const uuid_le *instance,
784 struct vmbus_channel *channel) 784 struct vmbus_channel *channel)
785{ 785{
786 struct hv_device *child_device_obj; 786 struct hv_device *child_device_obj;
787 787
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 96d7131ab974..5e153f6d4b48 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -234,12 +234,16 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
234 234
235static void set_emss(struct c4iw_ep *ep, u16 opt) 235static void set_emss(struct c4iw_ep *ep, u16 opt)
236{ 236{
237 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; 237 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
238 sizeof(struct iphdr) - sizeof(struct tcphdr);
238 ep->mss = ep->emss; 239 ep->mss = ep->emss;
239 if (GET_TCPOPT_TSTAMP(opt)) 240 if (GET_TCPOPT_TSTAMP(opt))
240 ep->emss -= 12; 241 ep->emss -= 12;
241 if (ep->emss < 128) 242 if (ep->emss < 128)
242 ep->emss = 128; 243 ep->emss = 128;
244 if (ep->emss & 7)
245 PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
246 GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
243 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), 247 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
244 ep->mss, ep->emss); 248 ep->mss, ep->emss);
245} 249}
@@ -473,7 +477,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
473 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 477 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
474 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 478 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
475 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 479 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
476 flowc->mnemval[6].val = cpu_to_be32(snd_win); 480 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
477 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 481 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
478 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 482 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
479 /* Pad WR to 16 byte boundary */ 483 /* Pad WR to 16 byte boundary */
@@ -565,6 +569,17 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
565 sizeof(ep->com.mapped_remote_addr)); 569 sizeof(ep->com.mapped_remote_addr));
566} 570}
567 571
572static void best_mtu(const unsigned short *mtus, unsigned short mtu,
573 unsigned int *idx, int use_ts)
574{
575 unsigned short hdr_size = sizeof(struct iphdr) +
576 sizeof(struct tcphdr) +
577 (use_ts ? 12 : 0);
578 unsigned short data_size = mtu - hdr_size;
579
580 cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
581}
582
568static int send_connect(struct c4iw_ep *ep) 583static int send_connect(struct c4iw_ep *ep)
569{ 584{
570 struct cpl_act_open_req *req; 585 struct cpl_act_open_req *req;
@@ -591,6 +606,7 @@ static int send_connect(struct c4iw_ep *ep)
591 &ep->com.mapped_local_addr; 606 &ep->com.mapped_local_addr;
592 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) 607 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
593 &ep->com.mapped_remote_addr; 608 &ep->com.mapped_remote_addr;
609 int win;
594 610
595 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 611 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
596 roundup(sizev4, 16) : 612 roundup(sizev4, 16) :
@@ -606,8 +622,18 @@ static int send_connect(struct c4iw_ep *ep)
606 } 622 }
607 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 623 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
608 624
609 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 625 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
626 enable_tcp_timestamps);
610 wscale = compute_wscale(rcv_win); 627 wscale = compute_wscale(rcv_win);
628
629 /*
630 * Specify the largest window that will fit in opt0. The
631 * remainder will be specified in the rx_data_ack.
632 */
633 win = ep->rcv_win >> 10;
634 if (win > RCV_BUFSIZ_MASK)
635 win = RCV_BUFSIZ_MASK;
636
611 opt0 = (nocong ? NO_CONG(1) : 0) | 637 opt0 = (nocong ? NO_CONG(1) : 0) |
612 KEEP_ALIVE(1) | 638 KEEP_ALIVE(1) |
613 DELACK(1) | 639 DELACK(1) |
@@ -618,7 +644,7 @@ static int send_connect(struct c4iw_ep *ep)
618 SMAC_SEL(ep->smac_idx) | 644 SMAC_SEL(ep->smac_idx) |
619 DSCP(ep->tos) | 645 DSCP(ep->tos) |
620 ULP_MODE(ULP_MODE_TCPDDP) | 646 ULP_MODE(ULP_MODE_TCPDDP) |
621 RCV_BUFSIZ(rcv_win>>10); 647 RCV_BUFSIZ(win);
622 opt2 = RX_CHANNEL(0) | 648 opt2 = RX_CHANNEL(0) |
623 CCTRL_ECN(enable_ecn) | 649 CCTRL_ECN(enable_ecn) |
624 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 650 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -674,6 +700,13 @@ static int send_connect(struct c4iw_ep *ep)
674 req6->opt2 = cpu_to_be32(opt2); 700 req6->opt2 = cpu_to_be32(opt2);
675 } 701 }
676 } else { 702 } else {
703 u32 isn = (prandom_u32() & ~7UL) - 1;
704
705 opt2 |= T5_OPT_2_VALID;
706 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
707 if (peer2peer)
708 isn += 4;
709
677 if (ep->com.remote_addr.ss_family == AF_INET) { 710 if (ep->com.remote_addr.ss_family == AF_INET) {
678 t5_req = (struct cpl_t5_act_open_req *) 711 t5_req = (struct cpl_t5_act_open_req *)
679 skb_put(skb, wrlen); 712 skb_put(skb, wrlen);
@@ -690,6 +723,9 @@ static int send_connect(struct c4iw_ep *ep)
690 cxgb4_select_ntuple( 723 cxgb4_select_ntuple(
691 ep->com.dev->rdev.lldi.ports[0], 724 ep->com.dev->rdev.lldi.ports[0],
692 ep->l2t))); 725 ep->l2t)));
726 t5_req->rsvd = cpu_to_be32(isn);
727 PDBG("%s snd_isn %u\n", __func__,
728 be32_to_cpu(t5_req->rsvd));
693 t5_req->opt2 = cpu_to_be32(opt2); 729 t5_req->opt2 = cpu_to_be32(opt2);
694 } else { 730 } else {
695 t5_req6 = (struct cpl_t5_act_open_req6 *) 731 t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -713,6 +749,9 @@ static int send_connect(struct c4iw_ep *ep)
713 cxgb4_select_ntuple( 749 cxgb4_select_ntuple(
714 ep->com.dev->rdev.lldi.ports[0], 750 ep->com.dev->rdev.lldi.ports[0],
715 ep->l2t)); 751 ep->l2t));
752 t5_req6->rsvd = cpu_to_be32(isn);
753 PDBG("%s snd_isn %u\n", __func__,
754 be32_to_cpu(t5_req6->rsvd));
716 t5_req6->opt2 = cpu_to_be32(opt2); 755 t5_req6->opt2 = cpu_to_be32(opt2);
717 } 756 }
718 } 757 }
@@ -1186,6 +1225,14 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1186 return 0; 1225 return 0;
1187 } 1226 }
1188 1227
1228 /*
1229 * If we couldn't specify the entire rcv window at connection setup
1230 * due to the limit in the number of bits in the RCV_BUFSIZ field,
1231 * then add the overage in to the credits returned.
1232 */
1233 if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
1234 credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
1235
1189 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1236 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
1190 memset(req, 0, wrlen); 1237 memset(req, 0, wrlen);
1191 INIT_TP_WR(req, ep->hwtid); 1238 INIT_TP_WR(req, ep->hwtid);
@@ -1659,6 +1706,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1659 unsigned int mtu_idx; 1706 unsigned int mtu_idx;
1660 int wscale; 1707 int wscale;
1661 struct sockaddr_in *sin; 1708 struct sockaddr_in *sin;
1709 int win;
1662 1710
1663 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1711 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1664 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1712 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1681,8 +1729,18 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1681 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 1729 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
1682 req->tcb.tx_max = (__force __be32) jiffies; 1730 req->tcb.tx_max = (__force __be32) jiffies;
1683 req->tcb.rcv_adv = htons(1); 1731 req->tcb.rcv_adv = htons(1);
1684 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1732 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1733 enable_tcp_timestamps);
1685 wscale = compute_wscale(rcv_win); 1734 wscale = compute_wscale(rcv_win);
1735
1736 /*
1737 * Specify the largest window that will fit in opt0. The
1738 * remainder will be specified in the rx_data_ack.
1739 */
1740 win = ep->rcv_win >> 10;
1741 if (win > RCV_BUFSIZ_MASK)
1742 win = RCV_BUFSIZ_MASK;
1743
1686 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | 1744 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
1687 (nocong ? NO_CONG(1) : 0) | 1745 (nocong ? NO_CONG(1) : 0) |
1688 KEEP_ALIVE(1) | 1746 KEEP_ALIVE(1) |
@@ -1694,7 +1752,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1694 SMAC_SEL(ep->smac_idx) | 1752 SMAC_SEL(ep->smac_idx) |
1695 DSCP(ep->tos) | 1753 DSCP(ep->tos) |
1696 ULP_MODE(ULP_MODE_TCPDDP) | 1754 ULP_MODE(ULP_MODE_TCPDDP) |
1697 RCV_BUFSIZ(rcv_win >> 10)); 1755 RCV_BUFSIZ(win));
1698 req->tcb.opt2 = (__force __be32) (PACE(1) | 1756 req->tcb.opt2 = (__force __be32) (PACE(1) |
1699 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1757 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1700 RX_CHANNEL(0) | 1758 RX_CHANNEL(0) |
@@ -1731,6 +1789,13 @@ static int is_neg_adv(unsigned int status)
1731 status == CPL_ERR_KEEPALV_NEG_ADVICE; 1789 status == CPL_ERR_KEEPALV_NEG_ADVICE;
1732} 1790}
1733 1791
1792static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
1793{
1794 ep->snd_win = snd_win;
1795 ep->rcv_win = rcv_win;
1796 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
1797}
1798
1734#define ACT_OPEN_RETRY_COUNT 2 1799#define ACT_OPEN_RETRY_COUNT 2
1735 1800
1736static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 1801static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1779,6 +1844,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1779 ep->ctrlq_idx = cxgb4_port_idx(pdev); 1844 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1780 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1845 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1781 cxgb4_port_idx(pdev) * step]; 1846 cxgb4_port_idx(pdev) * step];
1847 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
1782 dev_put(pdev); 1848 dev_put(pdev);
1783 } else { 1849 } else {
1784 pdev = get_real_dev(n->dev); 1850 pdev = get_real_dev(n->dev);
@@ -1797,6 +1863,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1797 cdev->rdev.lldi.nchan; 1863 cdev->rdev.lldi.nchan;
1798 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 1864 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1799 cxgb4_port_idx(pdev) * step]; 1865 cxgb4_port_idx(pdev) * step];
1866 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
1800 1867
1801 if (clear_mpa_v1) { 1868 if (clear_mpa_v1) {
1802 ep->retry_with_mpa_v1 = 0; 1869 ep->retry_with_mpa_v1 = 0;
@@ -2027,13 +2094,36 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2027 u64 opt0; 2094 u64 opt0;
2028 u32 opt2; 2095 u32 opt2;
2029 int wscale; 2096 int wscale;
2097 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
2098 int win;
2030 2099
2031 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2100 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2032 BUG_ON(skb_cloned(skb)); 2101 BUG_ON(skb_cloned(skb));
2033 skb_trim(skb, sizeof(*rpl)); 2102
2034 skb_get(skb); 2103 skb_get(skb);
2035 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 2104 rpl = cplhdr(skb);
2105 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2106 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2107 rpl5 = (void *)rpl;
2108 INIT_TP_WR(rpl5, ep->hwtid);
2109 } else {
2110 skb_trim(skb, sizeof(*rpl));
2111 INIT_TP_WR(rpl, ep->hwtid);
2112 }
2113 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2114 ep->hwtid));
2115
2116 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2117 enable_tcp_timestamps && req->tcpopt.tstamp);
2036 wscale = compute_wscale(rcv_win); 2118 wscale = compute_wscale(rcv_win);
2119
2120 /*
2121 * Specify the largest window that will fit in opt0. The
2122 * remainder will be specified in the rx_data_ack.
2123 */
2124 win = ep->rcv_win >> 10;
2125 if (win > RCV_BUFSIZ_MASK)
2126 win = RCV_BUFSIZ_MASK;
2037 opt0 = (nocong ? NO_CONG(1) : 0) | 2127 opt0 = (nocong ? NO_CONG(1) : 0) |
2038 KEEP_ALIVE(1) | 2128 KEEP_ALIVE(1) |
2039 DELACK(1) | 2129 DELACK(1) |
@@ -2044,7 +2134,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2044 SMAC_SEL(ep->smac_idx) | 2134 SMAC_SEL(ep->smac_idx) |
2045 DSCP(ep->tos >> 2) | 2135 DSCP(ep->tos >> 2) |
2046 ULP_MODE(ULP_MODE_TCPDDP) | 2136 ULP_MODE(ULP_MODE_TCPDDP) |
2047 RCV_BUFSIZ(rcv_win>>10); 2137 RCV_BUFSIZ(win);
2048 opt2 = RX_CHANNEL(0) | 2138 opt2 = RX_CHANNEL(0) |
2049 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 2139 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
2050 2140
@@ -2064,14 +2154,18 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2064 opt2 |= CCTRL_ECN(1); 2154 opt2 |= CCTRL_ECN(1);
2065 } 2155 }
2066 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2156 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2157 u32 isn = (prandom_u32() & ~7UL) - 1;
2067 opt2 |= T5_OPT_2_VALID; 2158 opt2 |= T5_OPT_2_VALID;
2068 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2159 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
2160 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
2161 rpl5 = (void *)rpl;
2162 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2163 if (peer2peer)
2164 isn += 4;
2165 rpl5->iss = cpu_to_be32(isn);
2166 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
2069 } 2167 }
2070 2168
2071 rpl = cplhdr(skb);
2072 INIT_TP_WR(rpl, ep->hwtid);
2073 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2074 ep->hwtid));
2075 rpl->opt0 = cpu_to_be64(opt0); 2169 rpl->opt0 = cpu_to_be64(opt0);
2076 rpl->opt2 = cpu_to_be32(opt2); 2170 rpl->opt2 = cpu_to_be32(opt2);
2077 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2171 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -2136,6 +2230,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2136 int err; 2230 int err;
2137 u16 peer_mss = ntohs(req->tcpopt.mss); 2231 u16 peer_mss = ntohs(req->tcpopt.mss);
2138 int iptype; 2232 int iptype;
2233 unsigned short hdrs;
2139 2234
2140 parent_ep = lookup_stid(t, stid); 2235 parent_ep = lookup_stid(t, stid);
2141 if (!parent_ep) { 2236 if (!parent_ep) {
@@ -2193,8 +2288,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2193 goto reject; 2288 goto reject;
2194 } 2289 }
2195 2290
2196 if (peer_mss && child_ep->mtu > (peer_mss + 40)) 2291 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
2197 child_ep->mtu = peer_mss + 40; 2292 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2293 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2294 child_ep->mtu = peer_mss + hdrs;
2198 2295
2199 state_set(&child_ep->com, CONNECTING); 2296 state_set(&child_ep->com, CONNECTING);
2200 child_ep->com.dev = dev; 2297 child_ep->com.dev = dev;
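The cm.c changes above size the TCP receive window in two pieces: opt0's RCV_BUFSIZ field carries the window in 1 KB units and saturates at RCV_BUFSIZ_MASK, and update_rx_credits() later returns the overage as extra credits in the first rx_data_ack. The split in isolation, with the mask width assumed to be 10 bits for illustration:

    #include <stdio.h>

    #define RCV_BUFSIZ_MASK 0x3ff   /* field width assumed for illustration */

    int main(void)
    {
            int rcv_win = 2 * 1024 * 1024;   /* desired window in bytes */
            int win = rcv_win >> 10;         /* RCV_BUFSIZ is in 1KB units */
            int overage = 0;

            if (win > RCV_BUFSIZ_MASK)
                    win = RCV_BUFSIZ_MASK;
            /* what opt0 could not express comes back as rx_data_ack credits */
            if (rcv_win > RCV_BUFSIZ_MASK * 1024)
                    overage = rcv_win - RCV_BUFSIZ_MASK * 1024;

            printf("RCV_BUFSIZ=%d (x1KB), deferred credits=%d bytes\n",
                   win, overage);
            return 0;
    }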
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 7151a02b4ebb..c04292c950f1 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
134 V_FW_RI_RES_WR_IQANUS(0) | 134 V_FW_RI_RES_WR_IQANUS(0) |
135 V_FW_RI_RES_WR_IQANUD(1) | 135 V_FW_RI_RES_WR_IQANUD(1) |
136 F_FW_RI_RES_WR_IQANDST | 136 F_FW_RI_RES_WR_IQANDST |
137 V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids)); 137 V_FW_RI_RES_WR_IQANDSTINDEX(
138 rdev->lldi.ciq_ids[cq->vector]));
138 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16( 139 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
139 F_FW_RI_RES_WR_IQDROPRSS | 140 F_FW_RI_RES_WR_IQDROPRSS |
140 V_FW_RI_RES_WR_IQPCIECH(2) | 141 V_FW_RI_RES_WR_IQPCIECH(2) |
@@ -870,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
870 871
871 rhp = to_c4iw_dev(ibdev); 872 rhp = to_c4iw_dev(ibdev);
872 873
874 if (vector >= rhp->rdev.lldi.nciq)
875 return ERR_PTR(-EINVAL);
876
873 chp = kzalloc(sizeof(*chp), GFP_KERNEL); 877 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
874 if (!chp) 878 if (!chp)
875 return ERR_PTR(-ENOMEM); 879 return ERR_PTR(-ENOMEM);
@@ -915,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
915 } 919 }
916 chp->cq.size = hwentries; 920 chp->cq.size = hwentries;
917 chp->cq.memsize = memsize; 921 chp->cq.memsize = memsize;
922 chp->cq.vector = vector;
918 923
919 ret = create_cq(&rhp->rdev, &chp->cq, 924 ret = create_cq(&rhp->rdev, &chp->cq,
920 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 925 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 6f533fbcc4b3..125bc5d1e175 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -810,6 +810,8 @@ struct c4iw_ep {
810 u8 retry_with_mpa_v1; 810 u8 retry_with_mpa_v1;
811 u8 tried_with_mpa_v1; 811 u8 tried_with_mpa_v1;
812 unsigned int retry_count; 812 unsigned int retry_count;
813 int snd_win;
814 int rcv_win;
813}; 815};
814 816
815static inline void print_addr(struct c4iw_ep_common *epc, const char *func, 817static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index c777e22bd8d5..b1d305338de6 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -500,7 +500,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
500 dev->ibdev.node_type = RDMA_NODE_RNIC; 500 dev->ibdev.node_type = RDMA_NODE_RNIC;
501 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); 501 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
502 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; 502 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
503 dev->ibdev.num_comp_vectors = 1; 503 dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
504 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); 504 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
505 dev->ibdev.query_device = c4iw_query_device; 505 dev->ibdev.query_device = c4iw_query_device;
506 dev->ibdev.query_port = c4iw_query_port; 506 dev->ibdev.query_port = c4iw_query_port;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 2178f3198410..68b0a6bf4eb0 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -542,6 +542,7 @@ struct t4_cq {
542 size_t memsize; 542 size_t memsize;
543 __be64 bits_type_ts; 543 __be64 bits_type_ts;
544 u32 cqid; 544 u32 cqid;
545 int vector;
545 u16 size; /* including status page */ 546 u16 size; /* including status page */
546 u16 cidx; 547 u16 cidx;
547 u16 sw_pidx; 548 u16 sw_pidx;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 6121ca08fe58..91289a051af9 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,7 @@ enum { /* TCP congestion control algorithms */
848#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) 848#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
849#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) 849#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
850 850
851#define CONG_CNTRL_VALID (1 << 18)
851#define T5_OPT_2_VALID (1 << 31) 852#define T5_OPT_2_VALID (1 << 31)
852 853
853#endif /* _T4FW_RI_API_H_ */ 854#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index c4b3940845e6..078cadd6c797 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -105,5 +105,5 @@ static const struct ethtool_ops ipoib_ethtool_ops = {
105 105
106void ipoib_set_ethtool_ops(struct net_device *dev) 106void ipoib_set_ethtool_ops(struct net_device *dev)
107{ 107{
108 SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops); 108 dev->ethtool_ops = &ipoib_ethtool_ops;
109} 109}
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 9816c51eb5c2..7641b3096ea6 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,11 +1,3 @@
1config ISDN_DRV_AVMB1_VERBOSE_REASON
2 bool "Verbose reason code reporting"
3 default y
4 help
5 If you say Y here, the CAPI drivers will give verbose reasons for
6 disconnecting. This will increase the size of the kernel by 7 KB. If
7 unsure, say Y.
8
9config CAPI_TRACE 1config CAPI_TRACE
10 bool "CAPI trace support" 2 bool "CAPI trace support"
11 default y 3 default y
@@ -17,7 +9,7 @@ config CAPI_TRACE
17 If unsure, say Y. 9 If unsure, say Y.
18 10
19config ISDN_CAPI_CAPI20 11config ISDN_CAPI_CAPI20
20 tristate "CAPI2.0 /dev/capi support" 12 tristate "CAPI2.0 /dev/capi20 support"
21 help 13 help
22 This option will provide the CAPI 2.0 interface to userspace 14 This option will provide the CAPI 2.0 interface to userspace
23 applications via /dev/capi20. Applications should use the 15 applications via /dev/capi20. Applications should use the
@@ -42,3 +34,11 @@ config ISDN_CAPI_CAPIDRV
42 the legacy isdn4linux link layer. If you have a card which is 34 the legacy isdn4linux link layer. If you have a card which is
43 supported by a CAPI driver, but still want to use old features like 35 supported by a CAPI driver, but still want to use old features like
44 ippp interfaces or ttyI emulation, say Y/M here. 36 ippp interfaces or ttyI emulation, say Y/M here.
37
38config ISDN_CAPI_CAPIDRV_VERBOSE
39 bool "Verbose reason code reporting"
40 depends on ISDN_CAPI_CAPIDRV
41 help
42 If you say Y here, the capidrv interface will give verbose reasons
43 for disconnecting. This will increase the size of the kernel by 7 KB.
44 If unsure, say N.
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ac6f72b455d1..f9a87ed2392b 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1271,7 +1271,7 @@ static int __init capinc_tty_init(void)
1271 return -ENOMEM; 1271 return -ENOMEM;
1272 } 1272 }
1273 drv->driver_name = "capi_nc"; 1273 drv->driver_name = "capi_nc";
1274 drv->name = "capi"; 1274 drv->name = "capi!";
1275 drv->major = 0; 1275 drv->major = 0;
1276 drv->minor_start = 0; 1276 drv->minor_start = 0;
1277 drv->type = TTY_DRIVER_TYPE_SERIAL; 1277 drv->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1417,7 +1417,7 @@ static int __init capi_init(void)
1417 return PTR_ERR(capi_class); 1417 return PTR_ERR(capi_class);
1418 } 1418 }
1419 1419
1420 device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi"); 1420 device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
1421 1421
1422 if (capinc_tty_init() < 0) { 1422 if (capinc_tty_init() < 0) {
1423 device_destroy(capi_class, MKDEV(capi_major, 0)); 1423 device_destroy(capi_class, MKDEV(capi_major, 0));
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index cc9f1927a322..fd6d28f3fc36 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -763,6 +763,201 @@ static inline int new_bchan(capidrv_contr *card)
763} 763}
764 764
765/* ------------------------------------------------------------------- */ 765/* ------------------------------------------------------------------- */
766static char *capi_info2str(u16 reason)
767{
768#ifndef CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE
769 return "..";
770#else
771 switch (reason) {
772
773/*-- informative values (corresponding message was processed) -----*/
774 case 0x0001:
775 return "NCPI not supported by current protocol, NCPI ignored";
776 case 0x0002:
777 return "Flags not supported by current protocol, flags ignored";
778 case 0x0003:
779 return "Alert already sent by another application";
780
781/*-- error information concerning CAPI_REGISTER -----*/
782 case 0x1001:
783 return "Too many applications";
784 case 0x1002:
785 return "Logical block size too small, must be at least 128 Bytes";
786 case 0x1003:
787 return "Buffer exceeds 64 kByte";
788 case 0x1004:
789 return "Message buffer size too small, must be at least 1024 Bytes";
790 case 0x1005:
791 return "Max. number of logical connections not supported";
792 case 0x1006:
793 return "Reserved";
794 case 0x1007:
795 return "The message could not be accepted because of an internal busy condition";
796 case 0x1008:
797 return "OS resource error (no memory ?)";
798 case 0x1009:
799 return "CAPI not installed";
800 case 0x100A:
801 return "Controller does not support external equipment";
802 case 0x100B:
803 return "Controller does only support external equipment";
804
805/*-- error information concerning message exchange functions -----*/
806 case 0x1101:
807 return "Illegal application number";
808 case 0x1102:
809 return "Illegal command or subcommand or message length less than 12 bytes";
810 case 0x1103:
811 return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI";
812 case 0x1104:
813 return "Queue is empty";
814 case 0x1105:
815 return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE";
816 case 0x1106:
817 return "Unknown notification parameter";
818 case 0x1107:
819 return "The Message could not be accepted because of an internal busy condition";
820 case 0x1108:
821 return "OS Resource error (no memory ?)";
822 case 0x1109:
823 return "CAPI not installed";
824 case 0x110A:
825 return "Controller does not support external equipment";
826 case 0x110B:
827 return "Controller does only support external equipment";
828
829/*-- error information concerning resource / coding problems -----*/
830 case 0x2001:
831 return "Message not supported in current state";
832 case 0x2002:
833 return "Illegal Controller / PLCI / NCCI";
834 case 0x2003:
835 return "Out of PLCI";
836 case 0x2004:
837 return "Out of NCCI";
838 case 0x2005:
839 return "Out of LISTEN";
840 case 0x2006:
841 return "Out of FAX resources (protocol T.30)";
842 case 0x2007:
843 return "Illegal message parameter coding";
844
845/*-- error information concerning requested services -----*/
846 case 0x3001:
847 return "B1 protocol not supported";
848 case 0x3002:
849 return "B2 protocol not supported";
850 case 0x3003:
851 return "B3 protocol not supported";
852 case 0x3004:
853 return "B1 protocol parameter not supported";
854 case 0x3005:
855 return "B2 protocol parameter not supported";
856 case 0x3006:
857 return "B3 protocol parameter not supported";
858 case 0x3007:
859 return "B protocol combination not supported";
860 case 0x3008:
861 return "NCPI not supported";
862 case 0x3009:
863 return "CIP Value unknown";
864 case 0x300A:
865 return "Flags not supported (reserved bits)";
866 case 0x300B:
867 return "Facility not supported";
868 case 0x300C:
869 return "Data length not supported by current protocol";
870 case 0x300D:
871 return "Reset procedure not supported by current protocol";
872
 873/*-- information about the clearing of a physical connection -----*/
874 case 0x3301:
875 return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)";
876 case 0x3302:
877 return "Protocol error layer 2";
878 case 0x3303:
879 return "Protocol error layer 3";
880 case 0x3304:
881 return "Another application got that call";
882/*-- T.30 specific reasons -----*/
883 case 0x3311:
884 return "Connecting not successful (remote station is no FAX G3 machine)";
885 case 0x3312:
886 return "Connecting not successful (training error)";
887 case 0x3313:
888 return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)";
889 case 0x3314:
890 return "Disconnected during transfer (remote abort)";
891 case 0x3315:
892 return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)";
893 case 0x3316:
894 return "Disconnected during transfer (local tx data underrun)";
895 case 0x3317:
896 return "Disconnected during transfer (local rx data overflow)";
897 case 0x3318:
898 return "Disconnected during transfer (local abort)";
899 case 0x3319:
900 return "Illegal parameter coding (e.g. SFF coding error)";
901
902/*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/
903 case 0x3481: return "Unallocated (unassigned) number";
904 case 0x3482: return "No route to specified transit network";
905 case 0x3483: return "No route to destination";
906 case 0x3486: return "Channel unacceptable";
907 case 0x3487:
908 return "Call awarded and being delivered in an established channel";
909 case 0x3490: return "Normal call clearing";
910 case 0x3491: return "User busy";
911 case 0x3492: return "No user responding";
912 case 0x3493: return "No answer from user (user alerted)";
913 case 0x3495: return "Call rejected";
914 case 0x3496: return "Number changed";
915 case 0x349A: return "Non-selected user clearing";
916 case 0x349B: return "Destination out of order";
917 case 0x349C: return "Invalid number format";
918 case 0x349D: return "Facility rejected";
919 case 0x349E: return "Response to STATUS ENQUIRY";
920 case 0x349F: return "Normal, unspecified";
921 case 0x34A2: return "No circuit / channel available";
922 case 0x34A6: return "Network out of order";
923 case 0x34A9: return "Temporary failure";
924 case 0x34AA: return "Switching equipment congestion";
925 case 0x34AB: return "Access information discarded";
926 case 0x34AC: return "Requested circuit / channel not available";
927 case 0x34AF: return "Resources unavailable, unspecified";
928 case 0x34B1: return "Quality of service unavailable";
929 case 0x34B2: return "Requested facility not subscribed";
930 case 0x34B9: return "Bearer capability not authorized";
931 case 0x34BA: return "Bearer capability not presently available";
932 case 0x34BF: return "Service or option not available, unspecified";
933 case 0x34C1: return "Bearer capability not implemented";
934 case 0x34C2: return "Channel type not implemented";
935 case 0x34C5: return "Requested facility not implemented";
936 case 0x34C6: return "Only restricted digital information bearer capability is available";
937 case 0x34CF: return "Service or option not implemented, unspecified";
938 case 0x34D1: return "Invalid call reference value";
939 case 0x34D2: return "Identified channel does not exist";
940 case 0x34D3: return "A suspended call exists, but this call identity does not";
941 case 0x34D4: return "Call identity in use";
942 case 0x34D5: return "No call suspended";
943 case 0x34D6: return "Call having the requested call identity has been cleared";
944 case 0x34D8: return "Incompatible destination";
945 case 0x34DB: return "Invalid transit network selection";
946 case 0x34DF: return "Invalid message, unspecified";
947 case 0x34E0: return "Mandatory information element is missing";
948 case 0x34E1: return "Message type non-existent or not implemented";
949 case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented";
950 case 0x34E3: return "Information element non-existent or not implemented";
951 case 0x34E4: return "Invalid information element contents";
952 case 0x34E5: return "Message not compatible with call state";
953 case 0x34E6: return "Recovery on timer expiry";
954 case 0x34EF: return "Protocol error, unspecified";
955 case 0x34FF: return "Interworking, unspecified";
956
957 default: return "No additional information";
958 }
959#endif
960}
766 961
767static void handle_controller(_cmsg *cmsg) 962static void handle_controller(_cmsg *cmsg)
768{ 963{
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index d26f17033b68..6e797e502cfa 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -22,205 +22,6 @@
22 22
23/* from CAPI2.0 DDK AVM Berlin GmbH */ 23/* from CAPI2.0 DDK AVM Berlin GmbH */
24 24
-#ifndef CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON
-char *capi_info2str(u16 reason)
-{
-	return "..";
-}
-#else
-char *capi_info2str(u16 reason)
-{
-	switch (reason) {
-
-/*-- informative values (corresponding message was processed) -----*/
-	case 0x0001:
-		return "NCPI not supported by current protocol, NCPI ignored";
-	case 0x0002:
-		return "Flags not supported by current protocol, flags ignored";
-	case 0x0003:
-		return "Alert already sent by another application";
-
-/*-- error information concerning CAPI_REGISTER -----*/
-	case 0x1001:
-		return "Too many applications";
-	case 0x1002:
-		return "Logical block size too small, must be at least 128 Bytes";
-	case 0x1003:
-		return "Buffer exceeds 64 kByte";
-	case 0x1004:
-		return "Message buffer size too small, must be at least 1024 Bytes";
-	case 0x1005:
-		return "Max. number of logical connections not supported";
-	case 0x1006:
-		return "Reserved";
-	case 0x1007:
-		return "The message could not be accepted because of an internal busy condition";
-	case 0x1008:
-		return "OS resource error (no memory ?)";
-	case 0x1009:
-		return "CAPI not installed";
-	case 0x100A:
-		return "Controller does not support external equipment";
-	case 0x100B:
-		return "Controller does only support external equipment";
-
-/*-- error information concerning message exchange functions -----*/
-	case 0x1101:
-		return "Illegal application number";
-	case 0x1102:
-		return "Illegal command or subcommand or message length less than 12 bytes";
-	case 0x1103:
-		return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI";
-	case 0x1104:
-		return "Queue is empty";
-	case 0x1105:
-		return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE";
-	case 0x1106:
-		return "Unknown notification parameter";
-	case 0x1107:
-		return "The Message could not be accepted because of an internal busy condition";
-	case 0x1108:
-		return "OS Resource error (no memory ?)";
-	case 0x1109:
-		return "CAPI not installed";
-	case 0x110A:
-		return "Controller does not support external equipment";
-	case 0x110B:
-		return "Controller does only support external equipment";
-
-/*-- error information concerning resource / coding problems -----*/
-	case 0x2001:
-		return "Message not supported in current state";
-	case 0x2002:
-		return "Illegal Controller / PLCI / NCCI";
-	case 0x2003:
-		return "Out of PLCI";
-	case 0x2004:
-		return "Out of NCCI";
-	case 0x2005:
-		return "Out of LISTEN";
-	case 0x2006:
-		return "Out of FAX resources (protocol T.30)";
-	case 0x2007:
-		return "Illegal message parameter coding";
-
-/*-- error information concerning requested services -----*/
-	case 0x3001:
-		return "B1 protocol not supported";
-	case 0x3002:
-		return "B2 protocol not supported";
-	case 0x3003:
-		return "B3 protocol not supported";
-	case 0x3004:
-		return "B1 protocol parameter not supported";
-	case 0x3005:
-		return "B2 protocol parameter not supported";
-	case 0x3006:
-		return "B3 protocol parameter not supported";
-	case 0x3007:
-		return "B protocol combination not supported";
-	case 0x3008:
-		return "NCPI not supported";
-	case 0x3009:
-		return "CIP Value unknown";
-	case 0x300A:
-		return "Flags not supported (reserved bits)";
-	case 0x300B:
-		return "Facility not supported";
-	case 0x300C:
-		return "Data length not supported by current protocol";
-	case 0x300D:
-		return "Reset procedure not supported by current protocol";
-
-/*-- informations about the clearing of a physical connection -----*/
-	case 0x3301:
-		return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)";
-	case 0x3302:
-		return "Protocol error layer 2";
-	case 0x3303:
-		return "Protocol error layer 3";
-	case 0x3304:
-		return "Another application got that call";
-/*-- T.30 specific reasons -----*/
-	case 0x3311:
-		return "Connecting not successful (remote station is no FAX G3 machine)";
-	case 0x3312:
-		return "Connecting not successful (training error)";
-	case 0x3313:
-		return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)";
-	case 0x3314:
-		return "Disconnected during transfer (remote abort)";
-	case 0x3315:
-		return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)";
-	case 0x3316:
-		return "Disconnected during transfer (local tx data underrun)";
-	case 0x3317:
-		return "Disconnected during transfer (local rx data overflow)";
-	case 0x3318:
-		return "Disconnected during transfer (local abort)";
-	case 0x3319:
-		return "Illegal parameter coding (e.g. SFF coding error)";
-
-/*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/
-	case 0x3481: return "Unallocated (unassigned) number";
-	case 0x3482: return "No route to specified transit network";
-	case 0x3483: return "No route to destination";
-	case 0x3486: return "Channel unacceptable";
-	case 0x3487:
-		return "Call awarded and being delivered in an established channel";
-	case 0x3490: return "Normal call clearing";
-	case 0x3491: return "User busy";
-	case 0x3492: return "No user responding";
-	case 0x3493: return "No answer from user (user alerted)";
-	case 0x3495: return "Call rejected";
-	case 0x3496: return "Number changed";
-	case 0x349A: return "Non-selected user clearing";
-	case 0x349B: return "Destination out of order";
-	case 0x349C: return "Invalid number format";
-	case 0x349D: return "Facility rejected";
-	case 0x349E: return "Response to STATUS ENQUIRY";
-	case 0x349F: return "Normal, unspecified";
-	case 0x34A2: return "No circuit / channel available";
-	case 0x34A6: return "Network out of order";
-	case 0x34A9: return "Temporary failure";
-	case 0x34AA: return "Switching equipment congestion";
-	case 0x34AB: return "Access information discarded";
-	case 0x34AC: return "Requested circuit / channel not available";
-	case 0x34AF: return "Resources unavailable, unspecified";
-	case 0x34B1: return "Quality of service unavailable";
-	case 0x34B2: return "Requested facility not subscribed";
-	case 0x34B9: return "Bearer capability not authorized";
-	case 0x34BA: return "Bearer capability not presently available";
-	case 0x34BF: return "Service or option not available, unspecified";
-	case 0x34C1: return "Bearer capability not implemented";
-	case 0x34C2: return "Channel type not implemented";
-	case 0x34C5: return "Requested facility not implemented";
-	case 0x34C6: return "Only restricted digital information bearer capability is available";
-	case 0x34CF: return "Service or option not implemented, unspecified";
-	case 0x34D1: return "Invalid call reference value";
-	case 0x34D2: return "Identified channel does not exist";
-	case 0x34D3: return "A suspended call exists, but this call identity does not";
-	case 0x34D4: return "Call identity in use";
-	case 0x34D5: return "No call suspended";
-	case 0x34D6: return "Call having the requested call identity has been cleared";
-	case 0x34D8: return "Incompatible destination";
-	case 0x34DB: return "Invalid transit network selection";
-	case 0x34DF: return "Invalid message, unspecified";
-	case 0x34E0: return "Mandatory information element is missing";
-	case 0x34E1: return "Message type non-existent or not implemented";
-	case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented";
-	case 0x34E3: return "Information element non-existent or not implemented";
-	case 0x34E4: return "Invalid information element contents";
-	case 0x34E5: return "Message not compatible with call state";
-	case 0x34E6: return "Recovery on timer expiry";
-	case 0x34EF: return "Protocol error, unspecified";
-	case 0x34FF: return "Interworking, unspecified";
-
-	default: return "No additional information";
-	}
-}
-#endif
-
 typedef struct {
 	int typ;
 	size_t off;
@@ -1073,4 +874,3 @@ EXPORT_SYMBOL(capi_cmsg_header);
 EXPORT_SYMBOL(capi_cmd2str);
 EXPORT_SYMBOL(capi_cmsg2str);
 EXPORT_SYMBOL(capi_message2str);
-EXPORT_SYMBOL(capi_info2str);
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 414dbf6da89a..fc9f9d03fa13 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
 
 
 
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM	/* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO	0x03
-
-#else
-
 /* inline functions io mapped */
 static inline void
 SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
 
 #define PCI_ENA_REGIO	0x01
 
-#endif	/* HISAX_HFC4S8S_PCIMEM */
-
 /******************************************************/
 /* function to read critical counter registers that  */
 /* may be updated by the chip during read             */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
 			return;
 		} else {
 			/* read errornous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
 			SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
 			while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-				Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
 				fRead_hfc32(l1p->hw);
-#endif
 				z1 -= 4;
 			}
 
 			while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-				Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-				fRead_hfc8(l1p->hw);
-#endif
+				fRead_hfc8(l1p->hw);
 
 			Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
 			wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
 
 		cp = skb->data;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
 		SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
 		while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-			*((unsigned long *) cp) =
-				Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
 			*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
 			cp += 4;
 			z1 -= 4;
 		}
 
 		while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-			*cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-			*cp++ = fRead_hfc8(l1p->hw);
-#endif
+			*cp++ = fRead_hfc8(l1p->hw);
 
 		Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);	/* increment f counter */
 		wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
 			wait_busy(l1->hw);
 			return;
 		}
-#ifndef HISAX_HFC4S8S_PCIMEM
 		SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
 
 		while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-			*((unsigned long *) bch->rx_ptr) =
-				Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
 			*((unsigned long *) bch->rx_ptr) =
 				fRead_hfc32(l1->hw);
-#endif
 			bch->rx_ptr += 4;
 			z1 -= 4;
 		}
 
 		while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-			*(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
-			*(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+			*(bch->rx_ptr++) = fRead_hfc8(l1->hw);
 
 		if (hdlc_complete) {
 			/* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
 	if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
 		cp = skb->data;
 		cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
 		SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
 		while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-			fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
-				     *(unsigned long *) cp);
-#else
 			SetRegAddr(l1p->hw, A_FIFO_DATA0);
 			fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
 			cp += 4;
 			cnt -= 4;
 		}
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-		while (cnt--)
-			fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
 		while (cnt--)
 			fWrite_hfc8(l1p->hw, *cp++);
-#endif
 
 		l1p->tx_cnt = skb->truesize;
 		Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);	/* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
 		cp = skb->data + bch->tx_cnt;
 		bch->tx_cnt += cnt;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
 		SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
 		while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-			fWrite_hfc32(l1->hw, A_FIFO_DATA0,
-				     *(unsigned long *) cp);
-#else
 			fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
 			cp += 4;
 			cnt -= 4;
 		}
 
 		while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-			fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
-			fWrite_hfc8(l1->hw, *cp++);
-#endif
+			fWrite_hfc8(l1->hw, *cp++);
 
 		if (bch->tx_cnt >= skb->len) {
 			if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
 	if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
 		return IRQ_NONE;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
 	/* read current selected regsister */
 	old_ioreg = GetRegAddr(hw);
-#endif
 
 	/* Layer 1 State change */
 	hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
 	if (!
 	    (b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
 	    && !hw->mr.r_irq_statech) {
-#ifndef HISAX_HFC4S8S_PCIMEM
 		SetRegAddr(hw, old_ioreg);
-#endif
 		return IRQ_NONE;
 	}
 
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
 	/* queue the request to allow other cards to interrupt */
 	schedule_work(&hw->tqueue);
 
-#ifndef HISAX_HFC4S8S_PCIMEM
 	SetRegAddr(hw, old_ioreg);
-#endif
 	return IRQ_HANDLED;
 }	/* hfc4s8s_interrupt */
 
@@ -1471,13 +1388,8 @@ static void
 release_pci_ports(hfc4s8s_hw *hw)
 {
 	pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
-	if (hw->membase)
-		iounmap((void *) hw->membase);
-#else
 	if (hw->iobase)
 		release_region(hw->iobase, 8);
-#endif
 }
 
 /*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
 static void
 enable_pci_ports(hfc4s8s_hw *hw)
 {
-#ifdef HISAX_HFC4S8S_PCIMEM
-	pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
	pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
 }
 
 /*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
 		       hw->irq);
 		goto out;
 	}
-#ifdef HISAX_HFC4S8S_PCIMEM
-	printk(KERN_INFO
-	       "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
-	       hw->hw_membase, hw->irq);
-#else
 	printk(KERN_INFO
 	       "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
 	       hw->iobase, hw->irq);
-#endif
 
 	hfc_hardware_enable(hw, 1, 0);
 
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->irq = pdev->irq;
 	hw->iobase = pci_resource_start(pdev, 0);
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-	hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
-	hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
 	if (!request_region(hw->iobase, 8, hw->card_name)) {
 		printk(KERN_INFO
 		       "HFC-4S/8S: failed to request address space at 0x%04x\n",
 		       hw->iobase);
 		goto out;
 	}
-#endif
 
 	pci_set_drvdata(pdev, hw);
 	err = setup_instance(hw);
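With HISAX_HFC4S8S_PCIMEM removed above, only the I/O-mapped access path survives, so every call site collapses to the SetRegAddr()/fRead/fWrite pattern: latch a register index on the address port, then stream FIFO data through the data port without re-latching. A rough sketch of that two-step idiom for illustration only; the port offsets and helper names here are assumptions, not the driver's actual layout:

	/* Hedged sketch of an io-mapped register access idiom:
	 * an address latch selects the register, then repeated
	 * data-port reads stream FIFO bytes.
	 */
	static inline void set_reg_addr(unsigned long iobase, unsigned char reg)
	{
		outb(reg, iobase + 4);	/* offset 4 = address latch (assumed) */
	}

	static inline unsigned char read_fifo_byte(unsigned long iobase)
	{
		return inb(iobase);	/* offset 0 = data port (assumed) */
	}
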
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a5da511e3c9a..61ac63237446 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -634,7 +634,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_IPPP_FILTER
 	case PPPIOCSPASS:
 	{
-		struct sock_fprog fprog;
+		struct sock_fprog_kern fprog;
 		struct sock_filter *code;
 		int err, len = get_filter(argp, &code);
 
@@ -653,7 +653,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 	}
 	case PPPIOCSACTIVE:
 	{
-		struct sock_fprog fprog;
+		struct sock_fprog_kern fprog;
 		struct sock_filter *code;
 		int err, len = get_filter(argp, &code);
 
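The two hunks above swap the on-stack filter program type from the UAPI struct to its kernel-side twin. A minimal sketch of the distinction, assuming the layouts that uapi/linux/filter.h and linux/filter.h carried in this period:

	/* Sketch only: both pair a length with a BPF instruction array,
	 * but the UAPI struct points into user memory, while the _kern
	 * variant points at an already-copied kernel buffer -- which is
	 * what get_filter() hands back in the code above.
	 */
	struct sock_fprog {			/* uapi/linux/filter.h */
		unsigned short	len;		/* number of filter blocks */
		struct sock_filter __user *filter;
	};

	struct sock_fprog_kern {		/* linux/filter.h */
		u16		len;
		struct sock_filter *filter;
	};
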
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 2c0d2c2bf946..9f454d76cc06 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -287,11 +287,9 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
 	p = frame;
 
 	/* restart timer */
-	if ((int)(hc->keep_tl.expires-jiffies) < 5 * HZ) {
-		del_timer(&hc->keep_tl);
-		hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
-		add_timer(&hc->keep_tl);
-	} else
+	if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+		mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
+	else
 		hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
 
 	if (debug & DEBUG_L1OIP_MSG)
@@ -621,11 +619,9 @@ multiframe:
 		goto multiframe;
 
 	/* restart timer */
-	if ((int)(hc->timeout_tl.expires-jiffies) < 5 * HZ || !hc->timeout_on) {
+	if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
 		hc->timeout_on = 1;
-		del_timer(&hc->timeout_tl);
-		hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
-		add_timer(&hc->timeout_tl);
+		mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
 	} else /* only adjust timer */
 		hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
 
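Both l1oip hunks above replace a signed jiffies subtraction plus a del_timer()/add_timer() pair with the standard helpers: time_before() compares jiffies values safely across counter wraparound, and mod_timer() atomically re-arms a timer whether or not it is currently pending. A minimal sketch of the idiom, with the function and period names invented for illustration:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	/* Sketch: re-arm a keepalive timer unless it already fires far
	 * enough in the future; mirrors the pattern the patch installs. */
	static void restart_keepalive(struct timer_list *tl, unsigned long period)
	{
		if (time_before(tl->expires, jiffies + 5 * HZ))
			mod_timer(tl, jiffies + period);
		else
			tl->expires = jiffies + period;	/* far enough out: only adjust */
	}
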
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 0a87e5691341..cc8d4a6099cd 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -448,7 +448,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
 {
 	struct scratch		*data = host->data;
 	u8			*cp = data->status;
-	u32			arg = cmd->arg;
 	int			status;
 	struct spi_transfer	*t;
 
@@ -465,14 +464,12 @@ mmc_spi_command_send(struct mmc_spi_host *host,
 	 * We init the whole buffer to all-ones, which is what we need
 	 * to write while we're reading (later) response data.
 	 */
-	memset(cp++, 0xff, sizeof(data->status));
+	memset(cp, 0xff, sizeof(data->status));
 
-	*cp++ = 0x40 | cmd->opcode;
-	*cp++ = (u8)(arg >> 24);
-	*cp++ = (u8)(arg >> 16);
-	*cp++ = (u8)(arg >> 8);
-	*cp++ = (u8)arg;
-	*cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
+	cp[1] = 0x40 | cmd->opcode;
+	put_unaligned_be32(cmd->arg, cp+2);
+	cp[6] = crc7_be(0, cp+1, 5) | 0x01;
+	cp += 7;
 
 	/* Then, read up to 13 bytes (while writing all-ones):
 	 * - N(CR) (== 1..8) bytes of all-ones
@@ -711,10 +708,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
 	 * so we have to cope with this situation and check the response
 	 * bit-by-bit. Arggh!!!
 	 */
-	pattern  = scratch->status[0] << 24;
-	pattern |= scratch->status[1] << 16;
-	pattern |= scratch->status[2] << 8;
-	pattern |= scratch->status[3];
+	pattern = get_unaligned_be32(scratch->status);
 
 	/* First 3 bit of pattern are undefined */
 	pattern |= 0xE0000000;
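The mmc_spi hunks above trade open-coded byte shifts for the unaligned big-endian helpers, and crc7() plus a manual `<< 1` for crc7_be(), which operates on a left-justified polynomial so the shift disappears. A hedged sketch of the resulting 7-byte SPI command frame builder; the buffer layout is inferred from the surrounding code, not copied from the driver:

	#include <asm/unaligned.h>
	#include <linux/crc7.h>

	/* Sketch: 0xff pad byte, start bit + command index, 32-bit argument
	 * MSB-first, then CRC7 over those 5 bytes with the end bit ORed in. */
	static void build_spi_cmd(u8 *cp, u8 opcode, u32 arg)
	{
		cp[0] = 0xff;
		cp[1] = 0x40 | opcode;
		put_unaligned_be32(arg, cp + 2);
		cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	}
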
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..0dfeaf5da3f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
 
 	rcu_read_lock();
 	first_slave = bond_first_slave_rcu(bond);
-	agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+	agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
 	rcu_read_unlock();
 
 	return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
 {
 	struct slave *slave = port->slave;
 
-	if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+	if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
 		bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
 */
 static inline void __get_state_machine_lock(struct port *port)
 {
-	spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+	spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
 */
 static inline void __release_state_machine_lock(struct port *port)
 {
-	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
 static inline void __initialize_port_locks(struct slave *slave)
 {
 	/* make sure it isn't called twice */
-	spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+	spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
 }
 
 /* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 	struct slave *slave;
 
 	bond_for_each_slave_rcu(bond, slave, iter)
-		if (SLAVE_AD_INFO(slave).aggregator.is_active)
-			return &(SLAVE_AD_INFO(slave).aggregator);
+		if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+			return &(SLAVE_AD_INFO(slave)->aggregator);
 
 	return NULL;
 }
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
 	}
 	/* search on all aggregators for a suitable aggregator for this port */
 	bond_for_each_slave(bond, slave, iter) {
-		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+		aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		/* keep a free aggregator for later use(if needed) */
 		if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 	best = (active && agg_device_up(active)) ? active : NULL;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = &(SLAVE_AD_INFO(slave).aggregator);
+		agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		agg->is_active = 0;
 
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		 best->slave ? best->slave->dev->name : "NULL");
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = &(SLAVE_AD_INFO(slave).aggregator);
+		agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
 			 agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
 	struct aggregator *aggregator;
 
 	/* check that the slave has not been initialized yet. */
-	if (SLAVE_AD_INFO(slave).port.slave != slave) {
+	if (SLAVE_AD_INFO(slave)->port.slave != slave) {
 
 		/* port initialization */
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 
 		ad_initialize_port(port, bond->params.lacp_fast);
 
 		__initialize_port_locks(slave);
 		port->slave = slave;
-		port->actor_port_number = SLAVE_AD_INFO(slave).id;
+		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
 		/* key is determined according to the link speed, duplex and user key(which
 		 * is yet not supported)
 		 */
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
 		__disable_port(port);
 
 		/* aggregator initialization */
-		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+		aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		ad_initialize_agg(aggregator);
 
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	struct slave *slave_iter;
 	struct list_head *iter;
 
-	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-	port = &(SLAVE_AD_INFO(slave).port);
+	aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	    (aggregator->lag_ports->next_port_in_aggregator)) {
 		/* find new aggregator for the related port(s) */
 		bond_for_each_slave(bond, slave_iter, iter) {
-			new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+			new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
 			/* if the new aggregator is empty, or it is
 			 * connected to our port only
 			 */
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 	/* find the aggregator that this port is connected to */
 	bond_for_each_slave(bond, slave_iter, iter) {
-		temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+		temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
 		prev_port = NULL;
 		/* search the port in the aggregator's related ports */
 		for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	if (BOND_AD_INFO(bond).agg_select_timer &&
 	    !(--BOND_AD_INFO(bond).agg_select_timer)) {
 		slave = bond_first_slave_rcu(bond);
-		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+		port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
 		/* select the active aggregator for the bond */
 		if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 
 	/* for each port run the state machines */
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		if (!port->slave) {
 			pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
 					    bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
 	if (length >= sizeof(struct lacpdu)) {
 
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 
 		if (!port->slave) {
 			pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
 		ret = 0;
 		goto out;
 	}
-	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
 	if (active) {
 		/* are enough slaves available to consider link up? */
 		if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
 	struct port *port;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		if (port->aggregator && port->aggregator->is_active) {
 			aggregator = port->aggregator;
 			break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 		goto err_free;
 	}
 
-	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+	slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
 	first_ok_slave = NULL;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = SLAVE_AD_INFO(slave).port.aggregator;
+		agg = SLAVE_AD_INFO(slave)->port.aggregator;
 		if (!agg || agg->aggregator_identifier != agg_id)
 			continue;
 
 		if (slave_agg_no >= 0) {
-			if (!first_ok_slave && SLAVE_IS_OK(slave))
+			if (!first_ok_slave && bond_slave_can_tx(slave))
 				first_ok_slave = slave;
 			slave_agg_no--;
 			continue;
 		}
 
-		if (SLAVE_IS_OK(slave)) {
+		if (bond_slave_can_tx(slave)) {
 			bond_dev_queue_xmit(bond, skb, slave->dev);
 			goto out;
 		}
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 
 	lacp_fast = bond->params.lacp_fast;
 	bond_for_each_slave(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		__get_state_machine_lock(port);
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
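Every SLAVE_AD_INFO() site in bond_3ad.c flips from `.` to `->`, which tracks the per-slave 802.3ad block moving from a by-value member to separately reached storage, so the macro now yields a pointer; the xmit hunk also folds the modulo into the caller so bond_xmit_hash() can drop its count argument. A sketch of why that one macro change ripples through every caller, with the struct and field names assumed for illustration:

	/* Sketch only: the macro text can stay the same while the
	 * field's type changes, flipping every use site. */
	struct ad_slave_info;			/* port, aggregator, id, ... */

	struct slave_before { struct ad_slave_info ad_info; };
	struct slave_after  { struct ad_slave_info *ad_info; };

	#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
	/* before: an lvalue struct -> callers write SLAVE_AD_INFO(s).port  */
	/* after:  a plain pointer  -> callers write SLAVE_AD_INFO(s)->port */
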
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 93580a47cc54..76c0dade233f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -229,7 +229,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 
 	/* Find the slave with the largest gap */
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (SLAVE_IS_OK(slave)) {
+		if (bond_slave_can_tx(slave)) {
 			long long gap = compute_gap(slave);
 
 			if (max_gap < gap) {
@@ -384,7 +384,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
 	bool found = false;
 
 	bond_for_each_slave(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@ -417,7 +417,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
 	bool found = false;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@ -755,7 +755,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 	/* Don't modify or load balance ARPs that do not originate locally
 	 * (e.g.,arrive via a bridge).
 	 */
-	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+	if (!bond_slave_has_mac_rx(bond, arp->mac_src))
 		return NULL;
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1039,11 +1039,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 
 	/* send untagged */
 	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
-	/* loop through vlans and send one packet for each */
+	/* loop through all devices and see if we need to send a packet
+	 * for that device.
+	 */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
 		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
@@ -1059,6 +1062,16 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 					vlan_dev_vlan_id(upper));
 			}
 		}
+
+		/* If this is a macvlan device, then only send updates
+		 * when strict_match is turned off.
+		 */
+		if (netif_is_macvlan(upper) && !strict_match) {
+			memset(tags, 0, sizeof(tags));
+			bond_verify_device_path(bond->dev, upper, tags);
+			alb_send_lp_vid(slave, upper->dev_addr,
+					tags[0].vlan_proto, tags[0].vlan_id);
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1068,7 +1081,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
 	struct net_device *dev = slave->dev;
 	struct sockaddr s_addr;
 
-	if (slave->bond->params.mode == BOND_MODE_TLB) {
+	if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
 		memcpy(dev->dev_addr, addr, dev->addr_len);
 		return 0;
 	}
@@ -1111,13 +1124,13 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 				struct slave *slave2)
 {
-	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+	int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
 	struct slave *disabled_slave = NULL;
 
 	ASSERT_RTNL();
 
 	/* fasten the change in the switch */
-	if (SLAVE_IS_OK(slave1)) {
+	if (bond_slave_can_tx(slave1)) {
 		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@ -1129,7 +1142,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 		disabled_slave = slave1;
 	}
 
-	if (SLAVE_IS_OK(slave2)) {
+	if (bond_slave_can_tx(slave2)) {
 		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@ -1358,6 +1371,77 @@ void bond_alb_deinitialize(struct bonding *bond)
 		rlb_deinitialize(bond);
 }
 
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+			    struct slave *tx_slave)
+{
+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+	struct ethhdr *eth_data = eth_hdr(skb);
+
+	if (!tx_slave) {
+		/* unbalanced or unassigned, send through primary */
+		tx_slave = rcu_dereference(bond->curr_active_slave);
+		if (bond->params.tlb_dynamic_lb)
+			bond_info->unbalanced_load += skb->len;
+	}
+
+	if (tx_slave && bond_slave_can_tx(tx_slave)) {
+		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+			ether_addr_copy(eth_data->h_source,
+					tx_slave->dev->dev_addr);
+		}
+
+		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+		goto out;
+	}
+
+	if (tx_slave && bond->params.tlb_dynamic_lb) {
+		_lock_tx_hashtbl(bond);
+		__tlb_clear_slave(bond, tx_slave, 0);
+		_unlock_tx_hashtbl(bond);
+	}
+
+	/* no suitable interface, frame not sent */
+	dev_kfree_skb_any(skb);
+out:
+	return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct ethhdr *eth_data;
+	struct slave *tx_slave = NULL;
+	u32 hash_index;
+
+	skb_reset_mac_header(skb);
+	eth_data = eth_hdr(skb);
+
+	/* Do not TX balance any multicast or broadcast */
+	if (!is_multicast_ether_addr(eth_data->h_dest)) {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+		case htons(ETH_P_IPX):
+		    /* In case of IPX, it will falback to L2 hash */
+		case htons(ETH_P_IPV6):
+			hash_index = bond_xmit_hash(bond, skb);
+			if (bond->params.tlb_dynamic_lb) {
+				tx_slave = tlb_choose_channel(bond,
+							      hash_index & 0xFF,
+							      skb->len);
+			} else {
+				struct list_head *iter;
+				int idx = hash_index % bond->slave_cnt;
+
+				bond_for_each_slave_rcu(bond, tx_slave, iter)
+					if (--idx < 0)
+						break;
+			}
+			break;
+		}
+	}
+	return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -1366,7 +1450,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	struct slave *tx_slave = NULL;
 	static const __be32 ip_bcast = htonl(0xffffffff);
 	int hash_size = 0;
-	int do_tx_balance = 1;
+	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
 	struct ipv6hdr *ip6hdr;
@@ -1381,7 +1465,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
 		    (iph->daddr == ip_bcast) ||
 		    (iph->protocol == IPPROTO_IGMP)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
@@ -1393,7 +1477,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 * that here just in case.
 		 */
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1401,7 +1485,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 * broadcasts in IPv4.
 		 */
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1411,7 +1495,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 */
 		ip6hdr = ipv6_hdr(skb);
 		if (ipv6_addr_any(&ip6hdr->saddr)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1421,7 +1505,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	case ETH_P_IPX:
 		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1430,7 +1514,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			 * this family since it has an "ARP" like
 			 * mechanism
 			 */
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1438,12 +1522,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		hash_size = ETH_ALEN;
 		break;
 	case ETH_P_ARP:
-		do_tx_balance = 0;
+		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
 			tx_slave = rlb_arp_xmit(skb, bond);
 		break;
 	default:
-		do_tx_balance = 0;
+		do_tx_balance = false;
 		break;
 	}
 
@@ -1452,32 +1536,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
 	}
 
-	if (!tx_slave) {
-		/* unbalanced or unassigned, send through primary */
-		tx_slave = rcu_dereference(bond->curr_active_slave);
-		bond_info->unbalanced_load += skb->len;
-	}
-
-	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
-			ether_addr_copy(eth_data->h_source,
-					tx_slave->dev->dev_addr);
-		}
-
-		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
-		goto out;
-	}
-
-	if (tx_slave) {
-		_lock_tx_hashtbl(bond);
-		__tlb_clear_slave(bond, tx_slave, 0);
-		_unlock_tx_hashtbl(bond);
-	}
-
-	/* no suitable interface, frame not sent */
-	dev_kfree_skb_any(skb);
-out:
-	return NETDEV_TX_OK;
+	return bond_do_alb_xmit(skb, bond, tx_slave);
 }
 
 void bond_alb_monitor(struct work_struct *work)
@@ -1514,8 +1573,10 @@ void bond_alb_monitor(struct work_struct *work)
 			/* If updating current_active, use all currently
 			 * user mac addreses (!strict_match). Otherwise, only
 			 * use mac of the slave device.
+			 * In RLB mode, we always use strict matches.
 			 */
-			strict_match = (slave != bond->curr_active_slave);
+			strict_match = (slave != bond->curr_active_slave ||
+					bond_info->rlb_enabled);
 			alb_send_learning_packets(slave, slave->dev->dev_addr,
 						  strict_match);
 		}
@@ -1719,7 +1780,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	/* in TLB mode, the slave might flip down/up with the old dev_addr,
 	 * and thus filter bond->dev_addr's packets, so force bond's mac
 	 */
-	if (bond->params.mode == BOND_MODE_TLB) {
+	if (BOND_MODE(bond) == BOND_MODE_TLB) {
 		struct sockaddr sa;
 		u8 tmp_addr[ETH_ALEN];
 
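The common tail of bond_alb_xmit() is factored into bond_do_alb_xmit() above so the new bond_tlb_xmit() can reuse it; with tlb_dynamic_lb off, the TLB path degrades to picking the Nth slave by packet hash. A simplified sketch of that selection step, with array indexing standing in for the RCU list walk:

	/* Sketch: hash-based static slave selection, mirroring the
	 * !tlb_dynamic_lb branch added above. */
	static struct slave *pick_slave_by_hash(struct slave **slaves,
						int slave_cnt, u32 hash)
	{
		return slave_cnt ? slaves[hash % slave_cnt] : NULL;
	}
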
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2d3f7fa541ff..658e761c4568 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	if (bond->params.mode != BOND_MODE_ALB)
+	if (BOND_MODE(bond) != BOND_MODE_ALB)
 		return 0;
 
 	seq_printf(m, "SourceIP DestinationIP "
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3a67896d435..04f35f960cb8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
343 if (!bond_has_slaves(bond)) 343 if (!bond_has_slaves(bond))
344 goto down; 344 goto down;
345 345
346 if (bond->params.mode == BOND_MODE_8023AD) 346 if (BOND_MODE(bond) == BOND_MODE_8023AD)
347 return bond_3ad_set_carrier(bond); 347 return bond_3ad_set_carrier(bond);
348 348
349 bond_for_each_slave(bond, slave, iter) { 349 bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
497 struct list_head *iter; 497 struct list_head *iter;
498 int err = 0; 498 int err = 0;
499 499
500 if (USES_PRIMARY(bond->params.mode)) { 500 if (bond_uses_primary(bond)) {
501 /* write lock already acquired */ 501 /* write lock already acquired */
502 if (bond->curr_active_slave) { 502 if (bond->curr_active_slave) {
503 err = dev_set_promiscuity(bond->curr_active_slave->dev, 503 err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
523 struct list_head *iter; 523 struct list_head *iter;
524 int err = 0; 524 int err = 0;
525 525
526 if (USES_PRIMARY(bond->params.mode)) { 526 if (bond_uses_primary(bond)) {
527 /* write lock already acquired */ 527 /* write lock already acquired */
528 if (bond->curr_active_slave) { 528 if (bond->curr_active_slave) {
529 err = dev_set_allmulti(bond->curr_active_slave->dev, 529 err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
574 dev_uc_unsync(slave_dev, bond_dev); 574 dev_uc_unsync(slave_dev, bond_dev);
575 dev_mc_unsync(slave_dev, bond_dev); 575 dev_mc_unsync(slave_dev, bond_dev);
576 576
577 if (bond->params.mode == BOND_MODE_8023AD) { 577 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
578 /* del lacpdu mc addr from mc list */ 578 /* del lacpdu mc addr from mc list */
579 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 579 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
580 580
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
585/*--------------------------- Active slave change ---------------------------*/ 585/*--------------------------- Active slave change ---------------------------*/
586 586
587/* Update the hardware address list and promisc/allmulti for the new and 587/* Update the hardware address list and promisc/allmulti for the new and
588 * old active slaves (if any). Modes that are !USES_PRIMARY keep all 588 * old active slaves (if any). Modes that are not using primary keep all
589 * slaves up date at all times; only the USES_PRIMARY modes need to call 589 * slaves up date at all times; only the modes that use primary need to call
590 * this function to swap these settings during a failover. 590 * this function to swap these settings during a failover.
591 */ 591 */
592static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, 592static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
747 bond_for_each_slave(bond, slave, iter) { 747 bond_for_each_slave(bond, slave, iter) {
748 if (slave->link == BOND_LINK_UP) 748 if (slave->link == BOND_LINK_UP)
749 return slave; 749 return slave;
750 if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) && 750 if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
751 slave->delay < mintime) { 751 slave->delay < mintime) {
752 mintime = slave->delay; 752 mintime = slave->delay;
753 bestslave = slave; 753 bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
801 new_active->last_link_up = jiffies; 801 new_active->last_link_up = jiffies;
802 802
803 if (new_active->link == BOND_LINK_BACK) { 803 if (new_active->link == BOND_LINK_BACK) {
804 if (USES_PRIMARY(bond->params.mode)) { 804 if (bond_uses_primary(bond)) {
805 pr_info("%s: making interface %s the new active one %d ms earlier\n", 805 pr_info("%s: making interface %s the new active one %d ms earlier\n",
806 bond->dev->name, new_active->dev->name, 806 bond->dev->name, new_active->dev->name,
807 (bond->params.updelay - new_active->delay) * bond->params.miimon); 807 (bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
810 new_active->delay = 0; 810 new_active->delay = 0;
811 new_active->link = BOND_LINK_UP; 811 new_active->link = BOND_LINK_UP;
812 812
813 if (bond->params.mode == BOND_MODE_8023AD) 813 if (BOND_MODE(bond) == BOND_MODE_8023AD)
814 bond_3ad_handle_link_change(new_active, BOND_LINK_UP); 814 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
815 815
816 if (bond_is_lb(bond)) 816 if (bond_is_lb(bond))
817 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); 817 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
818 } else { 818 } else {
819 if (USES_PRIMARY(bond->params.mode)) { 819 if (bond_uses_primary(bond)) {
820 pr_info("%s: making interface %s the new active one\n", 820 pr_info("%s: making interface %s the new active one\n",
821 bond->dev->name, new_active->dev->name); 821 bond->dev->name, new_active->dev->name);
822 } 822 }
823 } 823 }
824 } 824 }
825 825
826 if (USES_PRIMARY(bond->params.mode)) 826 if (bond_uses_primary(bond))
827 bond_hw_addr_swap(bond, new_active, old_active); 827 bond_hw_addr_swap(bond, new_active, old_active);
828 828
829 if (bond_is_lb(bond)) { 829 if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
838 rcu_assign_pointer(bond->curr_active_slave, new_active); 838 rcu_assign_pointer(bond->curr_active_slave, new_active);
839 } 839 }
840 840
841 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { 841 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
842 if (old_active) 842 if (old_active)
843 bond_set_slave_inactive_flags(old_active, 843 bond_set_slave_inactive_flags(old_active,
844 BOND_SLAVE_NOTIFY_NOW); 844 BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
876 * resend only if the bond is brought up with the affected 876 * resend only if the bond is brought up with the affected
877 * bonding modes and the retransmission is enabled */ 877 * bonding modes and the retransmission is enabled */
878 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && 878 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
879 ((USES_PRIMARY(bond->params.mode) && new_active) || 879 ((bond_uses_primary(bond) && new_active) ||
880 bond->params.mode == BOND_MODE_ROUNDROBIN)) { 880 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
881 bond->igmp_retrans = bond->params.resend_igmp; 881 bond->igmp_retrans = bond->params.resend_igmp;
882 queue_delayed_work(bond->wq, &bond->mcast_work, 1); 882 queue_delayed_work(bond->wq, &bond->mcast_work, 1);
883 } 883 }
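The hunk above only arms the first retransmission; the worker is expected to re-queue itself until bond->igmp_retrans is drained. A minimal sketch of such a self-rearming worker, assuming a bond_resend_igmp_join_requests() helper that re-issues the joins (the helper name and the HZ/5 rearm interval are assumptions, not taken from this patch):

        static void igmp_resend_work(struct work_struct *work)
        {
                struct bonding *bond = container_of(work, struct bonding,
                                                    mcast_work.work);

                /* consume one retransmission; rearm while any remain */
                if (bond->igmp_retrans > 1) {
                        bond->igmp_retrans--;
                        queue_delayed_work(bond->wq, &bond->mcast_work, HZ / 5);
                }
                bond_resend_igmp_join_requests(bond);
        }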
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
958 struct slave *slave; 958 struct slave *slave;
959 959
960 bond_for_each_slave(bond, slave, iter) 960 bond_for_each_slave(bond, slave, iter)
961 if (IS_UP(slave->dev)) 961 if (bond_slave_is_up(slave))
962 slave_disable_netpoll(slave); 962 slave_disable_netpoll(slave);
963} 963}
964 964
@@ -1038,6 +1038,7 @@ static void bond_compute_features(struct bonding *bond)
1038 1038
1039 if (!bond_has_slaves(bond)) 1039 if (!bond_has_slaves(bond))
1040 goto done; 1040 goto done;
1041 vlan_features &= NETIF_F_ALL_FOR_ALL;
1041 1042
1042 bond_for_each_slave(bond, slave, iter) { 1043 bond_for_each_slave(bond, slave, iter) {
1043 vlan_features = netdev_increment_features(vlan_features, 1044 vlan_features = netdev_increment_features(vlan_features,
@@ -1084,7 +1085,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1084 struct bonding *bond) 1085 struct bonding *bond)
1085{ 1086{
1086 if (bond_is_slave_inactive(slave)) { 1087 if (bond_is_slave_inactive(slave)) {
1087 if (bond->params.mode == BOND_MODE_ALB && 1088 if (BOND_MODE(bond) == BOND_MODE_ALB &&
1088 skb->pkt_type != PACKET_BROADCAST && 1089 skb->pkt_type != PACKET_BROADCAST &&
1089 skb->pkt_type != PACKET_MULTICAST) 1090 skb->pkt_type != PACKET_MULTICAST)
1090 return false; 1091 return false;
@@ -1126,7 +1127,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1126 1127
1127 skb->dev = bond->dev; 1128 skb->dev = bond->dev;
1128 1129
1129 if (bond->params.mode == BOND_MODE_ALB && 1130 if (BOND_MODE(bond) == BOND_MODE_ALB &&
1130 bond->dev->priv_flags & IFF_BRIDGE_PORT && 1131 bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1131 skb->pkt_type == PACKET_HOST) { 1132 skb->pkt_type == PACKET_HOST) {
1132 1133
@@ -1163,6 +1164,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
1163 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); 1164 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
1164} 1165}
1165 1166
1167static struct slave *bond_alloc_slave(struct bonding *bond)
1168{
1169 struct slave *slave = NULL;
1170
1171 slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1172 if (!slave)
1173 return NULL;
1174
1175 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1176 SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
1177 GFP_KERNEL);
1178 if (!SLAVE_AD_INFO(slave)) {
1179 kfree(slave);
1180 return NULL;
1181 }
1182 }
1183 return slave;
1184}
1185
1186static void bond_free_slave(struct slave *slave)
1187{
1188 struct bonding *bond = bond_get_bond_by_slave(slave);
1189
1190 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1191 kfree(SLAVE_AD_INFO(slave));
1192
1193 kfree(slave);
1194}
1195
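bond_alloc_slave() and bond_free_slave() concentrate mode-specific slave state in one place: in 802.3ad mode the ad_slave_info is now a separate allocation instead of a member embedded in struct slave. Note that bond_free_slave() recovers the bond via bond_get_bond_by_slave(), which is why bond_enslave() below assigns new_slave->bond immediately after allocation. A condensed sketch of the intended pairing (the failure marker is a stand-in for the real error paths that follow):

        new_slave = bond_alloc_slave(bond);     /* allocates ad_info in 802.3ad mode */
        if (!new_slave)
                return -ENOMEM;
        new_slave->bond = bond;                 /* needed before bond_free_slave() */
        new_slave->dev = slave_dev;

        /* ... on any later failure ... */
        bond_free_slave(new_slave);             /* frees ad_info, then the slave */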
1166/* enslave device <slave> to bond device <master> */ 1196/* enslave device <slave> to bond device <master> */
1167int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1197int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1168{ 1198{
@@ -1269,7 +1299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1269 if (!bond_has_slaves(bond)) { 1299 if (!bond_has_slaves(bond)) {
1270 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n", 1300 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
1271 bond_dev->name); 1301 bond_dev->name);
1272 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { 1302 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
1273 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1303 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1274 pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n", 1304 pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
1275 bond_dev->name); 1305 bond_dev->name);
@@ -1290,11 +1320,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1290 bond->dev->addr_assign_type == NET_ADDR_RANDOM) 1320 bond->dev->addr_assign_type == NET_ADDR_RANDOM)
1291 bond_set_dev_addr(bond->dev, slave_dev); 1321 bond_set_dev_addr(bond->dev, slave_dev);
1292 1322
1293 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1323 new_slave = bond_alloc_slave(bond);
1294 if (!new_slave) { 1324 if (!new_slave) {
1295 res = -ENOMEM; 1325 res = -ENOMEM;
1296 goto err_undo_flags; 1326 goto err_undo_flags;
1297 } 1327 }
1328
1329 new_slave->bond = bond;
1330 new_slave->dev = slave_dev;
1298 /* 1331 /*
1299 * Set the new_slave's queue_id to be zero. Queue ID mapping 1332 * Set the new_slave's queue_id to be zero. Queue ID mapping
1300 * is set via sysfs or module option if desired. 1333 * is set via sysfs or module option if desired.
@@ -1317,7 +1350,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1317 ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr); 1350 ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
1318 1351
1319 if (!bond->params.fail_over_mac || 1352 if (!bond->params.fail_over_mac ||
1320 bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 1353 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1321 /* 1354 /*
1322 * Set slave to master's mac address. The application already 1355 * Set slave to master's mac address. The application already
1323 * set the master's mac address to that of the first slave 1356 * set the master's mac address to that of the first slave
@@ -1338,8 +1371,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1338 goto err_restore_mac; 1371 goto err_restore_mac;
1339 } 1372 }
1340 1373
1341 new_slave->bond = bond;
1342 new_slave->dev = slave_dev;
1343 slave_dev->priv_flags |= IFF_BONDING; 1374 slave_dev->priv_flags |= IFF_BONDING;
1344 1375
1345 if (bond_is_lb(bond)) { 1376 if (bond_is_lb(bond)) {
@@ -1351,10 +1382,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1351 goto err_close; 1382 goto err_close;
1352 } 1383 }
1353 1384
1354 /* If the mode USES_PRIMARY, then the following is handled by 1385 /* If the mode uses primary, then the following is handled by
1355 * bond_change_active_slave(). 1386 * bond_change_active_slave().
1356 */ 1387 */
1357 if (!USES_PRIMARY(bond->params.mode)) { 1388 if (!bond_uses_primary(bond)) {
1358 /* set promiscuity level to new slave */ 1389 /* set promiscuity level to new slave */
1359 if (bond_dev->flags & IFF_PROMISC) { 1390 if (bond_dev->flags & IFF_PROMISC) {
1360 res = dev_set_promiscuity(slave_dev, 1); 1391 res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1408,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1377 netif_addr_unlock_bh(bond_dev); 1408 netif_addr_unlock_bh(bond_dev);
1378 } 1409 }
1379 1410
1380 if (bond->params.mode == BOND_MODE_8023AD) { 1411 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1381 /* add lacpdu mc addr to mc list */ 1412 /* add lacpdu mc addr to mc list */
1382 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 1413 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1383 1414
@@ -1450,7 +1481,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1450 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 1481 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1451 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 1482 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1452 1483
1453 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1484 if (bond_uses_primary(bond) && bond->params.primary[0]) {
1454 /* if there is a primary slave, remember it */ 1485 /* if there is a primary slave, remember it */
1455 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { 1486 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1456 bond->primary_slave = new_slave; 1487 bond->primary_slave = new_slave;
@@ -1458,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1458 } 1489 }
1459 } 1490 }
1460 1491
1461 switch (bond->params.mode) { 1492 switch (BOND_MODE(bond)) {
1462 case BOND_MODE_ACTIVEBACKUP: 1493 case BOND_MODE_ACTIVEBACKUP:
1463 bond_set_slave_inactive_flags(new_slave, 1494 bond_set_slave_inactive_flags(new_slave,
1464 BOND_SLAVE_NOTIFY_NOW); 1495 BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1502,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1471 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 1502 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
1472 /* if this is the first slave */ 1503 /* if this is the first slave */
1473 if (!prev_slave) { 1504 if (!prev_slave) {
1474 SLAVE_AD_INFO(new_slave).id = 1; 1505 SLAVE_AD_INFO(new_slave)->id = 1;
1475 /* Initialize AD with the number of times the AD timer is called in 1 second; 1506 /* Initialize AD with the number of times the AD timer is called in 1 second;
1476 * this can be done only after the mac address of the bond is set 1507 * this can be done only after the mac address of the bond is set
1477 */ 1508 */
1478 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); 1509 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1479 } else { 1510 } else {
1480 SLAVE_AD_INFO(new_slave).id = 1511 SLAVE_AD_INFO(new_slave)->id =
1481 SLAVE_AD_INFO(prev_slave).id + 1; 1512 SLAVE_AD_INFO(prev_slave)->id + 1;
1482 } 1513 }
1483 1514
1484 bond_3ad_bind_slave(new_slave); 1515 bond_3ad_bind_slave(new_slave);
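The switch from SLAVE_AD_INFO(new_slave).id to SLAVE_AD_INFO(new_slave)->id follows from the allocation change above: the macro now evaluates to the pointer stored by bond_alloc_slave() rather than an embedded struct. A plausible shape of the corresponding change in bonding.h, shown here as an assumption since the real definition is not part of this hunk:

        struct slave {
                /* ... */
                /* was: struct ad_slave_info ad_info;  (embedded, always present) */
                struct ad_slave_info *ad_info;  /* allocated only in 802.3ad mode */
        };

        #define SLAVE_AD_INFO(slave)    ((slave)->ad_info)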
@@ -1539,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1539 bond_compute_features(bond); 1570 bond_compute_features(bond);
1540 bond_set_carrier(bond); 1571 bond_set_carrier(bond);
1541 1572
1542 if (USES_PRIMARY(bond->params.mode)) { 1573 if (bond_uses_primary(bond)) {
1543 block_netpoll_tx(); 1574 block_netpoll_tx();
1544 write_lock_bh(&bond->curr_slave_lock); 1575 write_lock_bh(&bond->curr_slave_lock);
1545 bond_select_active_slave(bond); 1576 bond_select_active_slave(bond);
@@ -1563,7 +1594,7 @@ err_unregister:
1563 netdev_rx_handler_unregister(slave_dev); 1594 netdev_rx_handler_unregister(slave_dev);
1564 1595
1565err_detach: 1596err_detach:
1566 if (!USES_PRIMARY(bond->params.mode)) 1597 if (!bond_uses_primary(bond))
1567 bond_hw_addr_flush(bond_dev, slave_dev); 1598 bond_hw_addr_flush(bond_dev, slave_dev);
1568 1599
1569 vlan_vids_del_by_dev(slave_dev, bond_dev); 1600 vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1616,7 @@ err_close:
1585 1616
1586err_restore_mac: 1617err_restore_mac:
1587 if (!bond->params.fail_over_mac || 1618 if (!bond->params.fail_over_mac ||
1588 bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 1619 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1589 /* XXX TODO - fom follow mode needs to change master's 1620 /* XXX TODO - fom follow mode needs to change master's
1590 * MAC if this slave's MAC is in use by the bond, or at 1621 * MAC if this slave's MAC is in use by the bond, or at
1591 * least print a warning. 1622 * least print a warning.
@@ -1599,7 +1630,7 @@ err_restore_mtu:
1599 dev_set_mtu(slave_dev, new_slave->original_mtu); 1630 dev_set_mtu(slave_dev, new_slave->original_mtu);
1600 1631
1601err_free: 1632err_free:
1602 kfree(new_slave); 1633 bond_free_slave(new_slave);
1603 1634
1604err_undo_flags: 1635err_undo_flags:
1605 /* Enslave of first slave has failed and we need to fix master's mac */ 1636 /* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1692,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1661 write_lock_bh(&bond->lock); 1692 write_lock_bh(&bond->lock);
1662 1693
1663 /* Inform AD package of unbinding of slave. */ 1694 /* Inform AD package of unbinding of slave. */
1664 if (bond->params.mode == BOND_MODE_8023AD) 1695 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1665 bond_3ad_unbind_slave(slave); 1696 bond_3ad_unbind_slave(slave);
1666 1697
1667 write_unlock_bh(&bond->lock); 1698 write_unlock_bh(&bond->lock);
@@ -1676,7 +1707,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1676 bond->current_arp_slave = NULL; 1707 bond->current_arp_slave = NULL;
1677 1708
1678 if (!all && (!bond->params.fail_over_mac || 1709 if (!all && (!bond->params.fail_over_mac ||
1679 bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { 1710 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
1680 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 1711 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1681 bond_has_slaves(bond)) 1712 bond_has_slaves(bond))
1682 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n", 1713 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1779,10 @@ static int __bond_release_one(struct net_device *bond_dev,
1748 /* must do this from outside any spinlocks */ 1779 /* must do this from outside any spinlocks */
1749 vlan_vids_del_by_dev(slave_dev, bond_dev); 1780 vlan_vids_del_by_dev(slave_dev, bond_dev);
1750 1781
1751 /* If the mode USES_PRIMARY, then this case was handled above by 1782 /* If the mode uses primary, then this case was handled above by
1752 * bond_change_active_slave(..., NULL) 1783 * bond_change_active_slave(..., NULL)
1753 */ 1784 */
1754 if (!USES_PRIMARY(bond->params.mode)) { 1785 if (!bond_uses_primary(bond)) {
1755 /* unset promiscuity level from slave 1786 /* unset promiscuity level from slave
1756 * NOTE: The NETDEV_CHANGEADDR call above may change the value 1787 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1757 * of the IFF_PROMISC flag in the bond_dev, but we need the 1788 * of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1806,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1775 dev_close(slave_dev); 1806 dev_close(slave_dev);
1776 1807
1777 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || 1808 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
1778 bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 1809 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1779 /* restore original ("permanent") mac address */ 1810 /* restore original ("permanent") mac address */
1780 ether_addr_copy(addr.sa_data, slave->perm_hwaddr); 1811 ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
1781 addr.sa_family = slave_dev->type; 1812 addr.sa_family = slave_dev->type;
@@ -1786,7 +1817,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1786 1817
1787 slave_dev->priv_flags &= ~IFF_BONDING; 1818 slave_dev->priv_flags &= ~IFF_BONDING;
1788 1819
1789 kfree(slave); 1820 bond_free_slave(slave);
1790 1821
1791 return 0; /* deletion OK */ 1822 return 0; /* deletion OK */
1792} 1823}
@@ -1821,7 +1852,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1821{ 1852{
1822 struct bonding *bond = netdev_priv(bond_dev); 1853 struct bonding *bond = netdev_priv(bond_dev);
1823 1854
1824 info->bond_mode = bond->params.mode; 1855 info->bond_mode = BOND_MODE(bond);
1825 info->miimon = bond->params.miimon; 1856 info->miimon = bond->params.miimon;
1826 1857
1827 info->num_slaves = bond->slave_cnt; 1858 info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1908,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1877 if (slave->delay) { 1908 if (slave->delay) {
1878 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n", 1909 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
1879 bond->dev->name, 1910 bond->dev->name,
1880 (bond->params.mode == 1911 (BOND_MODE(bond) ==
1881 BOND_MODE_ACTIVEBACKUP) ? 1912 BOND_MODE_ACTIVEBACKUP) ?
1882 (bond_is_active_slave(slave) ? 1913 (bond_is_active_slave(slave) ?
1883 "active " : "backup ") : "", 1914 "active " : "backup ") : "",
@@ -1968,10 +1999,10 @@ static void bond_miimon_commit(struct bonding *bond)
1968 slave->link = BOND_LINK_UP; 1999 slave->link = BOND_LINK_UP;
1969 slave->last_link_up = jiffies; 2000 slave->last_link_up = jiffies;
1970 2001
1971 if (bond->params.mode == BOND_MODE_8023AD) { 2002 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1972 /* prevent it from being the active one */ 2003 /* prevent it from being the active one */
1973 bond_set_backup_slave(slave); 2004 bond_set_backup_slave(slave);
1974 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 2005 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1975 /* make it immediately active */ 2006 /* make it immediately active */
1976 bond_set_active_slave(slave); 2007 bond_set_active_slave(slave);
1977 } else if (slave != bond->primary_slave) { 2008 } else if (slave != bond->primary_slave) {
@@ -1985,7 +2016,7 @@ static void bond_miimon_commit(struct bonding *bond)
1985 slave->duplex ? "full" : "half"); 2016 slave->duplex ? "full" : "half");
1986 2017
1987 /* notify ad that the link status has changed */ 2018 /* notify ad that the link status has changed */
1988 if (bond->params.mode == BOND_MODE_8023AD) 2019 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1989 bond_3ad_handle_link_change(slave, BOND_LINK_UP); 2020 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
1990 2021
1991 if (bond_is_lb(bond)) 2022 if (bond_is_lb(bond))
@@ -2004,15 +2035,15 @@ static void bond_miimon_commit(struct bonding *bond)
2004 2035
2005 slave->link = BOND_LINK_DOWN; 2036 slave->link = BOND_LINK_DOWN;
2006 2037
2007 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || 2038 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2008 bond->params.mode == BOND_MODE_8023AD) 2039 BOND_MODE(bond) == BOND_MODE_8023AD)
2009 bond_set_slave_inactive_flags(slave, 2040 bond_set_slave_inactive_flags(slave,
2010 BOND_SLAVE_NOTIFY_NOW); 2041 BOND_SLAVE_NOTIFY_NOW);
2011 2042
2012 pr_info("%s: link status definitely down for interface %s, disabling it\n", 2043 pr_info("%s: link status definitely down for interface %s, disabling it\n",
2013 bond->dev->name, slave->dev->name); 2044 bond->dev->name, slave->dev->name);
2014 2045
2015 if (bond->params.mode == BOND_MODE_8023AD) 2046 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2016 bond_3ad_handle_link_change(slave, 2047 bond_3ad_handle_link_change(slave,
2017 BOND_LINK_DOWN); 2048 BOND_LINK_DOWN);
2018 2049
@@ -2175,9 +2206,9 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2175 * When the path is validated, collect any vlan information in the 2206 * When the path is validated, collect any vlan information in the
2176 * path. 2207 * path.
2177 */ 2208 */
2178static bool bond_verify_device_path(struct net_device *start_dev, 2209bool bond_verify_device_path(struct net_device *start_dev,
2179 struct net_device *end_dev, 2210 struct net_device *end_dev,
2180 struct bond_vlan_tag *tags) 2211 struct bond_vlan_tag *tags)
2181{ 2212{
2182 struct net_device *upper; 2213 struct net_device *upper;
2183 struct list_head *iter; 2214 struct list_head *iter;
@@ -2287,8 +2318,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2287 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2318 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2288 2319
2289 if (!slave_do_arp_validate(bond, slave)) { 2320 if (!slave_do_arp_validate(bond, slave)) {
2290 if ((slave_do_arp_validate_only(bond, slave) && is_arp) || 2321 if ((slave_do_arp_validate_only(bond) && is_arp) ||
2291 !slave_do_arp_validate_only(bond, slave)) 2322 !slave_do_arp_validate_only(bond))
2292 slave->last_rx = jiffies; 2323 slave->last_rx = jiffies;
2293 return RX_HANDLER_ANOTHER; 2324 return RX_HANDLER_ANOTHER;
2294 } else if (!is_arp) { 2325 } else if (!is_arp) {
@@ -2456,7 +2487,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2456 * do - all replies will be rx'ed on same link causing slaves 2487 * do - all replies will be rx'ed on same link causing slaves
2457 * to be unstable during low/no traffic periods 2488 * to be unstable during low/no traffic periods
2458 */ 2489 */
2459 if (IS_UP(slave->dev)) 2490 if (bond_slave_is_up(slave))
2460 bond_arp_send_all(bond, slave); 2491 bond_arp_send_all(bond, slave);
2461 } 2492 }
2462 2493
@@ -2678,10 +2709,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2678 bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); 2709 bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
2679 2710
2680 bond_for_each_slave_rcu(bond, slave, iter) { 2711 bond_for_each_slave_rcu(bond, slave, iter) {
2681 if (!found && !before && IS_UP(slave->dev)) 2712 if (!found && !before && bond_slave_is_up(slave))
2682 before = slave; 2713 before = slave;
2683 2714
2684 if (found && !new_slave && IS_UP(slave->dev)) 2715 if (found && !new_slave && bond_slave_is_up(slave))
2685 new_slave = slave; 2716 new_slave = slave;
2686 /* if the link state is up at this point, we 2717 /* if the link state is up at this point, we
2687 * mark it down - this can happen if we have 2718 * mark it down - this can happen if we have
@@ -2690,7 +2721,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2690 * one the current slave so it is still marked 2721 * one the current slave so it is still marked
2691 * up when it is actually down 2722 * up when it is actually down
2692 */ 2723 */
2693 if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { 2724 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
2694 slave->link = BOND_LINK_DOWN; 2725 slave->link = BOND_LINK_DOWN;
2695 if (slave->link_failure_count < UINT_MAX) 2726 if (slave->link_failure_count < UINT_MAX)
2696 slave->link_failure_count++; 2727 slave->link_failure_count++;
@@ -2853,7 +2884,7 @@ static int bond_slave_netdev_event(unsigned long event,
2853 2884
2854 bond_update_speed_duplex(slave); 2885 bond_update_speed_duplex(slave);
2855 2886
2856 if (bond->params.mode == BOND_MODE_8023AD) { 2887 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2857 if (old_speed != slave->speed) 2888 if (old_speed != slave->speed)
2858 bond_3ad_adapter_speed_changed(slave); 2889 bond_3ad_adapter_speed_changed(slave);
2859 if (old_duplex != slave->duplex) 2890 if (old_duplex != slave->duplex)
@@ -2881,7 +2912,7 @@ static int bond_slave_netdev_event(unsigned long event,
2881 break; 2912 break;
2882 case NETDEV_CHANGENAME: 2913 case NETDEV_CHANGENAME:
2883 /* we don't care if we don't have primary set */ 2914 /* we don't care if we don't have primary set */
2884 if (!USES_PRIMARY(bond->params.mode) || 2915 if (!bond_uses_primary(bond) ||
2885 !bond->params.primary[0]) 2916 !bond->params.primary[0])
2886 break; 2917 break;
2887 2918
@@ -3011,20 +3042,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
3011 * bond_xmit_hash - generate a hash value based on the xmit policy 3042 * bond_xmit_hash - generate a hash value based on the xmit policy
3012 * @bond: bonding device 3043 * @bond: bonding device
3013 * @skb: buffer to use for headers 3044 * @skb: buffer to use for headers
3014 * @count: modulo value
3015 * 3045 *
3016 * This function will extract the necessary headers from the skb buffer and use 3046 * This function will extract the necessary headers from the skb buffer and use
3017 * them to generate a hash based on the xmit_policy set in the bonding device 3047 * them to generate a hash based on the xmit_policy set in the bonding device
3018 * which will be reduced modulo count before returning.
3019 */ 3048 */
3020int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count) 3049u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3021{ 3050{
3022 struct flow_keys flow; 3051 struct flow_keys flow;
3023 u32 hash; 3052 u32 hash;
3024 3053
3025 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 3054 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3026 !bond_flow_dissect(bond, skb, &flow)) 3055 !bond_flow_dissect(bond, skb, &flow))
3027 return bond_eth_hash(skb) % count; 3056 return bond_eth_hash(skb);
3028 3057
3029 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 3058 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3030 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) 3059 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3035,7 +3064,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
3035 hash ^= (hash >> 16); 3064 hash ^= (hash >> 16);
3036 hash ^= (hash >> 8); 3065 hash ^= (hash >> 8);
3037 3066
3038 return hash % count; 3067 return hash;
3039} 3068}
3040 3069
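Returning the full 32-bit hash instead of hash % count lets each caller pick its own divisor and, for the TLB changes in this series, reuse the value as a stable flow identifier. The caller-side reduction now looks like this (the first reduction matches bond_xmit_xor() below; the flow-table bucket is an assumed illustration of a TLB-style user):

        u32 hash = bond_xmit_hash(bond, skb);
        int slave_id, bucket;

        slave_id = hash % bond->slave_cnt;       /* balance-xor: spread over slaves */
        bucket   = hash % TLB_HASH_TABLE_SIZE;   /* hypothetical flow-table index */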
3041/*-------------------------- Device entry points ----------------------------*/ 3070/*-------------------------- Device entry points ----------------------------*/
@@ -3046,7 +3075,7 @@ static void bond_work_init_all(struct bonding *bond)
3046 bond_resend_igmp_join_requests_delayed); 3075 bond_resend_igmp_join_requests_delayed);
3047 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3076 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3048 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 3077 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3049 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3078 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3050 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); 3079 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
3051 else 3080 else
3052 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); 3081 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3073,7 +3102,7 @@ static int bond_open(struct net_device *bond_dev)
3073 if (bond_has_slaves(bond)) { 3102 if (bond_has_slaves(bond)) {
3074 read_lock(&bond->curr_slave_lock); 3103 read_lock(&bond->curr_slave_lock);
3075 bond_for_each_slave(bond, slave, iter) { 3104 bond_for_each_slave(bond, slave, iter) {
3076 if (USES_PRIMARY(bond->params.mode) 3105 if (bond_uses_primary(bond)
3077 && (slave != bond->curr_active_slave)) { 3106 && (slave != bond->curr_active_slave)) {
3078 bond_set_slave_inactive_flags(slave, 3107 bond_set_slave_inactive_flags(slave,
3079 BOND_SLAVE_NOTIFY_NOW); 3108 BOND_SLAVE_NOTIFY_NOW);
@@ -3092,9 +3121,10 @@ static int bond_open(struct net_device *bond_dev)
3092 /* bond_alb_initialize must be called before the timer 3121 /* bond_alb_initialize must be called before the timer
3093 * is started. 3122 * is started.
3094 */ 3123 */
3095 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) 3124 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3096 return -ENOMEM; 3125 return -ENOMEM;
3097 queue_delayed_work(bond->wq, &bond->alb_work, 0); 3126 if (bond->params.tlb_dynamic_lb)
3127 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3098 } 3128 }
3099 3129
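Gating alb_work on tlb_dynamic_lb means that with the knob off, the periodic rebalancer never runs and a flow stays pinned to the slave chosen by its transmit hash. A sketch of the resulting transmit-time decision; both helper names are assumptions, the real selection logic lives in bond_alb.c:

        /* sketch: static vs. dynamic slave selection in TLB mode */
        if (bond->params.tlb_dynamic_lb)
                tx_slave = tlb_choose_channel(bond, hash, skb->len);  /* load-based */
        else
                tx_slave = tlb_slave_for_hash(bond, hash);            /* hash-pinned */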
3100 if (bond->params.miimon) /* link check interval, in milliseconds. */ 3130 if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3105,7 +3135,7 @@ static int bond_open(struct net_device *bond_dev)
3105 bond->recv_probe = bond_arp_rcv; 3135 bond->recv_probe = bond_arp_rcv;
3106 } 3136 }
3107 3137
3108 if (bond->params.mode == BOND_MODE_8023AD) { 3138 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3109 queue_delayed_work(bond->wq, &bond->ad_work, 0); 3139 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3110 /* register to receive LACPDUs */ 3140 /* register to receive LACPDUs */
3111 bond->recv_probe = bond_3ad_lacpdu_recv; 3141 bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3310,7 +3340,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
3310 3340
3311 3341
3312 rcu_read_lock(); 3342 rcu_read_lock();
3313 if (USES_PRIMARY(bond->params.mode)) { 3343 if (bond_uses_primary(bond)) {
3314 slave = rcu_dereference(bond->curr_active_slave); 3344 slave = rcu_dereference(bond->curr_active_slave);
3315 if (slave) { 3345 if (slave) {
3316 dev_uc_sync(slave->dev, bond_dev); 3346 dev_uc_sync(slave->dev, bond_dev);
@@ -3464,7 +3494,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3464 struct list_head *iter; 3494 struct list_head *iter;
3465 int res = 0; 3495 int res = 0;
3466 3496
3467 if (bond->params.mode == BOND_MODE_ALB) 3497 if (BOND_MODE(bond) == BOND_MODE_ALB)
3468 return bond_alb_set_mac_address(bond_dev, addr); 3498 return bond_alb_set_mac_address(bond_dev, addr);
3469 3499
3470 3500
@@ -3475,7 +3505,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3475 * Returning an error causes ifenslave to fail. 3505 * Returning an error causes ifenslave to fail.
3476 */ 3506 */
3477 if (bond->params.fail_over_mac && 3507 if (bond->params.fail_over_mac &&
3478 bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3508 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3479 return 0; 3509 return 0;
3480 3510
3481 if (!is_valid_ether_addr(sa->sa_data)) 3511 if (!is_valid_ether_addr(sa->sa_data))
@@ -3555,7 +3585,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3555 /* Here we start from the slave with slave_id */ 3585 /* Here we start from the slave with slave_id */
3556 bond_for_each_slave_rcu(bond, slave, iter) { 3586 bond_for_each_slave_rcu(bond, slave, iter) {
3557 if (--i < 0) { 3587 if (--i < 0) {
3558 if (slave_can_tx(slave)) { 3588 if (bond_slave_can_tx(slave)) {
3559 bond_dev_queue_xmit(bond, skb, slave->dev); 3589 bond_dev_queue_xmit(bond, skb, slave->dev);
3560 return; 3590 return;
3561 } 3591 }
@@ -3567,7 +3597,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3567 bond_for_each_slave_rcu(bond, slave, iter) { 3597 bond_for_each_slave_rcu(bond, slave, iter) {
3568 if (--i < 0) 3598 if (--i < 0)
3569 break; 3599 break;
3570 if (slave_can_tx(slave)) { 3600 if (bond_slave_can_tx(slave)) {
3571 bond_dev_queue_xmit(bond, skb, slave->dev); 3601 bond_dev_queue_xmit(bond, skb, slave->dev);
3572 return; 3602 return;
3573 } 3603 }
@@ -3624,7 +3654,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3624 */ 3654 */
3625 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { 3655 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3626 slave = rcu_dereference(bond->curr_active_slave); 3656 slave = rcu_dereference(bond->curr_active_slave);
3627 if (slave && slave_can_tx(slave)) 3657 if (slave && bond_slave_can_tx(slave))
3628 bond_dev_queue_xmit(bond, skb, slave->dev); 3658 bond_dev_queue_xmit(bond, skb, slave->dev);
3629 else 3659 else
3630 bond_xmit_slave_id(bond, skb, 0); 3660 bond_xmit_slave_id(bond, skb, 0);
@@ -3662,7 +3692,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
3662{ 3692{
3663 struct bonding *bond = netdev_priv(bond_dev); 3693 struct bonding *bond = netdev_priv(bond_dev);
3664 3694
3665 bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt)); 3695 bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
3666 3696
3667 return NETDEV_TX_OK; 3697 return NETDEV_TX_OK;
3668} 3698}
@@ -3677,7 +3707,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3677 bond_for_each_slave_rcu(bond, slave, iter) { 3707 bond_for_each_slave_rcu(bond, slave, iter) {
3678 if (bond_is_last_slave(bond, slave)) 3708 if (bond_is_last_slave(bond, slave))
3679 break; 3709 break;
3680 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { 3710 if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3681 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3711 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3682 3712
3683 if (!skb2) { 3713 if (!skb2) {
@@ -3689,7 +3719,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3689 bond_dev_queue_xmit(bond, skb2, slave->dev); 3719 bond_dev_queue_xmit(bond, skb2, slave->dev);
3690 } 3720 }
3691 } 3721 }
3692 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) 3722 if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
3693 bond_dev_queue_xmit(bond, skb, slave->dev); 3723 bond_dev_queue_xmit(bond, skb, slave->dev);
3694 else 3724 else
3695 dev_kfree_skb_any(skb); 3725 dev_kfree_skb_any(skb);
@@ -3714,7 +3744,7 @@ static inline int bond_slave_override(struct bonding *bond,
3714 /* Find out if any slaves have the same mapping as this skb. */ 3744 /* Find out if any slaves have the same mapping as this skb. */
3715 bond_for_each_slave_rcu(bond, slave, iter) { 3745 bond_for_each_slave_rcu(bond, slave, iter) {
3716 if (slave->queue_id == skb->queue_mapping) { 3746 if (slave->queue_id == skb->queue_mapping) {
3717 if (slave_can_tx(slave)) { 3747 if (bond_slave_can_tx(slave)) {
3718 bond_dev_queue_xmit(bond, skb, slave->dev); 3748 bond_dev_queue_xmit(bond, skb, slave->dev);
3719 return 0; 3749 return 0;
3720 } 3750 }
@@ -3755,12 +3785,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3755{ 3785{
3756 struct bonding *bond = netdev_priv(dev); 3786 struct bonding *bond = netdev_priv(dev);
3757 3787
3758 if (TX_QUEUE_OVERRIDE(bond->params.mode)) { 3788 if (bond_should_override_tx_queue(bond) &&
3759 if (!bond_slave_override(bond, skb)) 3789 !bond_slave_override(bond, skb))
3760 return NETDEV_TX_OK; 3790 return NETDEV_TX_OK;
3761 }
3762 3791
3763 switch (bond->params.mode) { 3792 switch (BOND_MODE(bond)) {
3764 case BOND_MODE_ROUNDROBIN: 3793 case BOND_MODE_ROUNDROBIN:
3765 return bond_xmit_roundrobin(skb, dev); 3794 return bond_xmit_roundrobin(skb, dev);
3766 case BOND_MODE_ACTIVEBACKUP: 3795 case BOND_MODE_ACTIVEBACKUP:
@@ -3772,12 +3801,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3772 case BOND_MODE_8023AD: 3801 case BOND_MODE_8023AD:
3773 return bond_3ad_xmit_xor(skb, dev); 3802 return bond_3ad_xmit_xor(skb, dev);
3774 case BOND_MODE_ALB: 3803 case BOND_MODE_ALB:
3775 case BOND_MODE_TLB:
3776 return bond_alb_xmit(skb, dev); 3804 return bond_alb_xmit(skb, dev);
3805 case BOND_MODE_TLB:
3806 return bond_tlb_xmit(skb, dev);
3777 default: 3807 default:
3778 /* Should never happen, mode already checked */ 3808 /* Should never happen, mode already checked */
3779 pr_err("%s: Error: Unknown bonding mode %d\n", 3809 pr_err("%s: Error: Unknown bonding mode %d\n",
3780 dev->name, bond->params.mode); 3810 dev->name, BOND_MODE(bond));
3781 WARN_ON_ONCE(1); 3811 WARN_ON_ONCE(1);
3782 dev_kfree_skb_any(skb); 3812 dev_kfree_skb_any(skb);
3783 return NETDEV_TX_OK; 3813 return NETDEV_TX_OK;
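bond_tlb_xmit() is the new per-mode entry split out of bond_alb_xmit(). Its body is not part of this file, so the following is only a sketch of what the dispatch above requires of it, built from helpers visible elsewhere in this patch (tlb_slave_for_hash() is an assumed name):

        int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        {
                struct bonding *bond = netdev_priv(bond_dev);
                struct slave *slave;

                /* full hash from bond_xmit_hash(); reduction is the callee's choice */
                slave = tlb_slave_for_hash(bond, bond_xmit_hash(bond, skb));
                if (slave && bond_slave_can_tx(slave))
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                else
                        dev_kfree_skb_any(skb);

                return NETDEV_TX_OK;
        }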
@@ -3817,14 +3847,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
3817 ecmd->duplex = DUPLEX_UNKNOWN; 3847 ecmd->duplex = DUPLEX_UNKNOWN;
3818 ecmd->port = PORT_OTHER; 3848 ecmd->port = PORT_OTHER;
3819 3849
3820 /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we 3850 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
3821 * do not need to check mode. Though link speed might not represent 3851 * do not need to check mode. Though link speed might not represent
3822 * the true receive or transmit bandwidth (not all modes are symmetric), 3852 * the true receive or transmit bandwidth (not all modes are symmetric),
3823 * this is an accurate maximum. 3853 * this is an accurate maximum.
3824 */ 3854 */
3825 read_lock(&bond->lock); 3855 read_lock(&bond->lock);
3826 bond_for_each_slave(bond, slave, iter) { 3856 bond_for_each_slave(bond, slave, iter) {
3827 if (SLAVE_IS_OK(slave)) { 3857 if (bond_slave_can_tx(slave)) {
3828 if (slave->speed != SPEED_UNKNOWN) 3858 if (slave->speed != SPEED_UNKNOWN)
3829 speed += slave->speed; 3859 speed += slave->speed;
3830 if (ecmd->duplex == DUPLEX_UNKNOWN && 3860 if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3915,7 +3945,7 @@ void bond_setup(struct net_device *bond_dev)
3915 /* Initialize the device options */ 3945 /* Initialize the device options */
3916 bond_dev->tx_queue_len = 0; 3946 bond_dev->tx_queue_len = 0;
3917 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; 3947 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
3918 bond_dev->priv_flags |= IFF_BONDING; 3948 bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
3919 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 3949 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
3920 3950
3921 /* At first, we block adding VLANs. That's the only way to 3951 /* At first, we block adding VLANs. That's the only way to
@@ -3994,7 +4024,8 @@ static int bond_check_params(struct bond_params *params)
3994 4024
3995 if (xmit_hash_policy) { 4025 if (xmit_hash_policy) {
3996 if ((bond_mode != BOND_MODE_XOR) && 4026 if ((bond_mode != BOND_MODE_XOR) &&
3997 (bond_mode != BOND_MODE_8023AD)) { 4027 (bond_mode != BOND_MODE_8023AD) &&
4028 (bond_mode != BOND_MODE_TLB)) {
3998 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 4029 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
3999 bond_mode_name(bond_mode)); 4030 bond_mode_name(bond_mode));
4000 } else { 4031 } else {
@@ -4079,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
4079 } 4110 }
4080 4111
4081 /* reset values for 802.3ad/TLB/ALB */ 4112 /* reset values for 802.3ad/TLB/ALB */
4082 if (BOND_NO_USES_ARP(bond_mode)) { 4113 if (!bond_mode_uses_arp(bond_mode)) {
4083 if (!miimon) { 4114 if (!miimon) {
4084 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4115 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4085 pr_warn("Forcing miimon to 100msec\n"); 4116 pr_warn("Forcing miimon to 100msec\n");
@@ -4161,7 +4192,7 @@ static int bond_check_params(struct bond_params *params)
4161 catch mistakes */ 4192 catch mistakes */
4162 __be32 ip; 4193 __be32 ip;
4163 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || 4194 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4164 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { 4195 !bond_is_ip_target_ok(ip)) {
4165 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4196 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4166 arp_ip_target[i]); 4197 arp_ip_target[i]);
4167 arp_interval = 0; 4198 arp_interval = 0;
@@ -4234,7 +4265,7 @@ static int bond_check_params(struct bond_params *params)
4234 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n"); 4265 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
4235 } 4266 }
4236 4267
4237 if (primary && !USES_PRIMARY(bond_mode)) { 4268 if (primary && !bond_mode_uses_primary(bond_mode)) {
4238 /* currently, using a primary only makes sense 4269 /* currently, using a primary only makes sense
4239 * in active backup, TLB or ALB modes 4270 * in active backup, TLB or ALB modes
4240 */ 4271 */
@@ -4300,6 +4331,7 @@ static int bond_check_params(struct bond_params *params)
4300 params->min_links = min_links; 4331 params->min_links = min_links;
4301 params->lp_interval = lp_interval; 4332 params->lp_interval = lp_interval;
4302 params->packets_per_slave = packets_per_slave; 4333 params->packets_per_slave = packets_per_slave;
4334 params->tlb_dynamic_lb = 1; /* Default value */
4303 if (packets_per_slave > 0) { 4335 if (packets_per_slave > 0) {
4304 params->reciprocal_packets_per_slave = 4336 params->reciprocal_packets_per_slave =
4305 reciprocal_value(packets_per_slave); 4337 reciprocal_value(packets_per_slave);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f847e165d252..5ab3c1847e67 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id)) 56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
57 goto nla_put_failure; 57 goto nla_put_failure;
58 58
59 if (slave->bond->params.mode == BOND_MODE_8023AD) { 59 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
60 const struct aggregator *agg; 60 const struct aggregator *agg;
61 61
62 agg = SLAVE_AD_INFO(slave).port.aggregator; 62 agg = SLAVE_AD_INFO(slave)->port.aggregator;
63 if (agg) 63 if (agg)
64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, 64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
65 agg->aggregator_identifier)) 65 agg->aggregator_identifier))
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
407 unsigned int packets_per_slave; 407 unsigned int packets_per_slave;
408 int i, targets_added; 408 int i, targets_added;
409 409
410 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode)) 410 if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
411 goto nla_put_failure; 411 goto nla_put_failure;
412 412
413 if (slave_dev && 413 if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
505 bond->params.ad_select)) 505 bond->params.ad_select))
506 goto nla_put_failure; 506 goto nla_put_failure;
507 507
508 if (bond->params.mode == BOND_MODE_8023AD) { 508 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
509 struct ad_info info; 509 struct ad_info info;
510 510
511 if (!bond_3ad_get_active_agg_info(bond, &info)) { 511 if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 832070298446..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
70 const struct bond_opt_value *newval); 70 const struct bond_opt_value *newval);
71static int bond_option_slaves_set(struct bonding *bond, 71static int bond_option_slaves_set(struct bonding *bond,
72 const struct bond_opt_value *newval); 72 const struct bond_opt_value *newval);
73static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
74 const struct bond_opt_value *newval);
73 75
74 76
75static const struct bond_opt_value bond_mode_tbl[] = { 77static const struct bond_opt_value bond_mode_tbl[] = {
@@ -180,6 +182,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
180 { NULL, -1, 0}, 182 { NULL, -1, 0},
181}; 183};
182 184
185static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
186 { "off", 0, 0},
187 { "on", 1, BOND_VALFLAG_DEFAULT},
188 { NULL, -1, 0}
189};
190
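A bond_opt_value table maps user-visible strings to numeric values; BOND_VALFLAG_DEFAULT marks "on" as what a fresh bond gets, matching the params->tlb_dynamic_lb = 1 default set in bond_check_params() above. A programmatic set goes through the same table, roughly as follows (bond_opt_initstr() and __bond_opt_set() are the existing option-framework helpers):

        struct bond_opt_value newval;
        int err;

        /* parse "off" against bond_tlb_dynamic_lb_tbl and apply it;
         * caller holds RTNL, bond_opt_tryset_rtnl() wraps this for sysfs */
        bond_opt_initstr(&newval, "off");
        err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);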
183static const struct bond_option bond_opts[] = { 191static const struct bond_option bond_opts[] = {
184 [BOND_OPT_MODE] = { 192 [BOND_OPT_MODE] = {
185 .id = BOND_OPT_MODE, 193 .id = BOND_OPT_MODE,
@@ -200,7 +208,7 @@ static const struct bond_option bond_opts[] = {
200 [BOND_OPT_XMIT_HASH] = { 208 [BOND_OPT_XMIT_HASH] = {
201 .id = BOND_OPT_XMIT_HASH, 209 .id = BOND_OPT_XMIT_HASH,
202 .name = "xmit_hash_policy", 210 .name = "xmit_hash_policy",
203 .desc = "balance-xor and 802.3ad hashing method", 211 .desc = "balance-xor, 802.3ad, and tlb hashing method",
204 .values = bond_xmit_hashtype_tbl, 212 .values = bond_xmit_hashtype_tbl,
205 .set = bond_option_xmit_hash_policy_set 213 .set = bond_option_xmit_hash_policy_set
206 }, 214 },
@@ -365,9 +373,33 @@ static const struct bond_option bond_opts[] = {
365 .flags = BOND_OPTFLAG_RAWVAL, 373 .flags = BOND_OPTFLAG_RAWVAL,
366 .set = bond_option_slaves_set 374 .set = bond_option_slaves_set
367 }, 375 },
376 [BOND_OPT_TLB_DYNAMIC_LB] = {
377 .id = BOND_OPT_TLB_DYNAMIC_LB,
378 .name = "tlb_dynamic_lb",
379 .desc = "Enable dynamic flow shuffling",
380 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
381 .values = bond_tlb_dynamic_lb_tbl,
382 .flags = BOND_OPTFLAG_IFDOWN,
383 .set = bond_option_tlb_dynamic_lb_set,
384 },
368 { } 385 { }
369}; 386};
370 387
388/* Searches for an option by name */
389const struct bond_option *bond_opt_get_by_name(const char *name)
390{
391 const struct bond_option *opt;
392 int option;
393
394 for (option = 0; option < BOND_OPT_LAST; option++) {
395 opt = bond_opt_get(option);
396 if (opt && !strcmp(opt->name, name))
397 return opt;
398 }
399
400 return NULL;
401}
402
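The lookup is a linear scan over BOND_OPT_LAST entries, which is fine here: it runs only on sysfs writes, and it is what lets a single generic store handler serve every attribute (see the bond_sysfs.c changes below). Typical use:

        const struct bond_option *opt = bond_opt_get_by_name("miimon");

        if (opt)
                ret = bond_opt_tryset_rtnl(bond, opt->id, buf);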
371/* Searches for a value in opt's values[] table */ 403/* Searches for a value in opt's values[] table */
372const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val) 404const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
373{ 405{
@@ -641,7 +673,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
641 673
642int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) 674int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
643{ 675{
644 if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) { 676 if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
645 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", 677 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
646 bond->dev->name, newval->string); 678 bond->dev->name, newval->string);
647 /* disable arp monitoring */ 679 /* disable arp monitoring */
@@ -662,7 +694,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
662static struct net_device *__bond_option_active_slave_get(struct bonding *bond, 694static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
663 struct slave *slave) 695 struct slave *slave)
664{ 696{
665 return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL; 697 return bond_uses_primary(bond) && slave ? slave->dev : NULL;
666} 698}
667 699
668struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond) 700struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -727,7 +759,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
727 bond->dev->name, new_active->dev->name); 759 bond->dev->name, new_active->dev->name);
728 } else { 760 } else {
729 if (old_active && (new_active->link == BOND_LINK_UP) && 761 if (old_active && (new_active->link == BOND_LINK_UP) &&
730 IS_UP(new_active->dev)) { 762 bond_slave_is_up(new_active)) {
731 pr_info("%s: Setting %s as active slave\n", 763 pr_info("%s: Setting %s as active slave\n",
732 bond->dev->name, new_active->dev->name); 764 bond->dev->name, new_active->dev->name);
733 bond_change_active_slave(bond, new_active); 765 bond_change_active_slave(bond, new_active);
@@ -746,6 +778,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
746 return ret; 778 return ret;
747} 779}
748 780
781/* There are two tricky bits here. First, if MII monitoring is activated, then
782 * we must disable ARP monitoring. Second, if the timer isn't running, we must
783 * start it.
784 */
749static int bond_option_miimon_set(struct bonding *bond, 785static int bond_option_miimon_set(struct bonding *bond,
750 const struct bond_opt_value *newval) 786 const struct bond_opt_value *newval)
751{ 787{
@@ -784,6 +820,10 @@ static int bond_option_miimon_set(struct bonding *bond,
784 return 0; 820 return 0;
785} 821}
786 822
823/* Set the up and down delays. These must be multiples of the
824 * MII monitoring value, and are stored internally as the multiplier.
825 * Thus, we must translate to ms for the real world.
826 */
787static int bond_option_updelay_set(struct bonding *bond, 827static int bond_option_updelay_set(struct bonding *bond,
788 const struct bond_opt_value *newval) 828 const struct bond_opt_value *newval)
789{ 829{
@@ -842,6 +882,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
842 return 0; 882 return 0;
843} 883}
844 884
885/* There are two tricky bits here. First, if ARP monitoring is activated, then
886 * we must disable MII monitoring. Second, if the ARP timer isn't running,
887 * we must start it.
888 */
845static int bond_option_arp_interval_set(struct bonding *bond, 889static int bond_option_arp_interval_set(struct bonding *bond,
846 const struct bond_opt_value *newval) 890 const struct bond_opt_value *newval)
847{ 891{
@@ -899,7 +943,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
899 __be32 *targets = bond->params.arp_targets; 943 __be32 *targets = bond->params.arp_targets;
900 int ind; 944 int ind;
901 945
902 if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { 946 if (!bond_is_ip_target_ok(target)) {
903 pr_err("%s: invalid ARP target %pI4 specified for addition\n", 947 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
904 bond->dev->name, &target); 948 bond->dev->name, &target);
905 return -EINVAL; 949 return -EINVAL;
@@ -944,7 +988,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
944 unsigned long *targets_rx; 988 unsigned long *targets_rx;
945 int ind, i; 989 int ind, i;
946 990
947 if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { 991 if (!bond_is_ip_target_ok(target)) {
948 pr_err("%s: invalid ARP target %pI4 specified for removal\n", 992 pr_err("%s: invalid ARP target %pI4 specified for removal\n",
949 bond->dev->name, &target); 993 bond->dev->name, &target);
950 return -EINVAL; 994 return -EINVAL;
@@ -1338,3 +1382,13 @@ err_no_cmd:
1338 ret = -EPERM; 1382 ret = -EPERM;
1339 goto out; 1383 goto out;
1340} 1384}
1385
1386static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
1387 const struct bond_opt_value *newval)
1388{
1389 pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
1390 bond->dev->name, newval->string, newval->value);
1391 bond->params.tlb_dynamic_lb = newval->value;
1392
1393 return 0;
1394}
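Because the option is declared with BOND_OPTFLAG_IFDOWN, the options framework only invokes this setter while the bond is down, so the bare assignment needs no extra synchronization against the transmit path. The stored value is consulted again when the device comes up, as in the bond_open() hunk earlier in this patch:

        /* bond_open(): start the rebalancing monitor only for dynamic TLB/ALB */
        if (bond->params.tlb_dynamic_lb)
                queue_delayed_work(bond->wq, &bond->alb_work, 0);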
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
62 BOND_OPT_RESEND_IGMP, 62 BOND_OPT_RESEND_IGMP,
63 BOND_OPT_LP_INTERVAL, 63 BOND_OPT_LP_INTERVAL,
64 BOND_OPT_SLAVES, 64 BOND_OPT_SLAVES,
65 BOND_OPT_TLB_DYNAMIC_LB,
65 BOND_OPT_LAST 66 BOND_OPT_LAST
66}; 67};
67 68
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
104const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, 105const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
105 struct bond_opt_value *val); 106 struct bond_opt_value *val);
106const struct bond_option *bond_opt_get(unsigned int option); 107const struct bond_option *bond_opt_get(unsigned int option);
108const struct bond_option *bond_opt_get_by_name(const char *name);
107const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); 109const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
108 110
109/* This helper is used to initialize a bond_opt_value structure for parameter 111/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 013fdd0f45e9..b215b479bb3a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
72 curr = rcu_dereference(bond->curr_active_slave); 72 curr = rcu_dereference(bond->curr_active_slave);
73 73
74 seq_printf(seq, "Bonding Mode: %s", 74 seq_printf(seq, "Bonding Mode: %s",
75 bond_mode_name(bond->params.mode)); 75 bond_mode_name(BOND_MODE(bond)));
76 76
77 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && 77 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
78 bond->params.fail_over_mac) { 78 bond->params.fail_over_mac) {
79 optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC, 79 optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
80 bond->params.fail_over_mac); 80 bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
83 83
84 seq_printf(seq, "\n"); 84 seq_printf(seq, "\n");
85 85
86 if (bond->params.mode == BOND_MODE_XOR || 86 if (BOND_MODE(bond) == BOND_MODE_XOR ||
87 bond->params.mode == BOND_MODE_8023AD) { 87 BOND_MODE(bond) == BOND_MODE_8023AD) {
88 optval = bond_opt_get_val(BOND_OPT_XMIT_HASH, 88 optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
89 bond->params.xmit_policy); 89 bond->params.xmit_policy);
90 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", 90 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
91 optval->string, bond->params.xmit_policy); 91 optval->string, bond->params.xmit_policy);
92 } 92 }
93 93
94 if (USES_PRIMARY(bond->params.mode)) { 94 if (bond_uses_primary(bond)) {
95 seq_printf(seq, "Primary Slave: %s", 95 seq_printf(seq, "Primary Slave: %s",
96 (bond->primary_slave) ? 96 (bond->primary_slave) ?
97 bond->primary_slave->dev->name : "None"); 97 bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
134 seq_printf(seq, "\n"); 134 seq_printf(seq, "\n");
135 } 135 }
136 136
137 if (bond->params.mode == BOND_MODE_8023AD) { 137 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
138 struct ad_info ad_info; 138 struct ad_info ad_info;
139 139
140 seq_puts(seq, "\n802.3ad info\n"); 140 seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
188 188
189 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); 189 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
190 190
191 if (bond->params.mode == BOND_MODE_8023AD) { 191 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
192 const struct aggregator *agg 192 const struct aggregator *agg
193 = SLAVE_AD_INFO(slave).port.aggregator; 193 = SLAVE_AD_INFO(slave)->port.aggregator;
194 194
195 if (agg) 195 if (agg)
196 seq_printf(seq, "Aggregator ID: %d\n", 196 seq_printf(seq, "Aggregator ID: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..daed52f68ce1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
45#define to_dev(obj) container_of(obj, struct device, kobj) 45#define to_dev(obj) container_of(obj, struct device, kobj)
46#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) 46#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
47 47
48/* 48/* "show" function for the bond_masters attribute.
49 * "show" function for the bond_masters attribute.
50 * The class parameter is ignored. 49 * The class parameter is ignored.
51 */ 50 */
52static ssize_t bonding_show_bonds(struct class *cls, 51static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
88 return NULL; 87 return NULL;
89} 88}
90 89
91/* 90/* "store" function for the bond_masters attribute. This is what
92 * "store" function for the bond_masters attribute. This is what
93 * creates and deletes entire bonds. 91 * creates and deletes entire bonds.
94 * 92 *
95 * The class parameter is ignored. 93 * The class parameter is ignored.
96 * 94 *
97 */ 95 */
98
99static ssize_t bonding_store_bonds(struct class *cls, 96static ssize_t bonding_store_bonds(struct class *cls,
100 struct class_attribute *attr, 97 struct class_attribute *attr,
101 const char *buffer, size_t count) 98 const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
158 .store = bonding_store_bonds, 155 .store = bonding_store_bonds,
159}; 156};
160 157
161/* 158/* Generic "store" method for bonding sysfs option setting */
162 * Show the slaves in the current bond. 159static ssize_t bonding_sysfs_store_option(struct device *d,
163 */ 160 struct device_attribute *attr,
161 const char *buffer, size_t count)
162{
163 struct bonding *bond = to_bond(d);
164 const struct bond_option *opt;
165 int ret;
166
167 opt = bond_opt_get_by_name(attr->attr.name);
168 if (WARN_ON(!opt))
169 return -ENOENT;
170 ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
171 if (!ret)
172 ret = count;
173
174 return ret;
175}
176
177/* Show the slaves in the current bond. */
164static ssize_t bonding_show_slaves(struct device *d, 178static ssize_t bonding_show_slaves(struct device *d,
165 struct device_attribute *attr, char *buf) 179 struct device_attribute *attr, char *buf)
166{ 180{
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
190 204
191 return res; 205 return res;
192} 206}
193
194/*
195 * Set the slaves in the current bond.
196 * This is supposed to be only thin wrapper for bond_enslave and bond_release.
197 * All hard work should be done there.
198 */
199static ssize_t bonding_store_slaves(struct device *d,
200 struct device_attribute *attr,
201 const char *buffer, size_t count)
202{
203 struct bonding *bond = to_bond(d);
204 int ret;
205
206 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
207 if (!ret)
208 ret = count;
209
210 return ret;
211}
212static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, 207static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
213 bonding_store_slaves); 208 bonding_sysfs_store_option);
214 209
215/* 210/* Show the bonding mode. */
216 * Show and set the bonding mode. The bond interface must be down to
217 * change the mode.
218 */
219static ssize_t bonding_show_mode(struct device *d, 211static ssize_t bonding_show_mode(struct device *d,
220 struct device_attribute *attr, char *buf) 212 struct device_attribute *attr, char *buf)
221{ 213{
222 struct bonding *bond = to_bond(d); 214 struct bonding *bond = to_bond(d);
223 const struct bond_opt_value *val; 215 const struct bond_opt_value *val;
224 216
225 val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode); 217 val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
226 218
227 return sprintf(buf, "%s %d\n", val->string, bond->params.mode); 219 return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
228}
229
230static ssize_t bonding_store_mode(struct device *d,
231 struct device_attribute *attr,
232 const char *buf, size_t count)
233{
234 struct bonding *bond = to_bond(d);
235 int ret;
236
237 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
238 if (!ret)
239 ret = count;
240
241 return ret;
242} 220}
243static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, 221static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
244 bonding_show_mode, bonding_store_mode); 222 bonding_show_mode, bonding_sysfs_store_option);
245 223
246/* 224/* Show the bonding transmit hash method. */
247 * Show and set the bonding transmit hash method.
248 */
249static ssize_t bonding_show_xmit_hash(struct device *d, 225static ssize_t bonding_show_xmit_hash(struct device *d,
250 struct device_attribute *attr, 226 struct device_attribute *attr,
251 char *buf) 227 char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
257 233
258 return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy); 234 return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
259} 235}
260
261static ssize_t bonding_store_xmit_hash(struct device *d,
262 struct device_attribute *attr,
263 const char *buf, size_t count)
264{
265 struct bonding *bond = to_bond(d);
266 int ret;
267
268 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
269 if (!ret)
270 ret = count;
271
272 return ret;
273}
274static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, 236static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
275 bonding_show_xmit_hash, bonding_store_xmit_hash); 237 bonding_show_xmit_hash, bonding_sysfs_store_option);
276 238
277/* 239/* Show arp_validate. */
278 * Show and set arp_validate.
279 */
280static ssize_t bonding_show_arp_validate(struct device *d, 240static ssize_t bonding_show_arp_validate(struct device *d,
281 struct device_attribute *attr, 241 struct device_attribute *attr,
282 char *buf) 242 char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
289 249
290 return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate); 250 return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
291} 251}
292
293static ssize_t bonding_store_arp_validate(struct device *d,
294 struct device_attribute *attr,
295 const char *buf, size_t count)
296{
297 struct bonding *bond = to_bond(d);
298 int ret;
299
300 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
301 if (!ret)
302 ret = count;
303
304 return ret;
305}
306
307static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, 252static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
308 bonding_store_arp_validate); 253 bonding_sysfs_store_option);
309/* 254
310 * Show and set arp_all_targets. 255/* Show arp_all_targets. */
311 */
312static ssize_t bonding_show_arp_all_targets(struct device *d, 256static ssize_t bonding_show_arp_all_targets(struct device *d,
313 struct device_attribute *attr, 257 struct device_attribute *attr,
314 char *buf) 258 char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
321 return sprintf(buf, "%s %d\n", 265 return sprintf(buf, "%s %d\n",
322 val->string, bond->params.arp_all_targets); 266 val->string, bond->params.arp_all_targets);
323} 267}
324
325static ssize_t bonding_store_arp_all_targets(struct device *d,
326 struct device_attribute *attr,
327 const char *buf, size_t count)
328{
329 struct bonding *bond = to_bond(d);
330 int ret;
331
332 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
333 if (!ret)
334 ret = count;
335
336 return ret;
337}
338
339static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR, 268static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
340 bonding_show_arp_all_targets, bonding_store_arp_all_targets); 269 bonding_show_arp_all_targets, bonding_sysfs_store_option);
341 270
342/* 271/* Show fail_over_mac. */
343 * Show and store fail_over_mac. User only allowed to change the
344 * value when there are no slaves.
345 */
346static ssize_t bonding_show_fail_over_mac(struct device *d, 272static ssize_t bonding_show_fail_over_mac(struct device *d,
347 struct device_attribute *attr, 273 struct device_attribute *attr,
348 char *buf) 274 char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
355 281
356 return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac); 282 return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
357} 283}
358
359static ssize_t bonding_store_fail_over_mac(struct device *d,
360 struct device_attribute *attr,
361 const char *buf, size_t count)
362{
363 struct bonding *bond = to_bond(d);
364 int ret;
365
366 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
367 if (!ret)
368 ret = count;
369
370 return ret;
371}
372
373static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, 284static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
374 bonding_show_fail_over_mac, bonding_store_fail_over_mac); 285 bonding_show_fail_over_mac, bonding_sysfs_store_option);
375 286
376/* 287/* Show the arp timer interval. */
377 * Show and set the arp timer interval. There are two tricky bits
378 * here. First, if ARP monitoring is activated, then we must disable
379 * MII monitoring. Second, if the ARP timer isn't running, we must
380 * start it.
381 */
382static ssize_t bonding_show_arp_interval(struct device *d, 288static ssize_t bonding_show_arp_interval(struct device *d,
383 struct device_attribute *attr, 289 struct device_attribute *attr,
384 char *buf) 290 char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
387 293
388 return sprintf(buf, "%d\n", bond->params.arp_interval); 294 return sprintf(buf, "%d\n", bond->params.arp_interval);
389} 295}
390
391static ssize_t bonding_store_arp_interval(struct device *d,
392 struct device_attribute *attr,
393 const char *buf, size_t count)
394{
395 struct bonding *bond = to_bond(d);
396 int ret;
397
398 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
399 if (!ret)
400 ret = count;
401
402 return ret;
403}
404static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR, 296static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
405 bonding_show_arp_interval, bonding_store_arp_interval); 297 bonding_show_arp_interval, bonding_sysfs_store_option);
406 298
407/* 299/* Show the arp targets. */
408 * Show and set the arp targets.
409 */
410static ssize_t bonding_show_arp_targets(struct device *d, 300static ssize_t bonding_show_arp_targets(struct device *d,
411 struct device_attribute *attr, 301 struct device_attribute *attr,
412 char *buf) 302 char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
424 314
425 return res; 315 return res;
426} 316}
317static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
318 bonding_show_arp_targets, bonding_sysfs_store_option);
427 319
428static ssize_t bonding_store_arp_targets(struct device *d, 320/* Show the up and down delays. */
429 struct device_attribute *attr,
430 const char *buf, size_t count)
431{
432 struct bonding *bond = to_bond(d);
433 int ret;
434
435 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
436 if (!ret)
437 ret = count;
438
439 return ret;
440}
441static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
442
443/*
444 * Show and set the up and down delays. These must be multiples of the
445 * MII monitoring value, and are stored internally as the multiplier.
446 * Thus, we must translate to MS for the real world.
447 */
448static ssize_t bonding_show_downdelay(struct device *d, 321static ssize_t bonding_show_downdelay(struct device *d,
449 struct device_attribute *attr, 322 struct device_attribute *attr,
450 char *buf) 323 char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
453 326
454 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon); 327 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
455} 328}
456
457static ssize_t bonding_store_downdelay(struct device *d,
458 struct device_attribute *attr,
459 const char *buf, size_t count)
460{
461 struct bonding *bond = to_bond(d);
462 int ret;
463
464 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
465 if (!ret)
466 ret = count;
467
468 return ret;
469}
470static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, 329static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
471 bonding_show_downdelay, bonding_store_downdelay); 330 bonding_show_downdelay, bonding_sysfs_store_option);
472 331
473static ssize_t bonding_show_updelay(struct device *d, 332static ssize_t bonding_show_updelay(struct device *d,
474 struct device_attribute *attr, 333 struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
479 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon); 338 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
480 339
481} 340}
482
483static ssize_t bonding_store_updelay(struct device *d,
484 struct device_attribute *attr,
485 const char *buf, size_t count)
486{
487 struct bonding *bond = to_bond(d);
488 int ret;
489
490 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
491 if (!ret)
492 ret = count;
493
494 return ret;
495}
496static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, 341static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
497 bonding_show_updelay, bonding_store_updelay); 342 bonding_show_updelay, bonding_sysfs_store_option);
498 343
499/* 344/* Show the LACP interval. */
500 * Show and set the LACP interval. Interface must be down, and the mode
501 * must be set to 802.3ad mode.
502 */
503static ssize_t bonding_show_lacp(struct device *d, 345static ssize_t bonding_show_lacp(struct device *d,
504 struct device_attribute *attr, 346 struct device_attribute *attr,
505 char *buf) 347 char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
511 353
512 return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast); 354 return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
513} 355}
514
515static ssize_t bonding_store_lacp(struct device *d,
516 struct device_attribute *attr,
517 const char *buf, size_t count)
518{
519 struct bonding *bond = to_bond(d);
520 int ret;
521
522 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
523 if (!ret)
524 ret = count;
525
526 return ret;
527}
528static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
529 bonding_show_lacp, bonding_store_lacp); 357 bonding_show_lacp, bonding_sysfs_store_option);
530 358
531static ssize_t bonding_show_min_links(struct device *d, 359static ssize_t bonding_show_min_links(struct device *d,
532 struct device_attribute *attr, 360 struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
536 364
537 return sprintf(buf, "%u\n", bond->params.min_links); 365 return sprintf(buf, "%u\n", bond->params.min_links);
538} 366}
539
540static ssize_t bonding_store_min_links(struct device *d,
541 struct device_attribute *attr,
542 const char *buf, size_t count)
543{
544 struct bonding *bond = to_bond(d);
545 int ret;
546
547 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
548 if (!ret)
549 ret = count;
550
551 return ret;
552}
553static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR, 367static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
554 bonding_show_min_links, bonding_store_min_links); 368 bonding_show_min_links, bonding_sysfs_store_option);
555 369
556static ssize_t bonding_show_ad_select(struct device *d, 370static ssize_t bonding_show_ad_select(struct device *d,
557 struct device_attribute *attr, 371 struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
564 378
565 return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select); 379 return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
566} 380}
567
568
569static ssize_t bonding_store_ad_select(struct device *d,
570 struct device_attribute *attr,
571 const char *buf, size_t count)
572{
573 struct bonding *bond = to_bond(d);
574 int ret;
575
576 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
577 if (!ret)
578 ret = count;
579
580 return ret;
581}
582static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, 381static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
583 bonding_show_ad_select, bonding_store_ad_select); 382 bonding_show_ad_select, bonding_sysfs_store_option);
584 383
585/* 384/* Show and set the number of peer notifications to send after a failover event. */
586 * Show and set the number of peer notifications to send after a failover event.
587 */
588static ssize_t bonding_show_num_peer_notif(struct device *d, 385static ssize_t bonding_show_num_peer_notif(struct device *d,
589 struct device_attribute *attr, 386 struct device_attribute *attr,
590 char *buf) 387 char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
611static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, 408static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
612 bonding_show_num_peer_notif, bonding_store_num_peer_notif); 409 bonding_show_num_peer_notif, bonding_store_num_peer_notif);
613 410
614/* 411/* Show the MII monitor interval. */
615 * Show and set the MII monitor interval. There are two tricky bits
616 * here. First, if MII monitoring is activated, then we must disable
617 * ARP monitoring. Second, if the timer isn't running, we must
618 * start it.
619 */
620static ssize_t bonding_show_miimon(struct device *d, 412static ssize_t bonding_show_miimon(struct device *d,
621 struct device_attribute *attr, 413 struct device_attribute *attr,
622 char *buf) 414 char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
625 417
626 return sprintf(buf, "%d\n", bond->params.miimon); 418 return sprintf(buf, "%d\n", bond->params.miimon);
627} 419}
628
629static ssize_t bonding_store_miimon(struct device *d,
630 struct device_attribute *attr,
631 const char *buf, size_t count)
632{
633 struct bonding *bond = to_bond(d);
634 int ret;
635
636 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
637 if (!ret)
638 ret = count;
639
640 return ret;
641}
642static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, 420static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
643 bonding_show_miimon, bonding_store_miimon); 421 bonding_show_miimon, bonding_sysfs_store_option);
644 422
645/* 423/* Show the primary slave. */
646 * Show and set the primary slave. The store function is much
647 * simpler than bonding_store_slaves function because it only needs to
648 * handle one interface name.
649 * The bond must be a mode that supports a primary for this be
650 * set.
651 */
652static ssize_t bonding_show_primary(struct device *d, 424static ssize_t bonding_show_primary(struct device *d,
653 struct device_attribute *attr, 425 struct device_attribute *attr,
654 char *buf) 426 char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
661 433
662 return count; 434 return count;
663} 435}
664
665static ssize_t bonding_store_primary(struct device *d,
666 struct device_attribute *attr,
667 const char *buf, size_t count)
668{
669 struct bonding *bond = to_bond(d);
670 int ret;
671
672 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
673 if (!ret)
674 ret = count;
675
676 return ret;
677}
678static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, 436static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
679 bonding_show_primary, bonding_store_primary); 437 bonding_show_primary, bonding_sysfs_store_option);
680 438
681/* 439/* Show the primary_reselect flag. */
682 * Show and set the primary_reselect flag.
683 */
684static ssize_t bonding_show_primary_reselect(struct device *d, 440static ssize_t bonding_show_primary_reselect(struct device *d,
685 struct device_attribute *attr, 441 struct device_attribute *attr,
686 char *buf) 442 char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
694 return sprintf(buf, "%s %d\n", 450 return sprintf(buf, "%s %d\n",
695 val->string, bond->params.primary_reselect); 451 val->string, bond->params.primary_reselect);
696} 452}
697
698static ssize_t bonding_store_primary_reselect(struct device *d,
699 struct device_attribute *attr,
700 const char *buf, size_t count)
701{
702 struct bonding *bond = to_bond(d);
703 int ret;
704
705 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
706 (char *)buf);
707 if (!ret)
708 ret = count;
709
710 return ret;
711}
712static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR, 453static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
713 bonding_show_primary_reselect, 454 bonding_show_primary_reselect, bonding_sysfs_store_option);
714 bonding_store_primary_reselect);
715 455
716/* 456/* Show the use_carrier flag. */
717 * Show and set the use_carrier flag.
718 */
719static ssize_t bonding_show_carrier(struct device *d, 457static ssize_t bonding_show_carrier(struct device *d,
720 struct device_attribute *attr, 458 struct device_attribute *attr,
721 char *buf) 459 char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
724 462
725 return sprintf(buf, "%d\n", bond->params.use_carrier); 463 return sprintf(buf, "%d\n", bond->params.use_carrier);
726} 464}
727
728static ssize_t bonding_store_carrier(struct device *d,
729 struct device_attribute *attr,
730 const char *buf, size_t count)
731{
732 struct bonding *bond = to_bond(d);
733 int ret;
734
735 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
736 if (!ret)
737 ret = count;
738
739 return ret;
740}
741static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 465static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
742 bonding_show_carrier, bonding_store_carrier); 466 bonding_show_carrier, bonding_sysfs_store_option);
743 467
744 468
745/* 469/* Show currently active_slave. */
746 * Show and set currently active_slave.
747 */
748static ssize_t bonding_show_active_slave(struct device *d, 470static ssize_t bonding_show_active_slave(struct device *d,
749 struct device_attribute *attr, 471 struct device_attribute *attr,
750 char *buf) 472 char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
761 483
762 return count; 484 return count;
763} 485}
764
765static ssize_t bonding_store_active_slave(struct device *d,
766 struct device_attribute *attr,
767 const char *buf, size_t count)
768{
769 struct bonding *bond = to_bond(d);
770 int ret;
771
772 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
773 if (!ret)
774 ret = count;
775
776 return ret;
777}
778static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, 486static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
779 bonding_show_active_slave, bonding_store_active_slave); 487 bonding_show_active_slave, bonding_sysfs_store_option);
780
781 488
782/* 489/* Show link status of the bond interface. */
783 * Show link status of the bond interface.
784 */
785static ssize_t bonding_show_mii_status(struct device *d, 490static ssize_t bonding_show_mii_status(struct device *d,
786 struct device_attribute *attr, 491 struct device_attribute *attr,
787 char *buf) 492 char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
792} 497}
793static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 498static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
794 499
795/* 500/* Show current 802.3ad aggregator ID. */
796 * Show current 802.3ad aggregator ID.
797 */
798static ssize_t bonding_show_ad_aggregator(struct device *d, 501static ssize_t bonding_show_ad_aggregator(struct device *d,
799 struct device_attribute *attr, 502 struct device_attribute *attr,
800 char *buf) 503 char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
802 int count = 0; 505 int count = 0;
803 struct bonding *bond = to_bond(d); 506 struct bonding *bond = to_bond(d);
804 507
805 if (bond->params.mode == BOND_MODE_8023AD) { 508 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
806 struct ad_info ad_info; 509 struct ad_info ad_info;
807 count = sprintf(buf, "%d\n", 510 count = sprintf(buf, "%d\n",
808 bond_3ad_get_active_agg_info(bond, &ad_info) 511 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
814static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL); 517static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
815 518
816 519
817/* 520/* Show number of active 802.3ad ports. */
818 * Show number of active 802.3ad ports.
819 */
820static ssize_t bonding_show_ad_num_ports(struct device *d, 521static ssize_t bonding_show_ad_num_ports(struct device *d,
821 struct device_attribute *attr, 522 struct device_attribute *attr,
822 char *buf) 523 char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
824 int count = 0; 525 int count = 0;
825 struct bonding *bond = to_bond(d); 526 struct bonding *bond = to_bond(d);
826 527
827 if (bond->params.mode == BOND_MODE_8023AD) { 528 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
828 struct ad_info ad_info; 529 struct ad_info ad_info;
829 count = sprintf(buf, "%d\n", 530 count = sprintf(buf, "%d\n",
830 bond_3ad_get_active_agg_info(bond, &ad_info) 531 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
836static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL); 537static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
837 538
838 539
839/* 540/* Show current 802.3ad actor key. */
840 * Show current 802.3ad actor key.
841 */
842static ssize_t bonding_show_ad_actor_key(struct device *d, 541static ssize_t bonding_show_ad_actor_key(struct device *d,
843 struct device_attribute *attr, 542 struct device_attribute *attr,
844 char *buf) 543 char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
846 int count = 0; 545 int count = 0;
847 struct bonding *bond = to_bond(d); 546 struct bonding *bond = to_bond(d);
848 547
849 if (bond->params.mode == BOND_MODE_8023AD) { 548 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
850 struct ad_info ad_info; 549 struct ad_info ad_info;
851 count = sprintf(buf, "%d\n", 550 count = sprintf(buf, "%d\n",
852 bond_3ad_get_active_agg_info(bond, &ad_info) 551 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
858static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL); 557static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
859 558
860 559
861/* 560/* Show current 802.3ad partner key. */
862 * Show current 802.3ad partner key.
863 */
864static ssize_t bonding_show_ad_partner_key(struct device *d, 561static ssize_t bonding_show_ad_partner_key(struct device *d,
865 struct device_attribute *attr, 562 struct device_attribute *attr,
866 char *buf) 563 char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
868 int count = 0; 565 int count = 0;
869 struct bonding *bond = to_bond(d); 566 struct bonding *bond = to_bond(d);
870 567
871 if (bond->params.mode == BOND_MODE_8023AD) { 568 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
872 struct ad_info ad_info; 569 struct ad_info ad_info;
873 count = sprintf(buf, "%d\n", 570 count = sprintf(buf, "%d\n",
874 bond_3ad_get_active_agg_info(bond, &ad_info) 571 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
880static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL); 577static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
881 578
882 579
883/* 580/* Show current 802.3ad partner mac. */
884 * Show current 802.3ad partner mac.
885 */
886static ssize_t bonding_show_ad_partner_mac(struct device *d, 581static ssize_t bonding_show_ad_partner_mac(struct device *d,
887 struct device_attribute *attr, 582 struct device_attribute *attr,
888 char *buf) 583 char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
890 int count = 0; 585 int count = 0;
891 struct bonding *bond = to_bond(d); 586 struct bonding *bond = to_bond(d);
892 587
893 if (bond->params.mode == BOND_MODE_8023AD) { 588 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
894 struct ad_info ad_info; 589 struct ad_info ad_info;
895 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) 590 if (!bond_3ad_get_active_agg_info(bond, &ad_info))
896 count = sprintf(buf, "%pM\n", ad_info.partner_system); 591 count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
900} 595}
901static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 596static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
902 597
903/* 598/* Show the queue_ids of the slaves in the current bond. */
904 * Show the queue_ids of the slaves in the current bond.
905 */
906static ssize_t bonding_show_queue_id(struct device *d, 599static ssize_t bonding_show_queue_id(struct device *d,
907 struct device_attribute *attr, 600 struct device_attribute *attr,
908 char *buf) 601 char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
933 626
934 return res; 627 return res;
935} 628}
936
937/*
938 * Set the queue_ids of the slaves in the current bond. The bond
939 * interface must be enslaved for this to work.
940 */
941static ssize_t bonding_store_queue_id(struct device *d,
942 struct device_attribute *attr,
943 const char *buffer, size_t count)
944{
945 struct bonding *bond = to_bond(d);
946 int ret;
947
948 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
949 if (!ret)
950 ret = count;
951
952 return ret;
953}
954static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id, 629static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
955 bonding_store_queue_id); 630 bonding_sysfs_store_option);
956 631
957 632
958/* 633/* Show the all_slaves_active flag. */
959 * Show and set the all_slaves_active flag.
960 */
961static ssize_t bonding_show_slaves_active(struct device *d, 634static ssize_t bonding_show_slaves_active(struct device *d,
962 struct device_attribute *attr, 635 struct device_attribute *attr,
963 char *buf) 636 char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
966 639
967 return sprintf(buf, "%d\n", bond->params.all_slaves_active); 640 return sprintf(buf, "%d\n", bond->params.all_slaves_active);
968} 641}
969
970static ssize_t bonding_store_slaves_active(struct device *d,
971 struct device_attribute *attr,
972 const char *buf, size_t count)
973{
974 struct bonding *bond = to_bond(d);
975 int ret;
976
977 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
978 (char *)buf);
979 if (!ret)
980 ret = count;
981
982 return ret;
983}
984static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 642static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
985 bonding_show_slaves_active, bonding_store_slaves_active); 643 bonding_show_slaves_active, bonding_sysfs_store_option);
986 644
987/* 645/* Show the number of IGMP membership reports to send on link failure */
988 * Show and set the number of IGMP membership reports to send on link failure
989 */
990static ssize_t bonding_show_resend_igmp(struct device *d, 646static ssize_t bonding_show_resend_igmp(struct device *d,
991 struct device_attribute *attr, 647 struct device_attribute *attr,
992 char *buf) 648 char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
995 651
996 return sprintf(buf, "%d\n", bond->params.resend_igmp); 652 return sprintf(buf, "%d\n", bond->params.resend_igmp);
997} 653}
998
999static ssize_t bonding_store_resend_igmp(struct device *d,
1000 struct device_attribute *attr,
1001 const char *buf, size_t count)
1002{
1003 struct bonding *bond = to_bond(d);
1004 int ret;
1005
1006 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
1007 if (!ret)
1008 ret = count;
1009
1010 return ret;
1011}
1012
1013static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, 654static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
1014 bonding_show_resend_igmp, bonding_store_resend_igmp); 655 bonding_show_resend_igmp, bonding_sysfs_store_option);
1015 656
1016 657
1017static ssize_t bonding_show_lp_interval(struct device *d, 658static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
1019 char *buf) 660 char *buf)
1020{ 661{
1021 struct bonding *bond = to_bond(d); 662 struct bonding *bond = to_bond(d);
663
1022 return sprintf(buf, "%d\n", bond->params.lp_interval); 664 return sprintf(buf, "%d\n", bond->params.lp_interval);
1023} 665}
666static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
667 bonding_show_lp_interval, bonding_sysfs_store_option);
1024 668
1025static ssize_t bonding_store_lp_interval(struct device *d, 669static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
1026 struct device_attribute *attr, 670 struct device_attribute *attr,
1027 const char *buf, size_t count) 671 char *buf)
1028{ 672{
1029 struct bonding *bond = to_bond(d); 673 struct bonding *bond = to_bond(d);
1030 int ret; 674 return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
1031
1032 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
1033 if (!ret)
1034 ret = count;
1035
1036 return ret;
1037} 675}
1038 676static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
1039static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR, 677 bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
1040 bonding_show_lp_interval, bonding_store_lp_interval);
1041 678
1042static ssize_t bonding_show_packets_per_slave(struct device *d, 679static ssize_t bonding_show_packets_per_slave(struct device *d,
1043 struct device_attribute *attr, 680 struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
1045{ 682{
1046 struct bonding *bond = to_bond(d); 683 struct bonding *bond = to_bond(d);
1047 unsigned int packets_per_slave = bond->params.packets_per_slave; 684 unsigned int packets_per_slave = bond->params.packets_per_slave;
1048 return sprintf(buf, "%u\n", packets_per_slave);
1049}
1050
1051static ssize_t bonding_store_packets_per_slave(struct device *d,
1052 struct device_attribute *attr,
1053 const char *buf, size_t count)
1054{
1055 struct bonding *bond = to_bond(d);
1056 int ret;
1057 685
1058 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE, 686 return sprintf(buf, "%u\n", packets_per_slave);
1059 (char *)buf);
1060 if (!ret)
1061 ret = count;
1062
1063 return ret;
1064} 687}
1065
1066static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR, 688static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
1067 bonding_show_packets_per_slave, 689 bonding_show_packets_per_slave, bonding_sysfs_store_option);
1068 bonding_store_packets_per_slave);
1069 690
1070static struct attribute *per_bond_attrs[] = { 691static struct attribute *per_bond_attrs[] = {
1071 &dev_attr_slaves.attr, 692 &dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
1099 &dev_attr_min_links.attr, 720 &dev_attr_min_links.attr,
1100 &dev_attr_lp_interval.attr, 721 &dev_attr_lp_interval.attr,
1101 &dev_attr_packets_per_slave.attr, 722 &dev_attr_packets_per_slave.attr,
723 &dev_attr_tlb_dynamic_lb.attr,
1102 NULL, 724 NULL,
1103}; 725};
1104 726
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
1107 .attrs = per_bond_attrs, 729 .attrs = per_bond_attrs,
1108}; 730};
1109 731
1110/* 732/* Initialize sysfs. This sets up the bonding_masters file in
1111 * Initialize sysfs. This sets up the bonding_masters file in
1112 * /sys/class/net. 733 * /sys/class/net.
1113 */ 734 */
1114int bond_create_sysfs(struct bond_net *bn) 735int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
1120 741
1121 ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters, 742 ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
1122 bn->net); 743 bn->net);
1123 /* 744 /* Permit multiple loads of the module by ignoring failures to
1124 * Permit multiple loads of the module by ignoring failures to
1125 * create the bonding_masters sysfs file. Bonding devices 745 * create the bonding_masters sysfs file. Bonding devices
1126 * created by second or subsequent loads of the module will 746 * created by second or subsequent loads of the module will
1127 * not be listed in, or controllable by, bonding_masters, but 747 * not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
1144 764
1145} 765}
1146 766
1147/* 767/* Remove /sys/class/net/bonding_masters. */
1148 * Remove /sys/class/net/bonding_masters.
1149 */
1150void bond_destroy_sysfs(struct bond_net *bn) 768void bond_destroy_sysfs(struct bond_net *bn)
1151{ 769{
1152 netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net); 770 netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
1153} 771}
1154 772
1155/* 773/* Initialize sysfs for each bond. This sets up and registers
1156 * Initialize sysfs for each bond. This sets up and registers
1157 * the 'bonding' directory for each individual bond under /sys/class/net. 774 * the 'bonding' directory for each individual bond under /sys/class/net.
1158 */ 775 */
1159void bond_prepare_sysfs_group(struct bonding *bond) 776void bond_prepare_sysfs_group(struct bonding *bond)
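The bulk of the bond_sysfs.c diff is one refactor: every per-option store handler that merely wrapped bond_opt_tryset_rtnl() is deleted, and its DEVICE_ATTR() line now points at the single generic bonding_sysfs_store_option(), which resolves the option from the attribute name via bond_opt_get_by_name(). A hedged sketch of wiring a new option after this patch; the option name "foo" is hypothetical, and a matching entry would also have to exist in the option table in bond_options.c (not part of this diff):

    /* hypothetical option "foo": only the show handler is still hand-written */
    static ssize_t bonding_show_foo(struct device *d,
                                    struct device_attribute *attr, char *buf)
    {
            struct bonding *bond = to_bond(d);

            return sprintf(buf, "%d\n", bond->params.foo);
    }
    /* the generic store path looks "foo" up by attr->attr.name */
    static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR,
                       bonding_show_foo, bonding_sysfs_store_option);

plus an &dev_attr_foo.attr entry in per_bond_attrs[], as done for tlb_dynamic_lb above.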
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2e4eec5450c8..198677f58ce0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
69{ 69{
70 const struct aggregator *agg; 70 const struct aggregator *agg;
71 71
72 if (slave->bond->params.mode == BOND_MODE_8023AD) { 72 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
73 agg = SLAVE_AD_INFO(slave).port.aggregator; 73 agg = SLAVE_AD_INFO(slave)->port.aggregator;
74 if (agg) 74 if (agg)
75 return sprintf(buf, "%d\n", 75 return sprintf(buf, "%d\n",
76 agg->aggregator_identifier); 76 agg->aggregator_identifier);
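The dot-to-arrow change here tracks the bonding.h hunk below, where struct slave's embedded ad_info becomes a pointer (struct ad_slave_info *ad_info), following the old comment's own advice that the member was "HUGE - better to dynamically alloc". The macro definition sits outside this excerpt; its assumed shape is:

    /* assumed shape of the updated accessor; its definition is not shown here */
    #define SLAVE_AD_INFO(slave) ((slave)->ad_info)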
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 00bea320e3b5..0b4d9cde0b05 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -41,42 +41,6 @@
41 41
42#define BOND_DEFAULT_MIIMON 100 42#define BOND_DEFAULT_MIIMON 100
43 43
44#define IS_UP(dev) \
45 ((((dev)->flags & IFF_UP) == IFF_UP) && \
46 netif_running(dev) && \
47 netif_carrier_ok(dev))
48
49/*
50 * Checks whether slave is ready for transmit.
51 */
52#define SLAVE_IS_OK(slave) \
53 (((slave)->dev->flags & IFF_UP) && \
54 netif_running((slave)->dev) && \
55 ((slave)->link == BOND_LINK_UP) && \
56 bond_is_active_slave(slave))
57
58
59#define USES_PRIMARY(mode) \
60 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
61 ((mode) == BOND_MODE_TLB) || \
62 ((mode) == BOND_MODE_ALB))
63
64#define BOND_NO_USES_ARP(mode) \
65 (((mode) == BOND_MODE_8023AD) || \
66 ((mode) == BOND_MODE_TLB) || \
67 ((mode) == BOND_MODE_ALB))
68
69#define TX_QUEUE_OVERRIDE(mode) \
70 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
71 ((mode) == BOND_MODE_ROUNDROBIN))
72
73#define BOND_MODE_IS_LB(mode) \
74 (((mode) == BOND_MODE_TLB) || \
75 ((mode) == BOND_MODE_ALB))
76
77#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
78 ((htonl(INADDR_BROADCAST) == a) || \
79 ipv4_is_zeronet(a))
80/* 44/*
81 * Less bad way to call ioctl from within the kernel; this needs to be 45 * Less bad way to call ioctl from within the kernel; this needs to be
82 * done some other way to get the call out of interrupt context. 46 * done some other way to get the call out of interrupt context.
@@ -90,6 +54,8 @@
90 set_fs(fs); \ 54 set_fs(fs); \
91 res; }) 55 res; })
92 56
57#define BOND_MODE(bond) ((bond)->params.mode)
58
93/* slave list primitives */ 59/* slave list primitives */
94#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) 60#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
95 61
@@ -175,6 +141,7 @@ struct bond_params {
175 int resend_igmp; 141 int resend_igmp;
176 int lp_interval; 142 int lp_interval;
177 int packets_per_slave; 143 int packets_per_slave;
144 int tlb_dynamic_lb;
178 struct reciprocal_value reciprocal_packets_per_slave; 145 struct reciprocal_value reciprocal_packets_per_slave;
179}; 146};
180 147
@@ -183,8 +150,6 @@ struct bond_parm_tbl {
183 int mode; 150 int mode;
184}; 151};
185 152
186#define BOND_MAX_MODENAME_LEN 20
187
188struct slave { 153struct slave {
189 struct net_device *dev; /* first - useful for panic debug */ 154 struct net_device *dev; /* first - useful for panic debug */
190 struct bonding *bond; /* our master */ 155 struct bonding *bond; /* our master */
@@ -205,7 +170,7 @@ struct slave {
205 u32 speed; 170 u32 speed;
206 u16 queue_id; 171 u16 queue_id;
207 u8 perm_hwaddr[ETH_ALEN]; 172 u8 perm_hwaddr[ETH_ALEN];
208 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 173 struct ad_slave_info *ad_info;
209 struct tlb_slave_info tlb_info; 174 struct tlb_slave_info tlb_info;
210#ifdef CONFIG_NET_POLL_CONTROLLER 175#ifdef CONFIG_NET_POLL_CONTROLLER
211 struct netpoll *np; 176 struct netpoll *np;
@@ -285,14 +250,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
285 250
286static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 251static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
287{ 252{
288 if (!slave || !slave->bond)
289 return NULL;
290 return slave->bond; 253 return slave->bond;
291} 254}
292 255
256static inline bool bond_should_override_tx_queue(struct bonding *bond)
257{
258 return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
259 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
260}
261
293static inline bool bond_is_lb(const struct bonding *bond) 262static inline bool bond_is_lb(const struct bonding *bond)
294{ 263{
295 return BOND_MODE_IS_LB(bond->params.mode); 264 return BOND_MODE(bond) == BOND_MODE_TLB ||
265 BOND_MODE(bond) == BOND_MODE_ALB;
266}
267
268static inline bool bond_mode_uses_arp(int mode)
269{
270 return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
271 mode != BOND_MODE_ALB;
272}
273
274static inline bool bond_mode_uses_primary(int mode)
275{
276 return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
277 mode == BOND_MODE_ALB;
278}
279
280static inline bool bond_uses_primary(struct bonding *bond)
281{
282 return bond_mode_uses_primary(BOND_MODE(bond));
283}
284
285static inline bool bond_slave_is_up(struct slave *slave)
286{
287 return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
296} 288}
297 289
298static inline void bond_set_active_slave(struct slave *slave) 290static inline void bond_set_active_slave(struct slave *slave)
@@ -365,6 +357,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
365 return !bond_slave_state(slave); 357 return !bond_slave_state(slave);
366} 358}
367 359
360static inline bool bond_slave_can_tx(struct slave *slave)
361{
362 return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
363 bond_is_active_slave(slave);
364}
365
368#define BOND_PRI_RESELECT_ALWAYS 0 366#define BOND_PRI_RESELECT_ALWAYS 0
369#define BOND_PRI_RESELECT_BETTER 1 367#define BOND_PRI_RESELECT_BETTER 1
370#define BOND_PRI_RESELECT_FAILURE 2 368#define BOND_PRI_RESELECT_FAILURE 2
@@ -396,12 +394,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
396 return bond->params.arp_validate & (1 << bond_slave_state(slave)); 394 return bond->params.arp_validate & (1 << bond_slave_state(slave));
397} 395}
398 396
399static inline int slave_do_arp_validate_only(struct bonding *bond, 397static inline int slave_do_arp_validate_only(struct bonding *bond)
400 struct slave *slave)
401{ 398{
402 return bond->params.arp_validate & BOND_ARP_FILTER; 399 return bond->params.arp_validate & BOND_ARP_FILTER;
403} 400}
404 401
402static inline int bond_is_ip_target_ok(__be32 addr)
403{
404 return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
405}
406
405/* Get the oldest arp which we've received on this slave for bond's 407/* Get the oldest arp which we've received on this slave for bond's
406 * arp_targets. 408 * arp_targets.
407 */ 409 */
@@ -479,16 +481,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
479 return addr; 481 return addr;
480} 482}
481 483
482static inline bool slave_can_tx(struct slave *slave) 484struct bond_net {
483{ 485 struct net *net; /* Associated network namespace */
484 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP && 486 struct list_head dev_list;
485 bond_is_active_slave(slave)) 487#ifdef CONFIG_PROC_FS
486 return true; 488 struct proc_dir_entry *proc_dir;
487 else 489#endif
488 return false; 490 struct class_attribute class_attr_bonding_masters;
489} 491};
490
491struct bond_net;
492 492
493int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); 493int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
494void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 494void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -500,7 +500,7 @@ int bond_sysfs_slave_add(struct slave *slave);
500void bond_sysfs_slave_del(struct slave *slave); 500void bond_sysfs_slave_del(struct slave *slave);
501int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 501int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
502int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); 502int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
503int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count); 503u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
504void bond_select_active_slave(struct bonding *bond); 504void bond_select_active_slave(struct bonding *bond);
505void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 505void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
506void bond_create_debugfs(void); 506void bond_create_debugfs(void);
@@ -516,15 +516,9 @@ void bond_netlink_fini(void);
516struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); 516struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
517struct net_device *bond_option_active_slave_get(struct bonding *bond); 517struct net_device *bond_option_active_slave_get(struct bonding *bond);
518const char *bond_slave_link_status(s8 link); 518const char *bond_slave_link_status(s8 link);
519 519bool bond_verify_device_path(struct net_device *start_dev,
520struct bond_net { 520 struct net_device *end_dev,
521 struct net * net; /* Associated network namespace */ 521 struct bond_vlan_tag *tags);
522 struct list_head dev_list;
523#ifdef CONFIG_PROC_FS
524 struct proc_dir_entry * proc_dir;
525#endif
526 struct class_attribute class_attr_bonding_masters;
527};
528 522
529#ifdef CONFIG_PROC_FS 523#ifdef CONFIG_PROC_FS
530void bond_create_proc_entry(struct bonding *bond); 524void bond_create_proc_entry(struct bonding *bond);
@@ -576,6 +570,27 @@ static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
576 return NULL; 570 return NULL;
577} 571}
578 572
573/* Caller must hold rcu_read_lock() for read */
574static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
575{
576 struct list_head *iter;
577 struct slave *tmp;
578 struct netdev_hw_addr *ha;
579
580 bond_for_each_slave_rcu(bond, tmp, iter)
581 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
582 return true;
583
584 if (netdev_uc_empty(bond->dev))
585 return false;
586
587 netdev_for_each_uc_addr(ha, bond->dev)
588 if (ether_addr_equal_64bits(mac, ha->addr))
589 return true;
590
591 return false;
592}
593
579/* Check if the ip is present in arp ip list, or first free slot if ip == 0 594/* Check if the ip is present in arp ip list, or first free slot if ip == 0
580 * Returns -1 if not found, index if found 595 * Returns -1 if not found, index if found
581 */ 596 */
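Net effect of the bonding.h changes: the IS_UP/SLAVE_IS_OK/USES_PRIMARY/BOND_NO_USES_ARP/TX_QUEUE_OVERRIDE/BOND_MODE_IS_LB/IS_IP_TARGET_UNUSABLE_ADDRESS macro family becomes typed inline helpers, two of which flip sense (bond_mode_uses_arp() and bond_is_ip_target_ok() are the positive forms), and struct bond_net moves up so it no longer needs a forward declaration. Call sites elsewhere in the series presumably migrate as in this illustrative sketch (not a hunk from this patch):

    /* before */
    if (SLAVE_IS_OK(slave))
            bond_dev_queue_xmit(bond, skb, slave->dev);

    /* after: same test through the type-checked inline */
    if (bond_slave_can_tx(slave))
            bond_dev_queue_xmit(bond, skb, slave->dev);

One nuance visible above: bond_slave_is_up() drops the old explicit IFF_UP flag test, which netif_running() already implies.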
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..41688229c570 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
65 65
66config CAN_AT91 66config CAN_AT91
67 tristate "Atmel AT91 onchip CAN controller" 67 tristate "Atmel AT91 onchip CAN controller"
68 depends on ARM 68 depends on ARCH_AT91 || COMPILE_TEST
69 ---help--- 69 ---help---
70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
71 and AT91SAM9X5 processors. 71 and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
77 Driver for TI HECC (High End CAN Controller) module found on many 77 Driver for TI HECC (High End CAN Controller) module found on many
78 TI devices. The device specifications are available from www.ti.com 78 TI devices. The device specifications are available from www.ti.com
79 79
80config CAN_MCP251X
81 tristate "Microchip MCP251x SPI CAN controllers"
82 depends on SPI && HAS_DMA
83 ---help---
84 Driver for the Microchip MCP251x SPI CAN controllers.
85
86config CAN_BFIN 80config CAN_BFIN
87 depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x 81 depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
88 tristate "Analog Devices Blackfin on-chip CAN" 82 tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
110 104
111config PCH_CAN 105config PCH_CAN
112 tristate "Intel EG20T PCH CAN controller" 106 tristate "Intel EG20T PCH CAN controller"
113 depends on PCI 107 depends on PCI && (X86_32 || COMPILE_TEST)
114 ---help--- 108 ---help---
115 This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which 109 This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
116 is an IOH for x86 embedded processor (Intel Atom E6xx series). 110 is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,24 @@ config CAN_GRCAN
125 endian syntheses of the cores would need some modifications on 119 endian syntheses of the cores would need some modifications on
126 the hardware level to work. 120 the hardware level to work.
127 121
122config CAN_RCAR
123 tristate "Renesas R-Car CAN controller"
124 depends on ARM
125 ---help---
126 Say Y here if you want to use CAN controller found on Renesas R-Car
127 SoCs.
128
129 To compile this driver as a module, choose M here: the module will
130 be called rcar_can.
131
132config CAN_XILINXCAN
133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM
136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
138 Zynq CANPS IP.
139
128source "drivers/net/can/mscan/Kconfig" 140source "drivers/net/can/mscan/Kconfig"
129 141
130source "drivers/net/can/sja1000/Kconfig" 142source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +145,8 @@ source "drivers/net/can/c_can/Kconfig"
133 145
134source "drivers/net/can/cc770/Kconfig" 146source "drivers/net/can/cc770/Kconfig"
135 147
148source "drivers/net/can/spi/Kconfig"
149
136source "drivers/net/can/usb/Kconfig" 150source "drivers/net/can/usb/Kconfig"
137 151
138source "drivers/net/can/softing/Kconfig" 152source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..1697f22353a9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
10 10
11can-dev-$(CONFIG_CAN_LEDS) += led.o 11can-dev-$(CONFIG_CAN_LEDS) += led.o
12 12
13obj-y += spi/
13obj-y += usb/ 14obj-y += usb/
14obj-y += softing/ 15obj-y += softing/
15 16
@@ -19,11 +20,12 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
19obj-$(CONFIG_CAN_CC770) += cc770/ 20obj-$(CONFIG_CAN_CC770) += cc770/
20obj-$(CONFIG_CAN_AT91) += at91_can.o 21obj-$(CONFIG_CAN_AT91) += at91_can.o
21obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 22obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
22obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
23obj-$(CONFIG_CAN_BFIN) += bfin_can.o 23obj-$(CONFIG_CAN_BFIN) += bfin_can.o
24obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 24obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
25obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o 25obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
26obj-$(CONFIG_PCH_CAN) += pch_can.o 26obj-$(CONFIG_PCH_CAN) += pch_can.o
27obj-$(CONFIG_CAN_GRCAN) += grcan.o 27obj-$(CONFIG_CAN_GRCAN) += grcan.o
28obj-$(CONFIG_CAN_RCAR) += rcar_can.o
29obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
28 30
29ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 31ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 95e04e2002da..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -252,8 +252,7 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
252 struct c_can_priv *priv = netdev_priv(dev); 252 struct c_can_priv *priv = netdev_priv(dev);
253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface); 253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
254 254
255 priv->write_reg(priv, reg + 1, cmd); 255 priv->write_reg32(priv, reg, (cmd << 16) | obj);
256 priv->write_reg(priv, reg, obj);
257 256
258 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) { 257 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
259 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY)) 258 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
@@ -328,8 +327,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
328 change_bit(idx, &priv->tx_dir); 327 change_bit(idx, &priv->tx_dir);
329 } 328 }
330 329
331 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb); 330 priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
332 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
333 331
334 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 332 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
335 333
@@ -391,8 +389,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
391 389
392 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 390 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
393 391
394 arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)); 392 arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
395 arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
396 393
397 if (arb & IF_ARB_MSGXTD) 394 if (arb & IF_ARB_MSGXTD)
398 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG; 395 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -424,12 +421,10 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
424 struct c_can_priv *priv = netdev_priv(dev); 421 struct c_can_priv *priv = netdev_priv(dev);
425 422
426 mask |= BIT(29); 423 mask |= BIT(29);
427 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask); 424 priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
428 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
429 425
430 id |= IF_ARB_MSGVAL; 426 id |= IF_ARB_MSGVAL;
431 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id); 427 priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
432 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
433 428
434 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); 429 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
435 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); 430 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
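Every c_can.c hunk collapses a pair of 16-bit register accesses into one logical 32-bit access through the new read_reg32/write_reg32 hooks (declared in the c_can.h hunk below). Where the bus has no native 32-bit path, the PCI glue later in this diff supplies a fallback built from the existing 16-bit hooks, so the sketch below stays equivalent:

    /* before: two 16-bit writes, low half to ARB1, high half to ARB2 */
    priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
    priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);

    /* after: one hook call; c_can_pci_write_reg32() below expands it into
     * the same two 16-bit writes, high half first */
    priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);

The high-half-first order matters for the command request register, where writing the low half (the message number) is what starts the transfer, which is presumably why the old c_can_obj_update() already wrote reg + 1 before reg.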
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index c56f1b1c11ca..99ad1aa576b0 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -78,6 +78,7 @@ enum reg {
78 C_CAN_INTPND2_REG, 78 C_CAN_INTPND2_REG,
79 C_CAN_MSGVAL1_REG, 79 C_CAN_MSGVAL1_REG,
80 C_CAN_MSGVAL2_REG, 80 C_CAN_MSGVAL2_REG,
81 C_CAN_FUNCTION_REG,
81}; 82};
82 83
83static const u16 reg_map_c_can[] = { 84static const u16 reg_map_c_can[] = {
@@ -129,6 +130,7 @@ static const u16 reg_map_d_can[] = {
129 [C_CAN_BRPEXT_REG] = 0x0E, 130 [C_CAN_BRPEXT_REG] = 0x0E,
130 [C_CAN_INT_REG] = 0x10, 131 [C_CAN_INT_REG] = 0x10,
131 [C_CAN_TEST_REG] = 0x14, 132 [C_CAN_TEST_REG] = 0x14,
133 [C_CAN_FUNCTION_REG] = 0x18,
132 [C_CAN_TXRQST1_REG] = 0x88, 134 [C_CAN_TXRQST1_REG] = 0x88,
133 [C_CAN_TXRQST2_REG] = 0x8A, 135 [C_CAN_TXRQST2_REG] = 0x8A,
134 [C_CAN_NEWDAT1_REG] = 0x9C, 136 [C_CAN_NEWDAT1_REG] = 0x9C,
@@ -176,8 +178,10 @@ struct c_can_priv {
176 atomic_t tx_active; 178 atomic_t tx_active;
177 unsigned long tx_dir; 179 unsigned long tx_dir;
178 int last_status; 180 int last_status;
179 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 181 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
180 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 182 void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
183 u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
184 void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
181 void __iomem *base; 185 void __iomem *base;
182 const u16 *regs; 186 const u16 *regs;
183 void *priv; /* for board-specific data */ 187 void *priv; /* for board-specific data */
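Both new hooks sit beside the existing 16-bit ones, and all four now take a const struct c_can_priv *. A hedged sketch of how a glue driver's probe is expected to populate them, using the PCI helpers defined in the next file (the actual probe assignments fall outside this excerpt):

    /* sketch: 16-bit hooks chosen per bus register alignment, 32-bit
     * hooks backed by the generic two-access fallback from c_can_pci.c */
    priv->read_reg    = c_can_pci_read_reg_aligned_to_32bit;
    priv->write_reg   = c_can_pci_write_reg_aligned_to_32bit;
    priv->read_reg32  = c_can_pci_read_reg32;
    priv->write_reg32 = c_can_pci_write_reg32;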
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..5d11e0e4225b 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
19 19
20#include "c_can.h" 20#include "c_can.h"
21 21
22#define PCI_DEVICE_ID_PCH_CAN 0x8818
23#define PCH_PCI_SOFT_RESET 0x01fc
24
22enum c_can_pci_reg_align { 25enum c_can_pci_reg_align {
23 C_CAN_REG_ALIGN_16, 26 C_CAN_REG_ALIGN_16,
24 C_CAN_REG_ALIGN_32, 27 C_CAN_REG_ALIGN_32,
28 C_CAN_REG_32,
25}; 29};
26 30
27struct c_can_pci_data { 31struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
31 enum c_can_pci_reg_align reg_align; 35 enum c_can_pci_reg_align reg_align;
32 /* Set the frequency */ 36 /* Set the frequency */
33 unsigned int freq; 37 unsigned int freq;
38 /* PCI bar number */
39 int bar;
40 /* Callback for reset */
41 void (*init)(const struct c_can_priv *priv, bool enable);
34}; 42};
35 43
36/* 44/*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
39 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 47 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
40 * Handle the same by providing a common read/write interface. 48 * Handle the same by providing a common read/write interface.
41 */ 49 */
42static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv, 50static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
43 enum reg index) 51 enum reg index)
44{ 52{
45 return readw(priv->base + priv->regs[index]); 53 return readw(priv->base + priv->regs[index]);
46} 54}
47 55
48static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv, 56static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
49 enum reg index, u16 val) 57 enum reg index, u16 val)
50{ 58{
51 writew(val, priv->base + priv->regs[index]); 59 writew(val, priv->base + priv->regs[index]);
52} 60}
53 61
54static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv, 62static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
55 enum reg index) 63 enum reg index)
56{ 64{
57 return readw(priv->base + 2 * priv->regs[index]); 65 return readw(priv->base + 2 * priv->regs[index]);
58} 66}
59 67
60static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv, 68static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
61 enum reg index, u16 val) 69 enum reg index, u16 val)
62{ 70{
63 writew(val, priv->base + 2 * priv->regs[index]); 71 writew(val, priv->base + 2 * priv->regs[index]);
64} 72}
65 73
74static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
75 enum reg index)
76{
77 return (u16)ioread32(priv->base + 2 * priv->regs[index]);
78}
79
80static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
81 enum reg index, u16 val)
82{
83 iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
84}
85
86static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
87{
88 u32 val;
89
90 val = priv->read_reg(priv, index);
91 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
92
93 return val;
94}
95
96static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
97 u32 val)
98{
99 priv->write_reg(priv, index + 1, val >> 16);
100 priv->write_reg(priv, index, val);
101}
102
103static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
104{
105 if (enable) {
106 u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
107
108 /* write to sw reset register */
109 iowrite32(1, addr);
110 iowrite32(0, addr);
111 }
112}
113
66static int c_can_pci_probe(struct pci_dev *pdev, 114static int c_can_pci_probe(struct pci_dev *pdev,
67 const struct pci_device_id *ent) 115 const struct pci_device_id *ent)
68{ 116{
@@ -90,7 +138,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
90 pci_set_master(pdev); 138 pci_set_master(pdev);
91 } 139 }
92 140
93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 141 addr = pci_iomap(pdev, c_can_pci_data->bar,
142 pci_resource_len(pdev, c_can_pci_data->bar));
94 if (!addr) { 143 if (!addr) {
95 dev_err(&pdev->dev, 144 dev_err(&pdev->dev,
96 "device has no PCI memory resources, " 145 "device has no PCI memory resources, "
@@ -147,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
147 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit; 196 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
148 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit; 197 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
149 break; 198 break;
199 case C_CAN_REG_32:
200 priv->read_reg = c_can_pci_read_reg_32bit;
201 priv->write_reg = c_can_pci_write_reg_32bit;
202 break;
150 default: 203 default:
151 ret = -EINVAL; 204 ret = -EINVAL;
152 goto out_free_c_can; 205 goto out_free_c_can;
153 } 206 }
207 priv->read_reg32 = c_can_pci_read_reg32;
208 priv->write_reg32 = c_can_pci_write_reg32;
209
210 priv->raminit = c_can_pci_data->init;
154 211
155 ret = register_c_can_dev(dev); 212 ret = register_c_can_dev(dev);
156 if (ret) { 213 if (ret) {
@@ -198,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
198 .type = BOSCH_C_CAN, 255 .type = BOSCH_C_CAN,
199 .reg_align = C_CAN_REG_ALIGN_32, 256 .reg_align = C_CAN_REG_ALIGN_32,
200	.freq = 52000000, /* 52 MHz */	257	.freq = 52000000, /* 52 MHz */
258 .bar = 0,
259};
260
261static struct c_can_pci_data c_can_pch = {
262 .type = BOSCH_C_CAN,
263 .reg_align = C_CAN_REG_32,
264 .freq = 50000000, /* 50 MHz */
265 .init = c_can_pci_reset_pch,
266 .bar = 1,
201}; 267};
202 268
203#define C_CAN_ID(_vend, _dev, _driverdata) { \ 269#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
207static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = { 273static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
208 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, 274 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
209 c_can_sta2x11), 275 c_can_sta2x11),
276 C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
277 c_can_pch),
210 {}, 278 {},
211}; 279};
212static struct pci_driver c_can_pci_driver = { 280static struct pci_driver c_can_pci_driver = {
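
The new c_can_pch entry describes the C_CAN cell in Intel's EG20T PCH: registers behind BAR 1, a 50 MHz input clock, and a soft-reset register at offset 0x01fc that c_can_pci_reset_pch() pulses. The callback lands in priv->raminit, so the generic c_can code is expected to invoke it around controller start, roughly as follows (sketch; the actual call site is in c_can.c, outside this diff):

	if (priv->raminit)
		priv->raminit(priv, true);	/* pulse the PCH soft reset */
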
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 1df0b322d1e4..824108cd9fd5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -40,6 +40,7 @@
40#define CAN_RAMINIT_START_MASK(i) (0x001 << (i)) 40#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
41#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i)) 41#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
42#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i)) 42#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
43#define DCAN_RAM_INIT_BIT (1 << 3)
43static DEFINE_SPINLOCK(raminit_lock); 44static DEFINE_SPINLOCK(raminit_lock);
44/* 45/*
45 * 16-bit c_can registers can be arranged differently in the memory 46 * 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
47 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 48 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
48 * Handle the same by providing a common read/write interface. 49 * Handle the same by providing a common read/write interface.
49 */ 50 */
50static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, 51static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
51 enum reg index) 52 enum reg index)
52{ 53{
53 return readw(priv->base + priv->regs[index]); 54 return readw(priv->base + priv->regs[index]);
54} 55}
55 56
56static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, 57static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
57 enum reg index, u16 val) 58 enum reg index, u16 val)
58{ 59{
59 writew(val, priv->base + priv->regs[index]); 60 writew(val, priv->base + priv->regs[index]);
60} 61}
61 62
62static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, 63static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
63 enum reg index) 64 enum reg index)
64{ 65{
65 return readw(priv->base + 2 * priv->regs[index]); 66 return readw(priv->base + 2 * priv->regs[index]);
66} 67}
67 68
68static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, 69static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
69 enum reg index, u16 val) 70 enum reg index, u16 val)
70{ 71{
71 writew(val, priv->base + 2 * priv->regs[index]); 72 writew(val, priv->base + 2 * priv->regs[index]);
72} 73}
73 74
74static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask, 75static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
75 u32 val) 76 u32 val)
76{ 77{
77 /* We look only at the bits of our instance. */ 78 /* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
80 udelay(1); 81 udelay(1);
81} 82}
82 83
83static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) 84static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
84{ 85{
85 u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance); 86 u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
86 u32 ctrl; 87 u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
96 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); 97 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
97 writel(ctrl, priv->raminit_ctrlreg); 98 writel(ctrl, priv->raminit_ctrlreg);
98 ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); 99 ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
99 c_can_hw_raminit_wait(priv, ctrl, mask); 100 c_can_hw_raminit_wait_ti(priv, ctrl, mask);
100 101
101 if (enable) { 102 if (enable) {
102 /* Set start bit and wait for the done bit. */ 103 /* Set start bit and wait for the done bit. */
103 ctrl |= CAN_RAMINIT_START_MASK(priv->instance); 104 ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
104 writel(ctrl, priv->raminit_ctrlreg); 105 writel(ctrl, priv->raminit_ctrlreg);
105 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); 106 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
106 c_can_hw_raminit_wait(priv, ctrl, mask); 107 c_can_hw_raminit_wait_ti(priv, ctrl, mask);
107 } 108 }
108 spin_unlock(&raminit_lock); 109 spin_unlock(&raminit_lock);
109} 110}
110 111
112static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
113{
114 u32 val;
115
116 val = priv->read_reg(priv, index);
117 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
118
119 return val;
120}
121
122static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
123 u32 val)
124{
125 priv->write_reg(priv, index + 1, val >> 16);
126 priv->write_reg(priv, index, val);
127}
128
129static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
130{
131 return readl(priv->base + priv->regs[index]);
132}
133
134static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
135 u32 val)
136{
137 writel(val, priv->base + priv->regs[index]);
138}
139
140static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
141{
142 while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
143 udelay(1);
144}
145
146static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
147{
148 u32 ctrl;
149
150 ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
151 ctrl &= ~DCAN_RAM_INIT_BIT;
152 priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
153 c_can_hw_raminit_wait(priv, ctrl);
154
155 if (enable) {
156 ctrl |= DCAN_RAM_INIT_BIT;
157 priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
158 c_can_hw_raminit_wait(priv, ctrl);
159 }
160}
161
111static struct platform_device_id c_can_id_table[] = { 162static struct platform_device_id c_can_id_table[] = {
112 [BOSCH_C_CAN_PLATFORM] = { 163 [BOSCH_C_CAN_PLATFORM] = {
113 .name = KBUILD_MODNAME, 164 .name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
201 case IORESOURCE_MEM_32BIT: 252 case IORESOURCE_MEM_32BIT:
202 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit; 253 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
203 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit; 254 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
255 priv->read_reg32 = c_can_plat_read_reg32;
256 priv->write_reg32 = c_can_plat_write_reg32;
204 break; 257 break;
205 case IORESOURCE_MEM_16BIT: 258 case IORESOURCE_MEM_16BIT:
206 default: 259 default:
207 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 260 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
208 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; 261 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
262 priv->read_reg32 = c_can_plat_read_reg32;
263 priv->write_reg32 = c_can_plat_write_reg32;
209 break; 264 break;
210 } 265 }
211 break; 266 break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
214 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 269 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
215 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 270 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
216 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; 271 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
272 priv->read_reg32 = d_can_plat_read_reg32;
273 priv->write_reg32 = d_can_plat_write_reg32;
217 274
218 if (pdev->dev.of_node) 275 if (pdev->dev.of_node)
219 priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can"); 276 priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
221 priv->instance = pdev->id; 278 priv->instance = pdev->id;
222 279
223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 280 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
281	/* Not all D_CAN modules have a separate register for the D_CAN
282	 * RAM initialization. If none is specified in DT, fall back to
283	 * the default RAM init bit in the D_CAN module itself.
284	 */
285 if (!res) {
286 priv->raminit = c_can_hw_raminit;
287 break;
288 }
289
224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 290 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 291 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 292 dev_info(&pdev->dev, "control memory is not used for raminit\n");
227 else 293 else
228 priv->raminit = c_can_hw_raminit; 294 priv->raminit = c_can_hw_raminit_ti;
229 break; 295 break;
230 default: 296 default:
231 ret = -EINVAL; 297 ret = -EINVAL;
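
Note that c_can_hw_raminit_wait() above spins without an upper bound while the polled bits stay set in C_CAN_FUNCTION_REG. A bounded variant (illustrative only, not part of this patch) could look like:

	static int c_can_hw_raminit_wait_timeout(const struct c_can_priv *priv,
						 u32 mask)
	{
		int timeout = 1000;

		while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask) {
			if (!timeout--)
				return -ETIMEDOUT;
			udelay(1);
		}

		return 0;
	}
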
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index f19be5269e7b..81c711719490 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
1config CAN_MSCAN 1config CAN_MSCAN
2 depends on PPC || M68K 2 depends on PPC
3 tristate "Support for Freescale MSCAN based chips" 3 tristate "Support for Freescale MSCAN based chips"
4 ---help--- 4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition 5 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 000000000000..5268d216ecfa
--- /dev/null
+++ b/drivers/net/can/rcar_can.c
@@ -0,0 +1,876 @@
1/* Renesas R-Car CAN device driver
2 *
3 * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
4 * Copyright (C) 2013 Renesas Solutions Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/types.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/netdevice.h>
18#include <linux/platform_device.h>
19#include <linux/can/led.h>
20#include <linux/can/dev.h>
21#include <linux/clk.h>
22#include <linux/can/platform/rcar_can.h>
23
24#define RCAR_CAN_DRV_NAME "rcar_can"
25
26/* Mailbox configuration:
27 * mailbox 60 - 63 - Rx FIFO mailboxes
28 * mailbox 56 - 59 - Tx FIFO mailboxes
29 * non-FIFO mailboxes are not used
30 */
31#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
32#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
33#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
34#define RCAR_CAN_FIFO_DEPTH 4
35
36/* Mailbox registers structure */
37struct rcar_can_mbox_regs {
38 u32 id; /* IDE and RTR bits, SID and EID */
39 u8 stub; /* Not used */
40 u8 dlc; /* Data Length Code - bits [0..3] */
41 u8 data[8]; /* Data Bytes */
42 u8 tsh; /* Time Stamp Higher Byte */
43 u8 tsl; /* Time Stamp Lower Byte */
44};
45
46struct rcar_can_regs {
47 struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
48 u32 mkr_2_9[8]; /* Mask Registers 2-9 */
49 u32 fidcr[2]; /* FIFO Received ID Compare Register */
50 u32 mkivlr1; /* Mask Invalid Register 1 */
51 u32 mier1; /* Mailbox Interrupt Enable Register 1 */
52 u32 mkr_0_1[2]; /* Mask Registers 0-1 */
53 u32 mkivlr0; /* Mask Invalid Register 0*/
54 u32 mier0; /* Mailbox Interrupt Enable Register 0 */
55 u8 pad_440[0x3c0];
56 u8 mctl[64]; /* Message Control Registers */
57 u16 ctlr; /* Control Register */
58 u16 str; /* Status register */
59 u8 bcr[3]; /* Bit Configuration Register */
60 u8 clkr; /* Clock Select Register */
61 u8 rfcr; /* Receive FIFO Control Register */
62 u8 rfpcr; /* Receive FIFO Pointer Control Register */
63 u8 tfcr; /* Transmit FIFO Control Register */
64 u8 tfpcr; /* Transmit FIFO Pointer Control Register */
65 u8 eier; /* Error Interrupt Enable Register */
66 u8 eifr; /* Error Interrupt Factor Judge Register */
67 u8 recr; /* Receive Error Count Register */
68 u8 tecr; /* Transmit Error Count Register */
69 u8 ecsr; /* Error Code Store Register */
70 u8 cssr; /* Channel Search Support Register */
71 u8 mssr; /* Mailbox Search Status Register */
72 u8 msmr; /* Mailbox Search Mode Register */
73 u16 tsr; /* Time Stamp Register */
74 u8 afsr; /* Acceptance Filter Support Register */
75 u8 pad_857;
76 u8 tcr; /* Test Control Register */
77 u8 pad_859[7];
78 u8 ier; /* Interrupt Enable Register */
79 u8 isr; /* Interrupt Status Register */
80 u8 pad_862;
81 u8 mbsmr; /* Mailbox Search Mask Register */
82};
83
84struct rcar_can_priv {
85 struct can_priv can; /* Must be the first member! */
86 struct net_device *ndev;
87 struct napi_struct napi;
88 struct rcar_can_regs __iomem *regs;
89 struct clk *clk;
90 u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
91 u32 tx_head;
92 u32 tx_tail;
93 u8 clock_select;
94 u8 ier;
95};
96
97static const struct can_bittiming_const rcar_can_bittiming_const = {
98 .name = RCAR_CAN_DRV_NAME,
99 .tseg1_min = 4,
100 .tseg1_max = 16,
101 .tseg2_min = 2,
102 .tseg2_max = 8,
103 .sjw_max = 4,
104 .brp_min = 1,
105 .brp_max = 1024,
106 .brp_inc = 1,
107};
108
109/* Control Register bits */
110#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
111#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
112 /* at bus-off entry */
113#define RCAR_CAN_CTLR_SLPM (1 << 10)
114#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
115#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
116#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
117#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
118#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
119#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
120#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
121#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
122
123/* Status Register bits */
124#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
125
126/* FIFO Received ID Compare Registers 0 and 1 bits */
127#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
128#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
129
130/* Receive FIFO Control Register bits */
131#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
132#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
133
134/* Transmit FIFO Control Register bits */
135#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
136 /* Number Status Bits */
137#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
138 /* Message Number Status Bits */
139#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
140
141#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
142 /* for Rx mailboxes 0-31 */
143#define RCAR_CAN_N_RX_MKREGS2 8
144
145/* Bit Configuration Register settings */
146#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
147#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
148#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
149#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
150
151/* Mailbox and Mask Registers bits */
152#define RCAR_CAN_IDE (1 << 31)
153#define RCAR_CAN_RTR (1 << 30)
154#define RCAR_CAN_SID_SHIFT 18
155
156/* Mailbox Interrupt Enable Register 1 bits */
157#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
158#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
159
160/* Interrupt Enable Register bits */
161#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
162#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
163 /* Enable Bit */
164#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
165 /* Enable Bit */
166/* Interrupt Status Register bits */
167#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
168#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
169 /* Status Bit */
170#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
171 /* Status Bit */
172
173/* Error Interrupt Enable Register bits */
174#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
175#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
176 /* Interrupt Enable */
177#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
178#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
179#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
180#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
181#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
182#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
183
184/* Error Interrupt Factor Judge Register bits */
185#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
186#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
187 /* Detect Flag */
188#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
189#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
190#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
191#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
192#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
193#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
194
195/* Error Code Store Register bits */
196#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
197#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
198#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
199#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
200#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
201#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
202#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
203#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
204
205#define RCAR_CAN_NAPI_WEIGHT 4
206#define MAX_STR_READS 0x100
207
208static void tx_failure_cleanup(struct net_device *ndev)
209{
210 int i;
211
212 for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
213 can_free_echo_skb(ndev, i);
214}
215
216static void rcar_can_error(struct net_device *ndev)
217{
218 struct rcar_can_priv *priv = netdev_priv(ndev);
219 struct net_device_stats *stats = &ndev->stats;
220 struct can_frame *cf;
221 struct sk_buff *skb;
222 u8 eifr, txerr = 0, rxerr = 0;
223
224 /* Propagate the error condition to the CAN stack */
225 skb = alloc_can_err_skb(ndev, &cf);
226
227 eifr = readb(&priv->regs->eifr);
228 if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
229 txerr = readb(&priv->regs->tecr);
230 rxerr = readb(&priv->regs->recr);
231 if (skb) {
232 cf->can_id |= CAN_ERR_CRTL;
233 cf->data[6] = txerr;
234 cf->data[7] = rxerr;
235 }
236 }
237 if (eifr & RCAR_CAN_EIFR_BEIF) {
238 int rx_errors = 0, tx_errors = 0;
239 u8 ecsr;
240
241 netdev_dbg(priv->ndev, "Bus error interrupt:\n");
242 if (skb) {
243 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
244 cf->data[2] = CAN_ERR_PROT_UNSPEC;
245 }
246 ecsr = readb(&priv->regs->ecsr);
247 if (ecsr & RCAR_CAN_ECSR_ADEF) {
248 netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
249 tx_errors++;
250 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
251 if (skb)
252 cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
253 }
254 if (ecsr & RCAR_CAN_ECSR_BE0F) {
255 netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
256 tx_errors++;
257 writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
258 if (skb)
259 cf->data[2] |= CAN_ERR_PROT_BIT0;
260 }
261 if (ecsr & RCAR_CAN_ECSR_BE1F) {
262 netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
263 tx_errors++;
264 writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
265 if (skb)
266 cf->data[2] |= CAN_ERR_PROT_BIT1;
267 }
268 if (ecsr & RCAR_CAN_ECSR_CEF) {
269 netdev_dbg(priv->ndev, "CRC Error\n");
270 rx_errors++;
271 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
272 if (skb)
273 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
274 }
275 if (ecsr & RCAR_CAN_ECSR_AEF) {
276 netdev_dbg(priv->ndev, "ACK Error\n");
277 tx_errors++;
278 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
279 if (skb) {
280 cf->can_id |= CAN_ERR_ACK;
281 cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
282 }
283 }
284 if (ecsr & RCAR_CAN_ECSR_FEF) {
285 netdev_dbg(priv->ndev, "Form Error\n");
286 rx_errors++;
287 writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
288 if (skb)
289 cf->data[2] |= CAN_ERR_PROT_FORM;
290 }
291 if (ecsr & RCAR_CAN_ECSR_SEF) {
292 netdev_dbg(priv->ndev, "Stuff Error\n");
293 rx_errors++;
294 writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
295 if (skb)
296 cf->data[2] |= CAN_ERR_PROT_STUFF;
297 }
298
299 priv->can.can_stats.bus_error++;
300 ndev->stats.rx_errors += rx_errors;
301 ndev->stats.tx_errors += tx_errors;
302 writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
303 }
304 if (eifr & RCAR_CAN_EIFR_EWIF) {
305 netdev_dbg(priv->ndev, "Error warning interrupt\n");
306 priv->can.state = CAN_STATE_ERROR_WARNING;
307 priv->can.can_stats.error_warning++;
308 /* Clear interrupt condition */
309 writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
310 if (skb)
311 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
312 CAN_ERR_CRTL_RX_WARNING;
313 }
314 if (eifr & RCAR_CAN_EIFR_EPIF) {
315 netdev_dbg(priv->ndev, "Error passive interrupt\n");
316 priv->can.state = CAN_STATE_ERROR_PASSIVE;
317 priv->can.can_stats.error_passive++;
318 /* Clear interrupt condition */
319 writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
320 if (skb)
321 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
322 CAN_ERR_CRTL_RX_PASSIVE;
323 }
324 if (eifr & RCAR_CAN_EIFR_BOEIF) {
325 netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
326 tx_failure_cleanup(ndev);
327 priv->ier = RCAR_CAN_IER_ERSIE;
328 writeb(priv->ier, &priv->regs->ier);
329 priv->can.state = CAN_STATE_BUS_OFF;
330 /* Clear interrupt condition */
331 writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
332 can_bus_off(ndev);
333 if (skb)
334 cf->can_id |= CAN_ERR_BUSOFF;
335 }
336 if (eifr & RCAR_CAN_EIFR_ORIF) {
337 netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
338 ndev->stats.rx_over_errors++;
339 ndev->stats.rx_errors++;
340 writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
341 if (skb) {
342 cf->can_id |= CAN_ERR_CRTL;
343 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
344 }
345 }
346 if (eifr & RCAR_CAN_EIFR_OLIF) {
347 netdev_dbg(priv->ndev,
348 "Overload Frame Transmission error interrupt\n");
349 ndev->stats.rx_over_errors++;
350 ndev->stats.rx_errors++;
351 writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
352 if (skb) {
353 cf->can_id |= CAN_ERR_PROT;
354 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
355 }
356 }
357
358 if (skb) {
359 stats->rx_packets++;
360 stats->rx_bytes += cf->can_dlc;
361 netif_rx(skb);
362 }
363}
364
365static void rcar_can_tx_done(struct net_device *ndev)
366{
367 struct rcar_can_priv *priv = netdev_priv(ndev);
368 struct net_device_stats *stats = &ndev->stats;
369 u8 isr;
370
371 while (1) {
372 u8 unsent = readb(&priv->regs->tfcr);
373
374 unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
375 RCAR_CAN_TFCR_TFUST_SHIFT;
376 if (priv->tx_head - priv->tx_tail <= unsent)
377 break;
378 stats->tx_packets++;
379 stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
380 RCAR_CAN_FIFO_DEPTH];
381 priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
382 can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
383 priv->tx_tail++;
384 netif_wake_queue(ndev);
385 }
386 /* Clear interrupt */
387 isr = readb(&priv->regs->isr);
388 writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
389 can_led_event(ndev, CAN_LED_EVENT_TX);
390}
391
392static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
393{
394 struct net_device *ndev = dev_id;
395 struct rcar_can_priv *priv = netdev_priv(ndev);
396 u8 isr;
397
398 isr = readb(&priv->regs->isr);
399 if (!(isr & priv->ier))
400 return IRQ_NONE;
401
402 if (isr & RCAR_CAN_ISR_ERSF)
403 rcar_can_error(ndev);
404
405 if (isr & RCAR_CAN_ISR_TXFF)
406 rcar_can_tx_done(ndev);
407
408 if (isr & RCAR_CAN_ISR_RXFF) {
409 if (napi_schedule_prep(&priv->napi)) {
410 /* Disable Rx FIFO interrupts */
411 priv->ier &= ~RCAR_CAN_IER_RXFIE;
412 writeb(priv->ier, &priv->regs->ier);
413 __napi_schedule(&priv->napi);
414 }
415 }
416
417 return IRQ_HANDLED;
418}
419
420static void rcar_can_set_bittiming(struct net_device *dev)
421{
422 struct rcar_can_priv *priv = netdev_priv(dev);
423 struct can_bittiming *bt = &priv->can.bittiming;
424 u32 bcr;
425
426 bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
427 RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
428 RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
429 /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
430 * All the registers are big-endian but they get byte-swapped on 32-bit
431 * read/write (but not on 8-bit, contrary to the manuals)...
432 */
433 writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
434}
435
436static void rcar_can_start(struct net_device *ndev)
437{
438 struct rcar_can_priv *priv = netdev_priv(ndev);
439 u16 ctlr;
440 int i;
441
442 /* Set controller to known mode:
443 * - FIFO mailbox mode
444 * - accept all messages
445 * - overrun mode
446 * CAN is in sleep mode after MCU hardware or software reset.
447 */
448 ctlr = readw(&priv->regs->ctlr);
449 ctlr &= ~RCAR_CAN_CTLR_SLPM;
450 writew(ctlr, &priv->regs->ctlr);
451 /* Go to reset mode */
452 ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
453 writew(ctlr, &priv->regs->ctlr);
454 for (i = 0; i < MAX_STR_READS; i++) {
455 if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
456 break;
457 }
458 rcar_can_set_bittiming(ndev);
459 ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
460 ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
461 /* at bus-off */
462 ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
463 ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
464 writew(ctlr, &priv->regs->ctlr);
465
466 /* Accept all SID and EID */
467 writel(0, &priv->regs->mkr_2_9[6]);
468 writel(0, &priv->regs->mkr_2_9[7]);
469 /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
470 writel(0, &priv->regs->mkivlr1);
471 /* Accept all frames */
472 writel(0, &priv->regs->fidcr[0]);
473 writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
474 /* Enable and configure FIFO mailbox interrupts */
475 writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
476
477 priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
478 RCAR_CAN_IER_TXFIE;
479 writeb(priv->ier, &priv->regs->ier);
480
481 /* Accumulate error codes */
482 writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
483 /* Enable error interrupts */
484 writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
485 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
486 RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
487 RCAR_CAN_EIER_OLIE, &priv->regs->eier);
488 priv->can.state = CAN_STATE_ERROR_ACTIVE;
489
490 /* Go to operation mode */
491 writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
492 for (i = 0; i < MAX_STR_READS; i++) {
493 if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
494 break;
495 }
496 /* Enable Rx and Tx FIFO */
497 writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
498 writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
499}
500
501static int rcar_can_open(struct net_device *ndev)
502{
503 struct rcar_can_priv *priv = netdev_priv(ndev);
504 int err;
505
506 err = clk_prepare_enable(priv->clk);
507 if (err) {
508 netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
509 err);
510 goto out;
511 }
512 err = open_candev(ndev);
513 if (err) {
514 netdev_err(ndev, "open_candev() failed, error %d\n", err);
515 goto out_clock;
516 }
517 napi_enable(&priv->napi);
518 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
519 if (err) {
520 netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
521 goto out_close;
522 }
523 can_led_event(ndev, CAN_LED_EVENT_OPEN);
524 rcar_can_start(ndev);
525 netif_start_queue(ndev);
526 return 0;
527out_close:
528 napi_disable(&priv->napi);
529 close_candev(ndev);
530out_clock:
531 clk_disable_unprepare(priv->clk);
532out:
533 return err;
534}
535
536static void rcar_can_stop(struct net_device *ndev)
537{
538 struct rcar_can_priv *priv = netdev_priv(ndev);
539 u16 ctlr;
540 int i;
541
542 /* Go to (force) reset mode */
543 ctlr = readw(&priv->regs->ctlr);
544 ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
545 writew(ctlr, &priv->regs->ctlr);
546 for (i = 0; i < MAX_STR_READS; i++) {
547 if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
548 break;
549 }
550 writel(0, &priv->regs->mier0);
551 writel(0, &priv->regs->mier1);
552 writeb(0, &priv->regs->ier);
553 writeb(0, &priv->regs->eier);
554 /* Go to sleep mode */
555 ctlr |= RCAR_CAN_CTLR_SLPM;
556 writew(ctlr, &priv->regs->ctlr);
557 priv->can.state = CAN_STATE_STOPPED;
558}
559
560static int rcar_can_close(struct net_device *ndev)
561{
562 struct rcar_can_priv *priv = netdev_priv(ndev);
563
564 netif_stop_queue(ndev);
565 rcar_can_stop(ndev);
566 free_irq(ndev->irq, ndev);
567 napi_disable(&priv->napi);
568 clk_disable_unprepare(priv->clk);
569 close_candev(ndev);
570 can_led_event(ndev, CAN_LED_EVENT_STOP);
571 return 0;
572}
573
574static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
575 struct net_device *ndev)
576{
577 struct rcar_can_priv *priv = netdev_priv(ndev);
578 struct can_frame *cf = (struct can_frame *)skb->data;
579 u32 data, i;
580
581 if (can_dropped_invalid_skb(ndev, skb))
582 return NETDEV_TX_OK;
583
584 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
585 data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
586 else /* Standard frame format */
587 data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
588
589 if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
590 data |= RCAR_CAN_RTR;
591 } else {
592 for (i = 0; i < cf->can_dlc; i++)
593 writeb(cf->data[i],
594 &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
595 }
596
597 writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
598
599 writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
600
601 priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
602 can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
603 priv->tx_head++;
604 /* Start Tx: write 0xff to the TFPCR register to increment
605 * the CPU-side pointer for the transmit FIFO to the next
606 * mailbox location
607 */
608 writeb(0xff, &priv->regs->tfpcr);
609 /* Stop the queue if we've filled all FIFO entries */
610 if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
611 netif_stop_queue(ndev);
612
613 return NETDEV_TX_OK;
614}
615
616static const struct net_device_ops rcar_can_netdev_ops = {
617 .ndo_open = rcar_can_open,
618 .ndo_stop = rcar_can_close,
619 .ndo_start_xmit = rcar_can_start_xmit,
620};
621
622static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
623{
624 struct net_device_stats *stats = &priv->ndev->stats;
625 struct can_frame *cf;
626 struct sk_buff *skb;
627 u32 data;
628 u8 dlc;
629
630 skb = alloc_can_skb(priv->ndev, &cf);
631 if (!skb) {
632 stats->rx_dropped++;
633 return;
634 }
635
636 data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
637 if (data & RCAR_CAN_IDE)
638 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
639 else
640 cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
641
642 dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
643 cf->can_dlc = get_can_dlc(dlc);
644 if (data & RCAR_CAN_RTR) {
645 cf->can_id |= CAN_RTR_FLAG;
646 } else {
647 for (dlc = 0; dlc < cf->can_dlc; dlc++)
648 cf->data[dlc] =
649 readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
650 }
651
652 can_led_event(priv->ndev, CAN_LED_EVENT_RX);
653
654 stats->rx_bytes += cf->can_dlc;
655 stats->rx_packets++;
656 netif_receive_skb(skb);
657}
658
659static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
660{
661 struct rcar_can_priv *priv = container_of(napi,
662 struct rcar_can_priv, napi);
663 int num_pkts;
664
665 for (num_pkts = 0; num_pkts < quota; num_pkts++) {
666 u8 rfcr, isr;
667
668 isr = readb(&priv->regs->isr);
669 /* Clear interrupt bit */
670 if (isr & RCAR_CAN_ISR_RXFF)
671 writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
672 rfcr = readb(&priv->regs->rfcr);
673 if (rfcr & RCAR_CAN_RFCR_RFEST)
674 break;
675 rcar_can_rx_pkt(priv);
676 /* Write 0xff to the RFPCR register to increment
677 * the CPU-side pointer for the receive FIFO
678 * to the next mailbox location
679 */
680 writeb(0xff, &priv->regs->rfpcr);
681 }
682 /* All packets processed */
683 if (num_pkts < quota) {
684 napi_complete(napi);
685 priv->ier |= RCAR_CAN_IER_RXFIE;
686 writeb(priv->ier, &priv->regs->ier);
687 }
688 return num_pkts;
689}
690
691static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
692{
693 switch (mode) {
694 case CAN_MODE_START:
695 rcar_can_start(ndev);
696 netif_wake_queue(ndev);
697 return 0;
698 default:
699 return -EOPNOTSUPP;
700 }
701}
702
703static int rcar_can_get_berr_counter(const struct net_device *dev,
704 struct can_berr_counter *bec)
705{
706 struct rcar_can_priv *priv = netdev_priv(dev);
707 int err;
708
709 err = clk_prepare_enable(priv->clk);
710 if (err)
711 return err;
712 bec->txerr = readb(&priv->regs->tecr);
713 bec->rxerr = readb(&priv->regs->recr);
714 clk_disable_unprepare(priv->clk);
715 return 0;
716}
717
718static int rcar_can_probe(struct platform_device *pdev)
719{
720 struct rcar_can_platform_data *pdata;
721 struct rcar_can_priv *priv;
722 struct net_device *ndev;
723 struct resource *mem;
724 void __iomem *addr;
725 int err = -ENODEV;
726 int irq;
727
728 pdata = dev_get_platdata(&pdev->dev);
729 if (!pdata) {
730 dev_err(&pdev->dev, "No platform data provided!\n");
731 goto fail;
732 }
733
734 irq = platform_get_irq(pdev, 0);
735	if (irq < 0) {
736 dev_err(&pdev->dev, "No IRQ resource\n");
737 goto fail;
738 }
739
740 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
741 addr = devm_ioremap_resource(&pdev->dev, mem);
742 if (IS_ERR(addr)) {
743 err = PTR_ERR(addr);
744 goto fail;
745 }
746
747 ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
748 if (!ndev) {
749 dev_err(&pdev->dev, "alloc_candev() failed\n");
750 err = -ENOMEM;
751 goto fail;
752 }
753
754 priv = netdev_priv(ndev);
755
756 priv->clk = devm_clk_get(&pdev->dev, NULL);
757 if (IS_ERR(priv->clk)) {
758 err = PTR_ERR(priv->clk);
759 dev_err(&pdev->dev, "cannot get clock: %d\n", err);
760 goto fail_clk;
761 }
762
763 ndev->netdev_ops = &rcar_can_netdev_ops;
764 ndev->irq = irq;
765 ndev->flags |= IFF_ECHO;
766 priv->ndev = ndev;
767 priv->regs = addr;
768 priv->clock_select = pdata->clock_select;
769 priv->can.clock.freq = clk_get_rate(priv->clk);
770 priv->can.bittiming_const = &rcar_can_bittiming_const;
771 priv->can.do_set_mode = rcar_can_do_set_mode;
772 priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
773 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
774 platform_set_drvdata(pdev, ndev);
775 SET_NETDEV_DEV(ndev, &pdev->dev);
776
777 netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
778 RCAR_CAN_NAPI_WEIGHT);
779 err = register_candev(ndev);
780 if (err) {
781 dev_err(&pdev->dev, "register_candev() failed, error %d\n",
782 err);
783 goto fail_candev;
784 }
785
786 devm_can_led_init(ndev);
787
788 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
789 priv->regs, ndev->irq);
790
791 return 0;
792fail_candev:
793 netif_napi_del(&priv->napi);
794fail_clk:
795 free_candev(ndev);
796fail:
797 return err;
798}
799
800static int rcar_can_remove(struct platform_device *pdev)
801{
802 struct net_device *ndev = platform_get_drvdata(pdev);
803 struct rcar_can_priv *priv = netdev_priv(ndev);
804
805 unregister_candev(ndev);
806 netif_napi_del(&priv->napi);
807 free_candev(ndev);
808 return 0;
809}
810
811static int __maybe_unused rcar_can_suspend(struct device *dev)
812{
813 struct net_device *ndev = dev_get_drvdata(dev);
814 struct rcar_can_priv *priv = netdev_priv(ndev);
815 u16 ctlr;
816
817 if (netif_running(ndev)) {
818 netif_stop_queue(ndev);
819 netif_device_detach(ndev);
820 }
821 ctlr = readw(&priv->regs->ctlr);
822 ctlr |= RCAR_CAN_CTLR_CANM_HALT;
823 writew(ctlr, &priv->regs->ctlr);
824 ctlr |= RCAR_CAN_CTLR_SLPM;
825 writew(ctlr, &priv->regs->ctlr);
826 priv->can.state = CAN_STATE_SLEEPING;
827
828 clk_disable(priv->clk);
829 return 0;
830}
831
832static int __maybe_unused rcar_can_resume(struct device *dev)
833{
834 struct net_device *ndev = dev_get_drvdata(dev);
835 struct rcar_can_priv *priv = netdev_priv(ndev);
836 u16 ctlr;
837 int err;
838
839 err = clk_enable(priv->clk);
840 if (err) {
841 netdev_err(ndev, "clk_enable() failed, error %d\n", err);
842 return err;
843 }
844
845 ctlr = readw(&priv->regs->ctlr);
846 ctlr &= ~RCAR_CAN_CTLR_SLPM;
847 writew(ctlr, &priv->regs->ctlr);
848 ctlr &= ~RCAR_CAN_CTLR_CANM;
849 writew(ctlr, &priv->regs->ctlr);
850 priv->can.state = CAN_STATE_ERROR_ACTIVE;
851
852 if (netif_running(ndev)) {
853 netif_device_attach(ndev);
854 netif_start_queue(ndev);
855 }
856 return 0;
857}
858
859static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
860
861static struct platform_driver rcar_can_driver = {
862 .driver = {
863 .name = RCAR_CAN_DRV_NAME,
864 .owner = THIS_MODULE,
865 .pm = &rcar_can_pm_ops,
866 },
867 .probe = rcar_can_probe,
868 .remove = rcar_can_remove,
869};
870
871module_platform_driver(rcar_can_driver);
872
873MODULE_AUTHOR("Cogent Embedded, Inc.");
874MODULE_LICENSE("GPL");
875MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
876MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
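
In the driver above, tx_head and tx_tail are free-running counters: rcar_can_start_xmit() advances tx_head, rcar_can_tx_done() advances tx_tail, and only their difference and their value modulo RCAR_CAN_FIFO_DEPTH are ever used, so u32 wraparound is harmless. The queue-stop condition reads as a one-line predicate (hypothetical helper, equivalent to the check in rcar_can_start_xmit()):

	static bool rcar_can_tx_fifo_full(const struct rcar_can_priv *priv)
	{
		return priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH;
	}
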
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
556/* 556/*
557 * netdev sysfs 557 * netdev sysfs
558 */ 558 */
559static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
560 char *buf)
561{
562 struct net_device *ndev = to_net_dev(dev);
563 struct softing_priv *priv = netdev2softing(ndev);
564
565 return sprintf(buf, "%i\n", priv->index);
566}
567
568static ssize_t show_chip(struct device *dev, struct device_attribute *attr, 559static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
569 char *buf) 560 char *buf)
570{ 561{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
609 return count; 600 return count;
610} 601}
611 602
612static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
613static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL); 603static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
614static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output); 604static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
615 605
616static const struct attribute *const netdev_sysfs_attrs[] = { 606static const struct attribute *const netdev_sysfs_attrs[] = {
617 &dev_attr_channel.attr,
618 &dev_attr_chip.attr, 607 &dev_attr_chip.attr,
619 &dev_attr_output.attr, 608 &dev_attr_output.attr,
620 NULL, 609 NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
679{ 668{
680 int ret; 669 int ret;
681 670
682 netdev->sysfs_groups[0] = &netdev_sysfs_group;
683 ret = register_candev(netdev); 671 ret = register_candev(netdev);
684 if (ret) { 672 if (ret) {
685 dev_alert(&netdev->dev, "register failed\n"); 673 dev_alert(&netdev->dev, "register failed\n");
686 return ret; 674 return ret;
687 } 675 }
676 if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
677 netdev_alert(netdev, "sysfs group failed\n");
678
688 return 0; 679 return 0;
689} 680}
690 681
691static void softing_netdev_cleanup(struct net_device *netdev) 682static void softing_netdev_cleanup(struct net_device *netdev)
692{ 683{
684 sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
693 unregister_candev(netdev); 685 unregister_candev(netdev);
694 free_candev(netdev); 686 free_candev(netdev);
695} 687}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
721DEV_ATTR_RO_STR(hardware, pdat->name); 713DEV_ATTR_RO_STR(hardware, pdat->name);
722DEV_ATTR_RO(hardware_version, id.hw_version); 714DEV_ATTR_RO(hardware_version, id.hw_version);
723DEV_ATTR_RO(license, id.license); 715DEV_ATTR_RO(license, id.license);
724DEV_ATTR_RO(frequency, id.freq);
725DEV_ATTR_RO(txpending, tx.pending);
726 716
727static struct attribute *softing_pdev_attrs[] = { 717static struct attribute *softing_pdev_attrs[] = {
728 &dev_attr_serial.attr, 718 &dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
731 &dev_attr_hardware.attr, 721 &dev_attr_hardware.attr,
732 &dev_attr_hardware_version.attr, 722 &dev_attr_hardware_version.attr,
733 &dev_attr_license.attr, 723 &dev_attr_license.attr,
734 &dev_attr_frequency.attr,
735 &dev_attr_txpending.attr,
736 NULL, 724 NULL,
737}; 725};
738 726
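
The remaining per-netdev attributes are declared with DEVICE_ATTR(); for reference, the macro roughly expands as follows (per include/linux/device.h, shown here as a sketch):

	static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
	/* ~ static const struct device_attribute dev_attr_chip = {
	 *	.attr	= { .name = "chip", .mode = S_IRUGO },
	 *	.show	= show_chip,
	 *	.store	= NULL,
	 * };
	 */
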
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
1menu "CAN SPI interfaces"
2 depends on SPI
3
4config CAN_MCP251X
5 tristate "Microchip MCP251x SPI CAN controllers"
6 depends on HAS_DMA
7 ---help---
8 Driver for the Microchip MCP251x SPI CAN controllers.
9
10endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Linux Controller Area Network SPI drivers.
3#
4
5
6obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..5df239e68812 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
214 214
215#define TX_ECHO_SKB_MAX 1 215#define TX_ECHO_SKB_MAX 1
216 216
217#define MCP251X_OST_DELAY_MS (5)
218
217#define DEVICE_NAME "mcp251x" 219#define DEVICE_NAME "mcp251x"
218 220
219static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */ 221static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
624static int mcp251x_hw_reset(struct spi_device *spi) 626static int mcp251x_hw_reset(struct spi_device *spi)
625{ 627{
626 struct mcp251x_priv *priv = spi_get_drvdata(spi); 628 struct mcp251x_priv *priv = spi_get_drvdata(spi);
629 u8 reg;
627 int ret; 630 int ret;
628 unsigned long timeout; 631
632 /* Wait for oscillator startup timer after power up */
633 mdelay(MCP251X_OST_DELAY_MS);
629 634
630 priv->spi_tx_buf[0] = INSTRUCTION_RESET; 635 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
631 ret = spi_write(spi, priv->spi_tx_buf, 1); 636 ret = mcp251x_spi_trans(spi, 1);
632 if (ret) { 637 if (ret)
633 dev_err(&spi->dev, "reset failed: ret = %d\n", ret); 638 return ret;
634 return -EIO; 639
635 } 640 /* Wait for oscillator startup timer after reset */
641 mdelay(MCP251X_OST_DELAY_MS);
642
643 reg = mcp251x_read_reg(spi, CANSTAT);
644 if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
645 return -ENODEV;
636 646
637 /* Wait for reset to finish */
638 timeout = jiffies + HZ;
639 mdelay(10);
640 while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
641 != CANCTRL_REQOP_CONF) {
642 schedule();
643 if (time_after(jiffies, timeout)) {
644 dev_err(&spi->dev, "MCP251x didn't"
645 " enter in conf mode after reset\n");
646 return -EBUSY;
647 }
648 }
649 return 0; 647 return 0;
650} 648}
651 649
652static int mcp251x_hw_probe(struct spi_device *spi) 650static int mcp251x_hw_probe(struct spi_device *spi)
653{ 651{
654 int st1, st2; 652 u8 ctrl;
653 int ret;
655 654
656 mcp251x_hw_reset(spi); 655 ret = mcp251x_hw_reset(spi);
656 if (ret)
657 return ret;
657 658
658 /* 659 ctrl = mcp251x_read_reg(spi, CANCTRL);
659 * Please note that these are "magic values" based on after 660
660 * reset defaults taken from data sheet which allows us to see 661 dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
661 * if we really have a chip on the bus (we avoid common all
662 * zeroes or all ones situations)
663 */
664 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
665 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
666 662
667 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2); 663 /* Check for power up default value */
664 if ((ctrl & 0x17) != 0x07)
665 return -ENODEV;
668 666
669 /* Check for power up default values */ 667 return 0;
670 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
671} 668}
672 669
673static int mcp251x_power_enable(struct regulator *reg, int enable) 670static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
776 773
777 mutex_lock(&priv->mcp_lock); 774 mutex_lock(&priv->mcp_lock);
778 if (priv->after_suspend) { 775 if (priv->after_suspend) {
779 mdelay(10);
780 mcp251x_hw_reset(spi); 776 mcp251x_hw_reset(spi);
781 mcp251x_setup(net, priv, spi); 777 mcp251x_setup(net, priv, spi);
782 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 778 if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -955,7 +951,7 @@ static int mcp251x_open(struct net_device *net)
955 priv->tx_len = 0; 951 priv->tx_len = 0;
956 952
957 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, 953 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
958 flags, DEVICE_NAME, priv); 954 flags | IRQF_ONESHOT, DEVICE_NAME, priv);
959 if (ret) { 955 if (ret) {
960 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 956 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
961 mcp251x_power_enable(priv->transceiver, 0); 957 mcp251x_power_enable(priv->transceiver, 0);
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1032 struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); 1028 struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
1033 struct net_device *net; 1029 struct net_device *net;
1034 struct mcp251x_priv *priv; 1030 struct mcp251x_priv *priv;
1035 int freq, ret = -ENODEV;
1036 struct clk *clk; 1031 struct clk *clk;
1032 int freq, ret;
1037 1033
1038 clk = devm_clk_get(&spi->dev, NULL); 1034 clk = devm_clk_get(&spi->dev, NULL);
1039 if (IS_ERR(clk)) { 1035 if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
1076 priv->net = net; 1072 priv->net = net;
1077 priv->clk = clk; 1073 priv->clk = clk;
1078 1074
1075 spi_set_drvdata(spi, priv);
1076
1077 /* Configure the SPI bus */
1078 spi->bits_per_word = 8;
1079 if (mcp251x_is_2510(spi))
1080 spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
1081 else
1082 spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
1083 ret = spi_setup(spi);
1084 if (ret)
1085 goto out_clk;
1086
1079 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1087 priv->power = devm_regulator_get(&spi->dev, "vdd");
1080 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1088 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
1081 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1089 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
1088 if (ret) 1096 if (ret)
1089 goto out_clk; 1097 goto out_clk;
1090 1098
1091 spi_set_drvdata(spi, priv);
1092
1093 priv->spi = spi; 1099 priv->spi = spi;
1094 mutex_init(&priv->mcp_lock); 1100 mutex_init(&priv->mcp_lock);
1095 1101
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
1134 1140
1135 SET_NETDEV_DEV(net, &spi->dev); 1141 SET_NETDEV_DEV(net, &spi->dev);
1136 1142
1137 /* Configure the SPI bus */
1138 spi->mode = spi->mode ? : SPI_MODE_0;
1139 if (mcp251x_is_2510(spi))
1140 spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
1141 else
1142 spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
1143 spi->bits_per_word = 8;
1144 spi_setup(spi);
1145
1146 /* Here is OK to not lock the MCP, no one knows about it yet */ 1143 /* Here is OK to not lock the MCP, no one knows about it yet */
1147 if (!mcp251x_hw_probe(spi)) { 1144 ret = mcp251x_hw_probe(spi);
1148 ret = -ENODEV; 1145 if (ret)
1149 goto error_probe; 1146 goto error_probe;
1150 } 1147
1151 mcp251x_hw_sleep(spi); 1148 mcp251x_hw_sleep(spi);
1152 1149
1153 ret = register_candev(net); 1150 ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
1156 1153
1157 devm_can_led_init(net); 1154 devm_can_led_init(net);
1158 1155
1159 return ret; 1156 return 0;
1160 1157
1161error_probe: 1158error_probe:
1162 if (mcp251x_enable_dma) 1159 if (mcp251x_enable_dma)
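
Two details worth noting in the mcp251x probe reordering above: spi_setup() now runs before the first transfer issued by mcp251x_hw_reset(), and the max_speed_hz defaults rely on the GCC "x ? : y" extension to keep any board-supplied rate. An equivalent portable spelling of that default (sketch):

	if (!spi->max_speed_hz)
		spi->max_speed_hz = mcp251x_is_2510(spi) ? 5 * 1000 * 1000
							 : 10 * 1000 * 1000;
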
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..a77db919363c 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
13 This driver supports the CAN-USB/2 interface 13 This driver supports the CAN-USB/2 interface
14 from esd electronic system design gmbh (http://www.esd.eu). 14 from esd electronic system design gmbh (http://www.esd.eu).
15 15
16config CAN_GS_USB
17 tristate "Geschwister Schneider UG interfaces"
18 ---help---
19 This driver supports the Geschwister Schneider USB/CAN devices.
20	  If unsure, choose N;
21	  choose Y for built-in support,
22	  or M to compile as a module (the module will be named gs_usb).
23
16config CAN_KVASER_USB 24config CAN_KVASER_USB
17 tristate "Kvaser CAN/USB interface" 25 tristate "Kvaser CAN/USB interface"
18 ---help--- 26 ---help---
19 This driver adds support for Kvaser CAN/USB devices like Kvaser 27 This driver adds support for Kvaser CAN/USB devices like Kvaser
20 Leaf Light. 28 Leaf Light.
21 29
22 The driver gives support for the following devices: 30 The driver provides support for the following devices:
23 - Kvaser Leaf Light 31 - Kvaser Leaf Light
24 - Kvaser Leaf Professional HS 32 - Kvaser Leaf Professional HS
25 - Kvaser Leaf SemiPro HS 33 - Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
36 - Kvaser Leaf Light "China" 44 - Kvaser Leaf Light "China"
37 - Kvaser BlackBird SemiPro 45 - Kvaser BlackBird SemiPro
38 - Kvaser USBcan R 46 - Kvaser USBcan R
47 - Kvaser Leaf Light v2
48 - Kvaser Mini PCI Express HS
39 49
40 If unsure, say N. 50 If unsure, say N.
41 51
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index becef460a91a..7b9a393b1ac8 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o 6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
7obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
7obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o 8obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
8obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ 9obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
9obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o 10obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644
index 000000000000..04b0f84612f0
--- /dev/null
+++ b/drivers/net/can/usb/gs_usb.c
@@ -0,0 +1,971 @@
1/* CAN driver for Geschwister Schneider USB/CAN devices.
2 *
3 * Copyright (C) 2013 Geschwister Schneider Technologie-,
4 * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
5 *
6 * Many thanks to all socketcan devs!
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published
10 * by the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/init.h>
19#include <linux/signal.h>
20#include <linux/module.h>
21#include <linux/netdevice.h>
22#include <linux/usb.h>
23
24#include <linux/can.h>
25#include <linux/can/dev.h>
26#include <linux/can/error.h>
27
28/* Device specific constants */
29#define USB_GSUSB_1_VENDOR_ID 0x1d50
30#define USB_GSUSB_1_PRODUCT_ID 0x606f
31
32#define GSUSB_ENDPOINT_IN 1
33#define GSUSB_ENDPOINT_OUT 2
34
35/* USB vendor request identifiers */
36enum gs_usb_breq {
37 GS_USB_BREQ_HOST_FORMAT = 0,
38 GS_USB_BREQ_BITTIMING,
39 GS_USB_BREQ_MODE,
40 GS_USB_BREQ_BERR,
41 GS_USB_BREQ_BT_CONST,
42 GS_USB_BREQ_DEVICE_CONFIG
43};
44
45enum gs_can_mode {
46 /* reset a channel. turns it off */
47 GS_CAN_MODE_RESET = 0,
48 /* starts a channel */
49 GS_CAN_MODE_START
50};
51
52enum gs_can_state {
53 GS_CAN_STATE_ERROR_ACTIVE = 0,
54 GS_CAN_STATE_ERROR_WARNING,
55 GS_CAN_STATE_ERROR_PASSIVE,
56 GS_CAN_STATE_BUS_OFF,
57 GS_CAN_STATE_STOPPED,
58 GS_CAN_STATE_SLEEPING
59};
60
61/* data types passed between host and device */
62struct gs_host_config {
63 u32 byte_order;
64} __packed;
65/* All data exchanged between host and device is exchanged in host byte order,
66 * thanks to the struct gs_host_config byte_order member, which is sent first
67 * to indicate the desired byte order.
68 */
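/* How the handshake plays out in practice (a sketch, inferred from the
 * comment above and from gs_usb_probe() below): the host sends the constant
 * 0x0000beef in its native byte order. A little-endian host puts 0xef on
 * the wire first while a big-endian host puts 0x00 first, so the firmware
 * can deduce the host's byte order from the received bytes and use it for
 * all subsequent structures.
 */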
69
70struct gs_device_config {
71 u8 reserved1;
72 u8 reserved2;
73 u8 reserved3;
74 u8 icount;
75 u32 sw_version;
76 u32 hw_version;
77} __packed;
78
79#define GS_CAN_MODE_NORMAL 0
80#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
81#define GS_CAN_MODE_LOOP_BACK (1<<1)
82#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
83#define GS_CAN_MODE_ONE_SHOT (1<<3)
84
85struct gs_device_mode {
86 u32 mode;
87 u32 flags;
88} __packed;
89
90struct gs_device_state {
91 u32 state;
92 u32 rxerr;
93 u32 txerr;
94} __packed;
95
96struct gs_device_bittiming {
97 u32 prop_seg;
98 u32 phase_seg1;
99 u32 phase_seg2;
100 u32 sjw;
101 u32 brp;
102} __packed;
103
104#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
105#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
106#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
107#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
108
109struct gs_device_bt_const {
110 u32 feature;
111 u32 fclk_can;
112 u32 tseg1_min;
113 u32 tseg1_max;
114 u32 tseg2_min;
115 u32 tseg2_max;
116 u32 sjw_max;
117 u32 brp_min;
118 u32 brp_max;
119 u32 brp_inc;
120} __packed;
121
122#define GS_CAN_FLAG_OVERFLOW 1
123
124struct gs_host_frame {
125 u32 echo_id;
126 u32 can_id;
127
128 u8 can_dlc;
129 u8 channel;
130 u8 flags;
131 u8 reserved;
132
133 u8 data[8];
134} __packed;
135/* The GS USB devices make use of the same flags and masks as in
136 * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
137 */
138
139/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
140#define GS_MAX_TX_URBS 10
141/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
142#define GS_MAX_RX_URBS 30
143/* Maximum number of interfaces the driver supports per device.
144 * Current hardware only supports 2 interfaces. The future may vary.
145 */
146#define GS_MAX_INTF 2
147
148struct gs_tx_context {
149 struct gs_can *dev;
150 unsigned int echo_id;
151};
152
153struct gs_can {
154 struct can_priv can; /* must be the first member */
155
156 struct gs_usb *parent;
157
158 struct net_device *netdev;
159 struct usb_device *udev;
160 struct usb_interface *iface;
161
162 struct can_bittiming_const bt_const;
163 unsigned int channel; /* channel number */
164
165	/* This lock prevents a race condition between xmit and receive. */
166 spinlock_t tx_ctx_lock;
167 struct gs_tx_context tx_context[GS_MAX_TX_URBS];
168
169 struct usb_anchor tx_submitted;
170 atomic_t active_tx_urbs;
171};
172
173/* usb interface struct */
174struct gs_usb {
175 struct gs_can *canch[GS_MAX_INTF];
176 struct usb_anchor rx_submitted;
177 atomic_t active_channels;
178 struct usb_device *udev;
179};
180
181/* 'allocate' a tx context.
182 * returns a valid tx context or NULL if there is no space.
183 */
184static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
185{
186 int i = 0;
187 unsigned long flags;
188
189 spin_lock_irqsave(&dev->tx_ctx_lock, flags);
190
191 for (; i < GS_MAX_TX_URBS; i++) {
192 if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
193 dev->tx_context[i].echo_id = i;
194 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
195 return &dev->tx_context[i];
196 }
197 }
198
199 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
200 return NULL;
201}
202
203/* releases a tx context
204 */
205static void gs_free_tx_context(struct gs_tx_context *txc)
206{
207 txc->echo_id = GS_MAX_TX_URBS;
208}
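/* Note on the context scheme above: a tx_context whose echo_id equals
 * GS_MAX_TX_URBS (an out-of-range value) is considered free. Allocation
 * stamps the context with its own array index, which doubles as the USB
 * echo_id the device reports back once the frame has gone out on the bus.
 */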
209
210/* Get a tx context by id.
211 */
212static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
213{
214 unsigned long flags;
215
216 if (id < GS_MAX_TX_URBS) {
217 spin_lock_irqsave(&dev->tx_ctx_lock, flags);
218 if (dev->tx_context[id].echo_id == id) {
219 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
220 return &dev->tx_context[id];
221 }
222 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
223 }
224 return NULL;
225}
226
227static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
228{
229 struct gs_device_mode *dm;
230 struct usb_interface *intf = gsdev->iface;
231 int rc;
232
233 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
234 if (!dm)
235 return -ENOMEM;
236
237 dm->mode = GS_CAN_MODE_RESET;
238
239 rc = usb_control_msg(interface_to_usbdev(intf),
240 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
241 GS_USB_BREQ_MODE,
242 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
243 gsdev->channel,
244 0,
245 dm,
246 sizeof(*dm),
247 1000);
248
	kfree(dm);
249	return rc;
250}
251
252static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
253{
254 struct can_device_stats *can_stats = &dev->can.can_stats;
255
256 if (cf->can_id & CAN_ERR_RESTARTED) {
257 dev->can.state = CAN_STATE_ERROR_ACTIVE;
258 can_stats->restarts++;
259 } else if (cf->can_id & CAN_ERR_BUSOFF) {
260 dev->can.state = CAN_STATE_BUS_OFF;
261 can_stats->bus_off++;
262 } else if (cf->can_id & CAN_ERR_CRTL) {
263 if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
264 (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
265 dev->can.state = CAN_STATE_ERROR_WARNING;
266 can_stats->error_warning++;
267 } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
268 (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
269 dev->can.state = CAN_STATE_ERROR_PASSIVE;
270 can_stats->error_passive++;
271 } else {
272 dev->can.state = CAN_STATE_ERROR_ACTIVE;
273 }
274 }
275}
276
277static void gs_usb_recieve_bulk_callback(struct urb *urb)
278{
279 struct gs_usb *usbcan = urb->context;
280 struct gs_can *dev;
281 struct net_device *netdev;
282 int rc;
283 struct net_device_stats *stats;
284 struct gs_host_frame *hf = urb->transfer_buffer;
285 struct gs_tx_context *txc;
286 struct can_frame *cf;
287 struct sk_buff *skb;
288
289 BUG_ON(!usbcan);
290
291 switch (urb->status) {
292 case 0: /* success */
293 break;
294 case -ENOENT:
295 case -ESHUTDOWN:
296 return;
297 default:
298		/* do not resubmit aborted urbs, e.g. when the device goes down */
299 return;
300 }
301
302 /* device reports out of range channel id */
303 if (hf->channel >= GS_MAX_INTF)
304 goto resubmit_urb;
305
306 dev = usbcan->canch[hf->channel];
307
308 netdev = dev->netdev;
309 stats = &netdev->stats;
310
311 if (!netif_device_present(netdev))
312 return;
313
314 if (hf->echo_id == -1) { /* normal rx */
315 skb = alloc_can_skb(dev->netdev, &cf);
316 if (!skb)
317 return;
318
319 cf->can_id = hf->can_id;
320
321 cf->can_dlc = get_can_dlc(hf->can_dlc);
322 memcpy(cf->data, hf->data, 8);
323
324 /* ERROR frames tell us information about the controller */
325 if (hf->can_id & CAN_ERR_FLAG)
326 gs_update_state(dev, cf);
327
328 netdev->stats.rx_packets++;
329 netdev->stats.rx_bytes += hf->can_dlc;
330
331 netif_rx(skb);
332	} else { /* echoed tx frame */
333 if (hf->echo_id >= GS_MAX_TX_URBS) {
334 netdev_err(netdev,
335 "Unexpected out of range echo id %d\n",
336 hf->echo_id);
337 goto resubmit_urb;
338 }
339
340 netdev->stats.tx_packets++;
341 netdev->stats.tx_bytes += hf->can_dlc;
342
343 txc = gs_get_tx_context(dev, hf->echo_id);
344
345 /* bad devices send bad echo_ids. */
346 if (!txc) {
347 netdev_err(netdev,
348 "Unexpected unused echo id %d\n",
349 hf->echo_id);
350 goto resubmit_urb;
351 }
352
353 can_get_echo_skb(netdev, hf->echo_id);
354
355 gs_free_tx_context(txc);
356
357 netif_wake_queue(netdev);
358 }
359
360 if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
361 skb = alloc_can_err_skb(netdev, &cf);
362 if (!skb)
363 goto resubmit_urb;
364
365 cf->can_id |= CAN_ERR_CRTL;
366 cf->can_dlc = CAN_ERR_DLC;
367 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
368 stats->rx_over_errors++;
369 stats->rx_errors++;
370 netif_rx(skb);
371 }
372
373 resubmit_urb:
374 usb_fill_bulk_urb(urb,
375 usbcan->udev,
376 usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
377 hf,
378 sizeof(struct gs_host_frame),
379 gs_usb_recieve_bulk_callback,
380 usbcan
381 );
382
383 rc = usb_submit_urb(urb, GFP_ATOMIC);
384
385	/* On USB failure, take down all interfaces */
386 if (rc == -ENODEV) {
387 for (rc = 0; rc < GS_MAX_INTF; rc++) {
388 if (usbcan->canch[rc])
389 netif_device_detach(usbcan->canch[rc]->netdev);
390 }
391 }
392}
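/* The rx path above recycles its buffers: each completion parses one
 * gs_host_frame, then refills and resubmits the very same urb, so
 * steady-state reception performs no allocations. An -ENODEV on resubmit
 * means the device is gone and every channel's netdev is detached.
 */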
393
394static int gs_usb_set_bittiming(struct net_device *netdev)
395{
396 struct gs_can *dev = netdev_priv(netdev);
397 struct can_bittiming *bt = &dev->can.bittiming;
398 struct usb_interface *intf = dev->iface;
399 int rc;
400 struct gs_device_bittiming *dbt;
401
402 dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
403 if (!dbt)
404 return -ENOMEM;
405
406 dbt->prop_seg = bt->prop_seg;
407 dbt->phase_seg1 = bt->phase_seg1;
408 dbt->phase_seg2 = bt->phase_seg2;
409 dbt->sjw = bt->sjw;
410 dbt->brp = bt->brp;
411
412 /* request bit timings */
413 rc = usb_control_msg(interface_to_usbdev(intf),
414 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
415 GS_USB_BREQ_BITTIMING,
416 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
417 dev->channel,
418 0,
419 dbt,
420 sizeof(*dbt),
421 1000);
422
423 kfree(dbt);
424
425 if (rc < 0)
426 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
427 rc);
428
429 return rc;
430}
431
432static void gs_usb_xmit_callback(struct urb *urb)
433{
434 struct gs_tx_context *txc = urb->context;
435 struct gs_can *dev = txc->dev;
436 struct net_device *netdev = dev->netdev;
437
438 if (urb->status)
439 netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
440
441 usb_free_coherent(urb->dev,
442 urb->transfer_buffer_length,
443 urb->transfer_buffer,
444 urb->transfer_dma);
445
446 atomic_dec(&dev->active_tx_urbs);
447
448 if (!netif_device_present(netdev))
449 return;
450
451 if (netif_queue_stopped(netdev))
452 netif_wake_queue(netdev);
453}
454
455static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
456{
457 struct gs_can *dev = netdev_priv(netdev);
458 struct net_device_stats *stats = &dev->netdev->stats;
459 struct urb *urb;
460 struct gs_host_frame *hf;
461 struct can_frame *cf;
462 int rc;
463 unsigned int idx;
464 struct gs_tx_context *txc;
465
466 if (can_dropped_invalid_skb(netdev, skb))
467 return NETDEV_TX_OK;
468
469 /* find an empty context to keep track of transmission */
470 txc = gs_alloc_tx_context(dev);
471 if (!txc)
472 return NETDEV_TX_BUSY;
473
474 /* create a URB, and a buffer for it */
475 urb = usb_alloc_urb(0, GFP_ATOMIC);
476 if (!urb) {
477 netdev_err(netdev, "No memory left for URB\n");
478 goto nomem_urb;
479 }
480
481 hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
482 &urb->transfer_dma);
483 if (!hf) {
484 netdev_err(netdev, "No memory left for USB buffer\n");
485 goto nomem_hf;
486 }
487
488 idx = txc->echo_id;
489
490 if (idx >= GS_MAX_TX_URBS) {
491 netdev_err(netdev, "Invalid tx context %d\n", idx);
492 goto badidx;
493 }
494
495 hf->echo_id = idx;
496 hf->channel = dev->channel;
497
498 cf = (struct can_frame *)skb->data;
499
500 hf->can_id = cf->can_id;
501 hf->can_dlc = cf->can_dlc;
502 memcpy(hf->data, cf->data, cf->can_dlc);
503
504 usb_fill_bulk_urb(urb, dev->udev,
505 usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
506 hf,
507 sizeof(*hf),
508 gs_usb_xmit_callback,
509 txc);
510
511 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
512 usb_anchor_urb(urb, &dev->tx_submitted);
513
514 can_put_echo_skb(skb, netdev, idx);
515
516 atomic_inc(&dev->active_tx_urbs);
517
518 rc = usb_submit_urb(urb, GFP_ATOMIC);
519 if (unlikely(rc)) { /* usb send failed */
520 atomic_dec(&dev->active_tx_urbs);
521
522 can_free_echo_skb(netdev, idx);
523 gs_free_tx_context(txc);
524
525 usb_unanchor_urb(urb);
526 usb_free_coherent(dev->udev,
527 sizeof(*hf),
528 hf,
529 urb->transfer_dma);
530
531
532 if (rc == -ENODEV) {
533 netif_device_detach(netdev);
534 } else {
535 netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
536 stats->tx_dropped++;
537 }
538 } else {
539 /* Slow down tx path */
540 if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
541 netif_stop_queue(netdev);
542 }
543
544 /* let usb core take care of this urb */
545 usb_free_urb(urb);
546
547 return NETDEV_TX_OK;
548
549 badidx:
550 usb_free_coherent(dev->udev,
551 sizeof(*hf),
552 hf,
553 urb->transfer_dma);
554 nomem_hf:
555 usb_free_urb(urb);
556
557 nomem_urb:
558 gs_free_tx_context(txc);
559 dev_kfree_skb(skb);
560 stats->tx_dropped++;
561 return NETDEV_TX_OK;
562}
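/* Tx flow in short: can_put_echo_skb() parks the skb until the device
 * echoes the frame back with a matching echo_id; only then does the rx
 * completion handler hand it to the stack via can_get_echo_skb(), which
 * is the roundtrip behaviour advertised by the IFF_ECHO flag set below.
 */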
563
564static int gs_can_open(struct net_device *netdev)
565{
566 struct gs_can *dev = netdev_priv(netdev);
567 struct gs_usb *parent = dev->parent;
568 int rc, i;
569 struct gs_device_mode *dm;
570 u32 ctrlmode;
571
572 rc = open_candev(netdev);
573 if (rc)
574 return rc;
575
576 if (atomic_add_return(1, &parent->active_channels) == 1) {
577 for (i = 0; i < GS_MAX_RX_URBS; i++) {
578 struct urb *urb;
579 u8 *buf;
580
581 /* alloc rx urb */
582 urb = usb_alloc_urb(0, GFP_KERNEL);
583 if (!urb) {
584 netdev_err(netdev,
585 "No memory left for URB\n");
586 return -ENOMEM;
587 }
588
589 /* alloc rx buffer */
590 buf = usb_alloc_coherent(dev->udev,
591 sizeof(struct gs_host_frame),
592 GFP_KERNEL,
593 &urb->transfer_dma);
594 if (!buf) {
595 netdev_err(netdev,
596 "No memory left for USB buffer\n");
597 usb_free_urb(urb);
598 return -ENOMEM;
599 }
600
601 /* fill, anchor, and submit rx urb */
602 usb_fill_bulk_urb(urb,
603 dev->udev,
604 usb_rcvbulkpipe(dev->udev,
605 GSUSB_ENDPOINT_IN),
606 buf,
607 sizeof(struct gs_host_frame),
608 gs_usb_recieve_bulk_callback,
609 parent);
610 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
611
612 usb_anchor_urb(urb, &parent->rx_submitted);
613
614 rc = usb_submit_urb(urb, GFP_KERNEL);
615 if (rc) {
616 if (rc == -ENODEV)
617 netif_device_detach(dev->netdev);
618
619 netdev_err(netdev,
620 "usb_submit failed (err=%d)\n",
621 rc);
622
623 usb_unanchor_urb(urb);
624 break;
625 }
626
627 /* Drop reference,
628 * USB core will take care of freeing it
629 */
630 usb_free_urb(urb);
631 }
632 }
633
634 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
635 if (!dm)
636 return -ENOMEM;
637
638 /* flags */
639 ctrlmode = dev->can.ctrlmode;
640 dm->flags = 0;
641
642 if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
643 dm->flags |= GS_CAN_MODE_LOOP_BACK;
644 else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
645 dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
646
647		/* The controller is not allowed to retry TX;
648		 * this mode is unavailable on Atmel's UC3C hardware.
649 */
650 if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
651 dm->flags |= GS_CAN_MODE_ONE_SHOT;
652
653 if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
654 dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
655
656 /* finally start device */
657 dm->mode = GS_CAN_MODE_START;
658 rc = usb_control_msg(interface_to_usbdev(dev->iface),
659 usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
660 GS_USB_BREQ_MODE,
661 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
662 dev->channel,
663 0,
664 dm,
665 sizeof(*dm),
666 1000);
667
668 if (rc < 0) {
669 netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
670 kfree(dm);
671 return rc;
672 }
673
674 kfree(dm);
675
676 dev->can.state = CAN_STATE_ERROR_ACTIVE;
677
678 if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
679 netif_start_queue(netdev);
680
681 return 0;
682}
683
684static int gs_can_close(struct net_device *netdev)
685{
686 int rc;
687 struct gs_can *dev = netdev_priv(netdev);
688 struct gs_usb *parent = dev->parent;
689
690 netif_stop_queue(netdev);
691
692 /* Stop polling */
693 if (atomic_dec_and_test(&parent->active_channels))
694 usb_kill_anchored_urbs(&parent->rx_submitted);
695
696 /* Stop sending URBs */
697 usb_kill_anchored_urbs(&dev->tx_submitted);
698 atomic_set(&dev->active_tx_urbs, 0);
699
700 /* reset the device */
701 rc = gs_cmd_reset(parent, dev);
702 if (rc < 0)
703 netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
704
705 /* reset tx contexts */
706 for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
707 dev->tx_context[rc].dev = dev;
708 dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
709 }
710
711 /* close the netdev */
712 close_candev(netdev);
713
714 return 0;
715}
716
717static const struct net_device_ops gs_usb_netdev_ops = {
718 .ndo_open = gs_can_open,
719 .ndo_stop = gs_can_close,
720 .ndo_start_xmit = gs_can_start_xmit,
721};
722
723static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
724{
725 struct gs_can *dev;
726 struct net_device *netdev;
727 int rc;
728 struct gs_device_bt_const *bt_const;
729
730 bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
731 if (!bt_const)
732 return ERR_PTR(-ENOMEM);
733
734 /* fetch bit timing constants */
735 rc = usb_control_msg(interface_to_usbdev(intf),
736 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
737 GS_USB_BREQ_BT_CONST,
738 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
739 channel,
740 0,
741 bt_const,
742 sizeof(*bt_const),
743 1000);
744
745 if (rc < 0) {
746 dev_err(&intf->dev,
747 "Couldn't get bit timing const for channel (err=%d)\n",
748 rc);
749 kfree(bt_const);
750 return ERR_PTR(rc);
751 }
752
753 /* create netdev */
754 netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
755 if (!netdev) {
756 dev_err(&intf->dev, "Couldn't allocate candev\n");
757 kfree(bt_const);
758 return ERR_PTR(-ENOMEM);
759 }
760
761 dev = netdev_priv(netdev);
762
763 netdev->netdev_ops = &gs_usb_netdev_ops;
764
765 netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
766
767	/* dev setup */
768 strcpy(dev->bt_const.name, "gs_usb");
769 dev->bt_const.tseg1_min = bt_const->tseg1_min;
770 dev->bt_const.tseg1_max = bt_const->tseg1_max;
771 dev->bt_const.tseg2_min = bt_const->tseg2_min;
772 dev->bt_const.tseg2_max = bt_const->tseg2_max;
773 dev->bt_const.sjw_max = bt_const->sjw_max;
774 dev->bt_const.brp_min = bt_const->brp_min;
775 dev->bt_const.brp_max = bt_const->brp_max;
776 dev->bt_const.brp_inc = bt_const->brp_inc;
777
778 dev->udev = interface_to_usbdev(intf);
779 dev->iface = intf;
780 dev->netdev = netdev;
781 dev->channel = channel;
782
783 init_usb_anchor(&dev->tx_submitted);
784 atomic_set(&dev->active_tx_urbs, 0);
785 spin_lock_init(&dev->tx_ctx_lock);
786 for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
787 dev->tx_context[rc].dev = dev;
788 dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
789 }
790
791	/* can setup */
792 dev->can.state = CAN_STATE_STOPPED;
793 dev->can.clock.freq = bt_const->fclk_can;
794 dev->can.bittiming_const = &dev->bt_const;
795 dev->can.do_set_bittiming = gs_usb_set_bittiming;
796
797 dev->can.ctrlmode_supported = 0;
798
799 if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
800 dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
801
802 if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
803 dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
804
805 if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
806 dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
807
808 if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
809 dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
810
811 kfree(bt_const);
812
813 SET_NETDEV_DEV(netdev, &intf->dev);
814
815 rc = register_candev(dev->netdev);
816 if (rc) {
817 free_candev(dev->netdev);
818 dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
819 return ERR_PTR(rc);
820 }
821
822 return dev;
823}
824
825static void gs_destroy_candev(struct gs_can *dev)
826{
827 unregister_candev(dev->netdev);
828 free_candev(dev->netdev);
829 usb_kill_anchored_urbs(&dev->tx_submitted);
830 kfree(dev);
831}
832
833static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
834{
835 struct gs_usb *dev;
836 int rc = -ENOMEM;
837 unsigned int icount, i;
838 struct gs_host_config *hconf;
839 struct gs_device_config *dconf;
840
841 hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
842 if (!hconf)
843 return -ENOMEM;
844
845 hconf->byte_order = 0x0000beef;
846
847 /* send host config */
848 rc = usb_control_msg(interface_to_usbdev(intf),
849 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
850 GS_USB_BREQ_HOST_FORMAT,
851 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
852 1,
853 intf->altsetting[0].desc.bInterfaceNumber,
854 hconf,
855 sizeof(*hconf),
856 1000);
857
858 kfree(hconf);
859
860 if (rc < 0) {
861 dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
862 rc);
863 return rc;
864 }
865
866 dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
867 if (!dconf)
868 return -ENOMEM;
869
870 /* read device config */
871 rc = usb_control_msg(interface_to_usbdev(intf),
872 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
873 GS_USB_BREQ_DEVICE_CONFIG,
874 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
875 1,
876 intf->altsetting[0].desc.bInterfaceNumber,
877 dconf,
878 sizeof(*dconf),
879 1000);
880 if (rc < 0) {
881 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
882 rc);
883
884 kfree(dconf);
885
886 return rc;
887 }
888
889	icount = dconf->icount + 1;
890
891 kfree(dconf);
892
893 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
894
895 if (icount > GS_MAX_INTF) {
896 dev_err(&intf->dev,
897		"Driver cannot handle more than %d CAN interfaces\n",
898 GS_MAX_INTF);
899 return -EINVAL;
900 }
901
902	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
903	init_usb_anchor(&dev->rx_submitted);
904
905 atomic_set(&dev->active_channels, 0);
906
907 usb_set_intfdata(intf, dev);
908 dev->udev = interface_to_usbdev(intf);
909
910 for (i = 0; i < icount; i++) {
911 dev->canch[i] = gs_make_candev(i, intf);
912 if (IS_ERR_OR_NULL(dev->canch[i])) {
913 /* on failure destroy previously created candevs */
914 icount = i;
915 for (i = 0; i < icount; i++) {
916 gs_destroy_candev(dev->canch[i]);
917 dev->canch[i] = NULL;
918 }
919 kfree(dev);
920 return rc;
921 }
922 dev->canch[i]->parent = dev;
923 }
924
925 return 0;
926}
927
928static void gs_usb_disconnect(struct usb_interface *intf)
929{
930 unsigned i;
931 struct gs_usb *dev = usb_get_intfdata(intf);
932 usb_set_intfdata(intf, NULL);
933
934 if (!dev) {
935 dev_err(&intf->dev, "Disconnect (nodata)\n");
936 return;
937 }
938
939 for (i = 0; i < GS_MAX_INTF; i++) {
940 struct gs_can *can = dev->canch[i];
941
942 if (!can)
943 continue;
944
945 gs_destroy_candev(can);
946 }
947
948 usb_kill_anchored_urbs(&dev->rx_submitted);
949}
950
951static const struct usb_device_id gs_usb_table[] = {
952 {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
953 {} /* Terminating entry */
954};
955
956MODULE_DEVICE_TABLE(usb, gs_usb_table);
957
958static struct usb_driver gs_usb_driver = {
959 .name = "gs_usb",
960 .probe = gs_usb_probe,
961 .disconnect = gs_usb_disconnect,
962 .id_table = gs_usb_table,
963};
964
965module_usb_driver(gs_usb_driver);
966
967MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
968MODULE_DESCRIPTION(
969"Socket CAN device driver for Geschwister Schneider Technologie-, "
970"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
971MODULE_LICENSE("GPL v2");
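/* Typical SocketCAN bring-up once this driver has bound (the interface
 * name can0 is an assumption; check the output of "ip link" for the
 * actual name):
 *
 *	ip link set can0 type can bitrate 500000
 *	ip link set can0 up
 *	candump can0          (from the can-utils package)
 */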
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
53#define USB_OEM_MERCURY_PRODUCT_ID 34 53#define USB_OEM_MERCURY_PRODUCT_ID 34
54#define USB_OEM_LEAF_PRODUCT_ID 35 54#define USB_OEM_LEAF_PRODUCT_ID 35
55#define USB_CAN_R_PRODUCT_ID 39 55#define USB_CAN_R_PRODUCT_ID 39
56#define USB_LEAF_LITE_V2_PRODUCT_ID 288
57#define USB_MINI_PCIE_HS_PRODUCT_ID 289
56 58
57/* USB devices features */ 59/* USB devices features */
58#define KVASER_HAS_SILENT_MODE BIT(0) 60#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
356 .driver_info = KVASER_HAS_TXRX_ERRORS }, 358 .driver_info = KVASER_HAS_TXRX_ERRORS },
357 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), 359 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
358 .driver_info = KVASER_HAS_TXRX_ERRORS }, 360 .driver_info = KVASER_HAS_TXRX_ERRORS },
361 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
362 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
359 { } 363 { }
360}; 364};
361MODULE_DEVICE_TABLE(usb, kvaser_usb_table); 365MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
379 void *buf; 383 void *buf;
380 int actual_len; 384 int actual_len;
381 int err; 385 int err;
382 int pos = 0; 386 int pos;
387 unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
383 388
384 buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL); 389 buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
385 if (!buf) 390 if (!buf)
386 return -ENOMEM; 391 return -ENOMEM;
387 392
388 err = usb_bulk_msg(dev->udev, 393 do {
389 usb_rcvbulkpipe(dev->udev, 394 err = usb_bulk_msg(dev->udev,
390 dev->bulk_in->bEndpointAddress), 395 usb_rcvbulkpipe(dev->udev,
391 buf, RX_BUFFER_SIZE, &actual_len, 396 dev->bulk_in->bEndpointAddress),
392 USB_RECV_TIMEOUT); 397 buf, RX_BUFFER_SIZE, &actual_len,
393 if (err < 0) 398 USB_RECV_TIMEOUT);
394 goto end; 399 if (err < 0)
400 goto end;
395 401
396 while (pos <= actual_len - MSG_HEADER_LEN) { 402 pos = 0;
397 tmp = buf + pos; 403 while (pos <= actual_len - MSG_HEADER_LEN) {
404 tmp = buf + pos;
398 405
399 if (!tmp->len) 406 if (!tmp->len)
400 break; 407 break;
401 408
402 if (pos + tmp->len > actual_len) { 409 if (pos + tmp->len > actual_len) {
403 dev_err(dev->udev->dev.parent, "Format error\n"); 410 dev_err(dev->udev->dev.parent,
404 break; 411 "Format error\n");
405 } 412 break;
413 }
406 414
407 if (tmp->id == id) { 415 if (tmp->id == id) {
408 memcpy(msg, tmp, tmp->len); 416 memcpy(msg, tmp, tmp->len);
409 goto end; 417 goto end;
410 } 418 }
411 419
412 pos += tmp->len; 420 pos += tmp->len;
413 } 421 }
422 } while (time_before(jiffies, to));
414 423
415 err = -EINVAL; 424 err = -EINVAL;
416 425
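/* Net effect of this hunk: instead of failing after the first bulk
 * transfer that lacks the expected message id, kvaser_usb_wait_msg() now
 * keeps reading until the id shows up or roughly USB_RECV_TIMEOUT
 * milliseconds (converted to jiffies above) have elapsed.
 */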
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
new file mode 100644
index 000000000000..5e8b5609c067
--- /dev/null
+++ b/drivers/net/can/xilinx_can.c
@@ -0,0 +1,1208 @@
1/* Xilinx CAN device driver
2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 *
6 * Description:
7 * This driver is developed for the Axi CAN IP and the Zynq CANPS controller.
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation, either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/clk.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/netdevice.h>
27#include <linux/of.h>
28#include <linux/platform_device.h>
29#include <linux/skbuff.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/can/led.h>
35
36#define DRIVER_NAME "xilinx_can"
37
38/* CAN registers set */
39enum xcan_reg {
40 XCAN_SRR_OFFSET = 0x00, /* Software reset */
41 XCAN_MSR_OFFSET = 0x04, /* Mode select */
42 XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
43 XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
44 XCAN_ECR_OFFSET = 0x10, /* Error counter */
45 XCAN_ESR_OFFSET = 0x14, /* Error status */
46 XCAN_SR_OFFSET = 0x18, /* Status */
47 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
48 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
49 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
50	XCAN_TXFIFO_ID_OFFSET = 0x30, /* TX FIFO ID */
51 XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
52 XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
53 XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
54 XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
55 XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
56 XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
57 XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
58};
59
60/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
61#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
62#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
63#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
64#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
65#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
66#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
67#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
68#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
69#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
70#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
71#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
72#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
73#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
74#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
75#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
76#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
77#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
78#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
79#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
80#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
81#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
82#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
83#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
84#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
85#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
86#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
87#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
88#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
89#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
90#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
91#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
92#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
93#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
94#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
95#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
96#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
97#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
98#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
99
100#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
101 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
102 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
103 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
104
105/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
106#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
107#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
108#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
109#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
110#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
111#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
112
113/* CAN frame length constants */
114#define XCAN_FRAME_MAX_DATA_LEN 8
115#define XCAN_TIMEOUT (1 * HZ)
116
117/**
118 * struct xcan_priv - This definition defines a CAN driver instance
119 * @can: CAN private data structure.
120 * @tx_head: Tx CAN packets ready to send on the queue
121 * @tx_tail:			Tx CAN packets successfully sent on the queue
122 * @tx_max:			Maximum number of packets the driver can send
123 * @napi: NAPI structure
124 * @read_reg: For reading data from CAN registers
125 * @write_reg: For writing data to CAN registers
126 * @dev: Network device data structure
127 * @reg_base: Ioremapped address to registers
128 * @irq_flags: For request_irq()
129 * @bus_clk: Pointer to struct clk
130 * @can_clk: Pointer to struct clk
131 */
132struct xcan_priv {
133 struct can_priv can;
134 unsigned int tx_head;
135 unsigned int tx_tail;
136 unsigned int tx_max;
137 struct napi_struct napi;
138 u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
139 void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
140 u32 val);
141 struct net_device *dev;
142 void __iomem *reg_base;
143 unsigned long irq_flags;
144 struct clk *bus_clk;
145 struct clk *can_clk;
146};
147
148/* CAN Bittiming constants as per Xilinx CAN specs */
149static const struct can_bittiming_const xcan_bittiming_const = {
150 .name = DRIVER_NAME,
151 .tseg1_min = 1,
152 .tseg1_max = 16,
153 .tseg2_min = 1,
154 .tseg2_max = 8,
155 .sjw_max = 4,
156 .brp_min = 1,
157 .brp_max = 256,
158 .brp_inc = 1,
159};
160
161/**
162 * xcan_write_reg_le - Write a value to the device register little endian
163 * @priv: Driver private data structure
164 * @reg: Register offset
165 * @val: Value to write at the Register offset
166 *
167 * Write data to the particular CAN register
168 */
169static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
170 u32 val)
171{
172 iowrite32(val, priv->reg_base + reg);
173}
174
175/**
176 * xcan_read_reg_le - Read a value from the device register little endian
177 * @priv: Driver private data structure
178 * @reg: Register offset
179 *
180 * Read data from the particular CAN register
181 * Return: value read from the CAN register
182 */
183static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
184{
185 return ioread32(priv->reg_base + reg);
186}
187
188/**
189 * xcan_write_reg_be - Write a value to the device register big endian
190 * @priv: Driver private data structure
191 * @reg: Register offset
192 * @val: Value to write at the Register offset
193 *
194 * Write data to the particular CAN register
195 */
196static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
197 u32 val)
198{
199 iowrite32be(val, priv->reg_base + reg);
200}
201
202/**
203 * xcan_read_reg_be - Read a value from the device register big endian
204 * @priv: Driver private data structure
205 * @reg: Register offset
206 *
207 * Read data from the particular CAN register
208 * Return: value read from the CAN register
209 */
210static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
211{
212 return ioread32be(priv->reg_base + reg);
213}
214
215/**
216 * set_reset_mode - Resets the CAN device mode
217 * @ndev: Pointer to net_device structure
218 *
219 * This is the driver's reset mode routine. The driver
220 * enters configuration mode.
221 *
222 * Return: 0 on success and failure value on error
223 */
224static int set_reset_mode(struct net_device *ndev)
225{
226 struct xcan_priv *priv = netdev_priv(ndev);
227 unsigned long timeout;
228
229 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
230
231 timeout = jiffies + XCAN_TIMEOUT;
232 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
233 if (time_after(jiffies, timeout)) {
234			netdev_warn(ndev, "timed out waiting for config mode\n");
235 return -ETIMEDOUT;
236 }
237 usleep_range(500, 10000);
238 }
239
240 return 0;
241}
242
243/**
244 * xcan_set_bittiming - CAN set bit timing routine
245 * @ndev: Pointer to net_device structure
246 *
247 * This is the driver set bittiming routine.
248 * Return: 0 on success and failure value on error
249 */
250static int xcan_set_bittiming(struct net_device *ndev)
251{
252 struct xcan_priv *priv = netdev_priv(ndev);
253 struct can_bittiming *bt = &priv->can.bittiming;
254 u32 btr0, btr1;
255 u32 is_config_mode;
256
257 /* Check whether Xilinx CAN is in configuration mode.
258	 * Bit timing cannot be set while the core is out of configuration mode.
259 */
260 is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
261 XCAN_SR_CONFIG_MASK;
262 if (!is_config_mode) {
263 netdev_alert(ndev,
264 "BUG! Cannot set bittiming - CAN is not in config mode\n");
265 return -EPERM;
266 }
267
268	/* Setting the baud rate prescaler value in the BRPR register */
269 btr0 = (bt->brp - 1);
270
271 /* Setting Time Segment 1 in BTR Register */
272 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
273
274 /* Setting Time Segment 2 in BTR Register */
275 btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
276
277 /* Setting Synchronous jump width in BTR Register */
278 btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
279
280 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
281 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
282
283 netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
284 priv->read_reg(priv, XCAN_BRPR_OFFSET),
285 priv->read_reg(priv, XCAN_BTR_OFFSET));
286
287 return 0;
288}
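/* A worked example of the packing above (hypothetical values): with
 * can_clk = 24 MHz, brp = 3, prop_seg + phase_seg1 = 11, phase_seg2 = 4
 * and sjw = 1, one bit time is 1 + 11 + 4 = 16 time quanta, i.e.
 * 24 MHz / (3 * 16) = 500 kbit/s, and the registers come out as
 * BRPR = 3 - 1 = 2 and BTR = ((1-1) << 7) | ((4-1) << 4) | (11-1) = 0x3a.
 */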
289
290/**
291 * xcan_chip_start - The driver's start routine
292 * @ndev:	Pointer to net_device structure
293 *
294 * This is the driver's start routine.
295 * Based on the State of the CAN device it puts
296 * the CAN device into a proper mode.
297 *
298 * Return: 0 on success and failure value on error
299 */
300static int xcan_chip_start(struct net_device *ndev)
301{
302 struct xcan_priv *priv = netdev_priv(ndev);
303 u32 err, reg_msr, reg_sr_mask;
304 unsigned long timeout;
305
306 /* Check if it is in reset mode */
307 err = set_reset_mode(ndev);
308 if (err < 0)
309 return err;
310
311 err = xcan_set_bittiming(ndev);
312 if (err < 0)
313 return err;
314
315 /* Enable interrupts */
316 priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
317
318 /* Check whether it is loopback mode or normal mode */
319 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
320 reg_msr = XCAN_MSR_LBACK_MASK;
321 reg_sr_mask = XCAN_SR_LBACK_MASK;
322 } else {
323 reg_msr = 0x0;
324 reg_sr_mask = XCAN_SR_NORMAL_MASK;
325 }
326
327 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
328 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
329
330 timeout = jiffies + XCAN_TIMEOUT;
331 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
332 if (time_after(jiffies, timeout)) {
333 netdev_warn(ndev,
334				"timed out waiting for correct mode\n");
335 return -ETIMEDOUT;
336 }
337 }
338	netdev_dbg(ndev, "status: 0x%08x\n",
339 priv->read_reg(priv, XCAN_SR_OFFSET));
340
341 priv->can.state = CAN_STATE_ERROR_ACTIVE;
342 return 0;
343}
344
345/**
346 * xcan_do_set_mode - This sets the mode of the driver
347 * @ndev: Pointer to net_device structure
348 * @mode: Tells the mode of the driver
349 *
350 * This checks the driver's state and calls
351 * the corresponding mode-setting routine.
352 *
353 * Return: 0 on success and failure value on error
354 */
355static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
356{
357 int ret;
358
359 switch (mode) {
360 case CAN_MODE_START:
361 ret = xcan_chip_start(ndev);
362 if (ret < 0) {
363 netdev_err(ndev, "xcan_chip_start failed!\n");
364 return ret;
365 }
366 netif_wake_queue(ndev);
367 break;
368 default:
369 ret = -EOPNOTSUPP;
370 break;
371 }
372
373 return ret;
374}
375
376/**
377 * xcan_start_xmit - Starts the transmission
378 * @skb: sk_buff pointer that contains data to be Txed
379 * @ndev: Pointer to net_device structure
380 *
381 * This function is invoked from upper layers to initiate transmission. This
382 * function uses the next available free txbuff and populates its fields to
383 * start the transmission.
384 *
385 * Return: 0 on success and failure value on error
386 */
387static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
388{
389 struct xcan_priv *priv = netdev_priv(ndev);
390 struct net_device_stats *stats = &ndev->stats;
391 struct can_frame *cf = (struct can_frame *)skb->data;
392 u32 id, dlc, data[2] = {0, 0};
393
394 if (can_dropped_invalid_skb(ndev, skb))
395 return NETDEV_TX_OK;
396
397 /* Check if the TX buffer is full */
398 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
399 XCAN_SR_TXFLL_MASK)) {
400 netif_stop_queue(ndev);
401		netdev_err(ndev, "BUG! TX FIFO full when queue awake!\n");
402 return NETDEV_TX_BUSY;
403 }
404
405	/* Watch the bit layout below carefully */
406 if (cf->can_id & CAN_EFF_FLAG) {
407 /* Extended CAN ID format */
408 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
409 XCAN_IDR_ID2_MASK;
410 id |= (((cf->can_id & CAN_EFF_MASK) >>
411 (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
412 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
413
414		/* The substitute remote TX request bit should be "1"
415		 * for extended frames, as per the Xilinx CAN datasheet
416 */
417 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
418
419 if (cf->can_id & CAN_RTR_FLAG)
420 /* Extended frames remote TX request */
421 id |= XCAN_IDR_RTR_MASK;
422 } else {
423 /* Standard CAN ID format */
424 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
425 XCAN_IDR_ID1_MASK;
426
427 if (cf->can_id & CAN_RTR_FLAG)
428 /* Standard frames remote TX request */
429 id |= XCAN_IDR_SRR_MASK;
430 }
431
432 dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
433
434 if (cf->can_dlc > 0)
435 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
436 if (cf->can_dlc > 4)
437 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
438
439 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
440 priv->tx_head++;
441
442 /* Write the Frame to Xilinx CAN TX FIFO */
443 priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
444	/* If the CAN frame is an RTR frame, this write triggers transmission */
445 priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
446 if (!(cf->can_id & CAN_RTR_FLAG)) {
447 priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
448		/* If the CAN frame is a Standard/Extended frame, this
449		 * write triggers transmission
450 */
451 priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
452 stats->tx_bytes += cf->can_dlc;
453 }
454
455 /* Check if the TX buffer is full */
456 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
457 netif_stop_queue(ndev);
458
459 return NETDEV_TX_OK;
460}
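/* tx_head and tx_tail form a simple ring: xmit advances tx_head, the TXOK
 * interrupt advances tx_tail, and the queue is stopped once head - tail
 * reaches tx_max, so the stack can never overrun the hardware TX FIFO.
 */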
461
462/**
463 * xcan_rx - Called from the CAN isr to complete received
464 * frame processing
465 * @ndev: Pointer to net_device structure
466 *
467 * This function is invoked from the CAN isr (poll) to process the Rx frames. It
468 * does minimal processing and invokes "netif_receive_skb" to complete further
469 * processing.
470 * Return: 1 on success and 0 on failure.
471 */
472static int xcan_rx(struct net_device *ndev)
473{
474 struct xcan_priv *priv = netdev_priv(ndev);
475 struct net_device_stats *stats = &ndev->stats;
476 struct can_frame *cf;
477 struct sk_buff *skb;
478 u32 id_xcan, dlc, data[2] = {0, 0};
479
480 skb = alloc_can_skb(ndev, &cf);
481 if (unlikely(!skb)) {
482 stats->rx_dropped++;
483 return 0;
484 }
485
486 /* Read a frame from Xilinx zynq CANPS */
487 id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
488 dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
489 XCAN_DLCR_DLC_SHIFT;
490
491 /* Change Xilinx CAN data length format to socketCAN data format */
492 cf->can_dlc = get_can_dlc(dlc);
493
494 /* Change Xilinx CAN ID format to socketCAN ID format */
495 if (id_xcan & XCAN_IDR_IDE_MASK) {
496 /* The received frame is an Extended format frame */
497 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
498 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
499 XCAN_IDR_ID2_SHIFT;
500 cf->can_id |= CAN_EFF_FLAG;
501 if (id_xcan & XCAN_IDR_RTR_MASK)
502 cf->can_id |= CAN_RTR_FLAG;
503 } else {
504 /* The received frame is a standard format frame */
505 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
506 XCAN_IDR_ID1_SHIFT;
507 if (id_xcan & XCAN_IDR_SRR_MASK)
508 cf->can_id |= CAN_RTR_FLAG;
509 }
510
511 if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
512 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
513 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
514
515 /* Change Xilinx CAN data format to socketCAN data format */
516 if (cf->can_dlc > 0)
517 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
518 if (cf->can_dlc > 4)
519 *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
520 }
521
522 stats->rx_bytes += cf->can_dlc;
523 stats->rx_packets++;
524 netif_receive_skb(skb);
525
526 return 1;
527}
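/* Identifier translation note: per the masks defined above, XCAN stores the
 * 11-bit standard identifier in bits 31:21 and the 18-bit extended
 * remainder in bits 18:1, so the shifts here reassemble the 29-bit
 * SocketCAN extended identifier from those two fields.
 */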
528
529/**
530 * xcan_err_interrupt - error frame Isr
531 * @ndev: net_device pointer
532 * @isr: interrupt status register value
533 *
534 * This is the CAN error interrupt and it will
535 * check the type of error and forward the error
536 * frame to upper layers.
537 */
538static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
539{
540 struct xcan_priv *priv = netdev_priv(ndev);
541 struct net_device_stats *stats = &ndev->stats;
542 struct can_frame *cf;
543 struct sk_buff *skb;
544 u32 err_status, status, txerr = 0, rxerr = 0;
545
546 skb = alloc_can_err_skb(ndev, &cf);
547
548 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
549 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
550 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
551 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
552 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
553 status = priv->read_reg(priv, XCAN_SR_OFFSET);
554
555 if (isr & XCAN_IXR_BSOFF_MASK) {
556 priv->can.state = CAN_STATE_BUS_OFF;
557 priv->can.can_stats.bus_off++;
558 /* Leave device in Config Mode in bus-off state */
559 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
560 can_bus_off(ndev);
561 if (skb)
562 cf->can_id |= CAN_ERR_BUSOFF;
563 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
564 priv->can.state = CAN_STATE_ERROR_PASSIVE;
565 priv->can.can_stats.error_passive++;
566 if (skb) {
567 cf->can_id |= CAN_ERR_CRTL;
568 cf->data[1] = (rxerr > 127) ?
569 CAN_ERR_CRTL_RX_PASSIVE :
570 CAN_ERR_CRTL_TX_PASSIVE;
571 cf->data[6] = txerr;
572 cf->data[7] = rxerr;
573 }
574 } else if (status & XCAN_SR_ERRWRN_MASK) {
575 priv->can.state = CAN_STATE_ERROR_WARNING;
576 priv->can.can_stats.error_warning++;
577 if (skb) {
578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] |= (txerr > rxerr) ?
580 CAN_ERR_CRTL_TX_WARNING :
581 CAN_ERR_CRTL_RX_WARNING;
582 cf->data[6] = txerr;
583 cf->data[7] = rxerr;
584 }
585 }
586
587 /* Check for Arbitration lost interrupt */
588 if (isr & XCAN_IXR_ARBLST_MASK) {
589 priv->can.can_stats.arbitration_lost++;
590 if (skb) {
591 cf->can_id |= CAN_ERR_LOSTARB;
592 cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
593 }
594 }
595
596 /* Check for RX FIFO Overflow interrupt */
597 if (isr & XCAN_IXR_RXOFLW_MASK) {
598 stats->rx_over_errors++;
599 stats->rx_errors++;
600 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
601 if (skb) {
602 cf->can_id |= CAN_ERR_CRTL;
603 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
604 }
605 }
606
607 /* Check for error interrupt */
608 if (isr & XCAN_IXR_ERROR_MASK) {
609 if (skb) {
610 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
611 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
612 }
613
614 /* Check for Ack error interrupt */
615 if (err_status & XCAN_ESR_ACKER_MASK) {
616 stats->tx_errors++;
617 if (skb) {
618 cf->can_id |= CAN_ERR_ACK;
619 cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
620 }
621 }
622
623 /* Check for Bit error interrupt */
624 if (err_status & XCAN_ESR_BERR_MASK) {
625 stats->tx_errors++;
626 if (skb) {
627 cf->can_id |= CAN_ERR_PROT;
628 cf->data[2] = CAN_ERR_PROT_BIT;
629 }
630 }
631
632 /* Check for Stuff error interrupt */
633 if (err_status & XCAN_ESR_STER_MASK) {
634 stats->rx_errors++;
635 if (skb) {
636 cf->can_id |= CAN_ERR_PROT;
637 cf->data[2] = CAN_ERR_PROT_STUFF;
638 }
639 }
640
641 /* Check for Form error interrupt */
642 if (err_status & XCAN_ESR_FMER_MASK) {
643 stats->rx_errors++;
644 if (skb) {
645 cf->can_id |= CAN_ERR_PROT;
646 cf->data[2] = CAN_ERR_PROT_FORM;
647 }
648 }
649
650 /* Check for CRC error interrupt */
651 if (err_status & XCAN_ESR_CRCER_MASK) {
652 stats->rx_errors++;
653 if (skb) {
654 cf->can_id |= CAN_ERR_PROT;
655 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
656 CAN_ERR_PROT_LOC_CRC_DEL;
657 }
658 }
659 priv->can.can_stats.bus_error++;
660 }
661
662 if (skb) {
663 stats->rx_packets++;
664 stats->rx_bytes += cf->can_dlc;
665 netif_rx(skb);
666 }
667
668 netdev_dbg(ndev, "%s: error status register:0x%x\n",
669 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
670}
671
672/**
673 * xcan_state_interrupt - Checks the state of the CAN device
674 * @ndev:	net_device pointer
675 * @isr:	interrupt status register value
676 *
677 * This checks the state of the CAN device
678 * and puts the device into the appropriate state.
679 */
680static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
681{
682 struct xcan_priv *priv = netdev_priv(ndev);
683
684	/* Check for the sleep interrupt; if set, put the CAN device to sleep */
685 if (isr & XCAN_IXR_SLP_MASK)
686 priv->can.state = CAN_STATE_SLEEPING;
687
688	/* Check for the wake-up interrupt; if set, put the CAN device in the active state */
689 if (isr & XCAN_IXR_WKUP_MASK)
690 priv->can.state = CAN_STATE_ERROR_ACTIVE;
691}
692
693/**
694 * xcan_rx_poll - Poll routine for rx packets (NAPI)
695 * @napi: napi structure pointer
696 * @quota: Max number of rx packets to be processed.
697 *
698 * This is the poll routine for the rx path.
699 * It processes packets up to the maximum quota value.
700 *
701 * Return: number of packets received
702 */
703static int xcan_rx_poll(struct napi_struct *napi, int quota)
704{
705 struct net_device *ndev = napi->dev;
706 struct xcan_priv *priv = netdev_priv(ndev);
707 u32 isr, ier;
708 int work_done = 0;
709
710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
712 if (isr & XCAN_IXR_RXOK_MASK) {
713 priv->write_reg(priv, XCAN_ICR_OFFSET,
714 XCAN_IXR_RXOK_MASK);
715 work_done += xcan_rx(ndev);
716 } else {
717 priv->write_reg(priv, XCAN_ICR_OFFSET,
718 XCAN_IXR_RXNEMP_MASK);
719 break;
720 }
721 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
722 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
723 }
724
725 if (work_done)
726 can_led_event(ndev, CAN_LED_EVENT_RX);
727
728 if (work_done < quota) {
729 napi_complete(napi);
730 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
731 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
732 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
733 }
734 return work_done;
735}
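/* Classic NAPI split: the hard irq handler masks RXOK/RXNEMP and schedules
 * this poll; once the FIFO is drained below quota, napi_complete() runs and
 * the two rx interrupt sources are unmasked again by the IER write above.
 */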
736
737/**
738 * xcan_tx_interrupt - Tx Done Isr
739 * @ndev: net_device pointer
740 * @isr: Interrupt status register value
741 */
742static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
743{
744 struct xcan_priv *priv = netdev_priv(ndev);
745 struct net_device_stats *stats = &ndev->stats;
746
747 while ((priv->tx_head - priv->tx_tail > 0) &&
748 (isr & XCAN_IXR_TXOK_MASK)) {
749 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
750 can_get_echo_skb(ndev, priv->tx_tail %
751 priv->tx_max);
752 priv->tx_tail++;
753 stats->tx_packets++;
754 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
755 }
756 can_led_event(ndev, CAN_LED_EVENT_TX);
757 netif_wake_queue(ndev);
758}
759
760/**
761 * xcan_interrupt - CAN Isr
762 * @irq: irq number
763 * @dev_id:	device id pointer
764 *
765 * This is the Xilinx CAN Isr. It checks for the type of interrupt
766 * and invokes the corresponding ISR.
767 *
768 * Return:
769 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
770 */
771static irqreturn_t xcan_interrupt(int irq, void *dev_id)
772{
773 struct net_device *ndev = (struct net_device *)dev_id;
774 struct xcan_priv *priv = netdev_priv(ndev);
775 u32 isr, ier;
776
777 /* Get the interrupt status from Xilinx CAN */
778 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
779 if (!isr)
780 return IRQ_NONE;
781
782	/* Check the type of interrupt and process it */
783 if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
784 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
785 XCAN_IXR_WKUP_MASK));
786 xcan_state_interrupt(ndev, isr);
787 }
788
789	/* Check for the Tx interrupt and process it */
790 if (isr & XCAN_IXR_TXOK_MASK)
791 xcan_tx_interrupt(ndev, isr);
792
793	/* Check for error interrupts and process them */
794 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
795 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
796 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
797 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
798 XCAN_IXR_ARBLST_MASK));
799 xcan_err_interrupt(ndev, isr);
800 }
801
802	/* Check for receive interrupts and process them */
803 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
804 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
805 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
806 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
807 napi_schedule(&priv->napi);
808 }
809 return IRQ_HANDLED;
810}
811
812/**
813 * xcan_chip_stop - Driver stop routine
814 * @ndev: Pointer to net_device structure
815 *
816 * This is the drivers stop routine. It will disable the
817 * interrupts and put the device into configuration mode.
818 */
819static void xcan_chip_stop(struct net_device *ndev)
820{
821 struct xcan_priv *priv = netdev_priv(ndev);
822 u32 ier;
823
824 /* Disable interrupts and leave the can in configuration mode */
825 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
826 ier &= ~XCAN_INTR_ALL;
827 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
828 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
829 priv->can.state = CAN_STATE_STOPPED;
830}
831
832/**
833 * xcan_open - Driver open routine
834 * @ndev: Pointer to net_device structure
835 *
836 * This is the driver open routine.
837 * Return: 0 on success and failure value on error
838 */
839static int xcan_open(struct net_device *ndev)
840{
841 struct xcan_priv *priv = netdev_priv(ndev);
842 int ret;
843
844 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
845 ndev->name, ndev);
846 if (ret < 0) {
847 netdev_err(ndev, "irq allocation for CAN failed\n");
848 goto err;
849 }
850
851 ret = clk_prepare_enable(priv->can_clk);
852 if (ret) {
853 netdev_err(ndev, "unable to enable device clock\n");
854 goto err_irq;
855 }
856
857 ret = clk_prepare_enable(priv->bus_clk);
858 if (ret) {
859 netdev_err(ndev, "unable to enable bus clock\n");
860 goto err_can_clk;
861 }
862
863 /* Set chip into reset mode */
864 ret = set_reset_mode(ndev);
865 if (ret < 0) {
866 netdev_err(ndev, "mode resetting failed!\n");
867 goto err_bus_clk;
868 }
869
870 /* Common open */
871 ret = open_candev(ndev);
872 if (ret)
873 goto err_bus_clk;
874
875 ret = xcan_chip_start(ndev);
876 if (ret < 0) {
877 netdev_err(ndev, "xcan_chip_start failed!\n");
878 goto err_candev;
879 }
880
881 can_led_event(ndev, CAN_LED_EVENT_OPEN);
882 napi_enable(&priv->napi);
883 netif_start_queue(ndev);
884
885 return 0;
886
887err_candev:
888 close_candev(ndev);
889err_bus_clk:
890 clk_disable_unprepare(priv->bus_clk);
891err_can_clk:
892 clk_disable_unprepare(priv->can_clk);
893err_irq:
894 free_irq(ndev->irq, ndev);
895err:
896 return ret;
897}
898
899/**
900 * xcan_close - Driver close routine
901 * @ndev: Pointer to net_device structure
902 *
903 * Return: 0 always
904 */
905static int xcan_close(struct net_device *ndev)
906{
907 struct xcan_priv *priv = netdev_priv(ndev);
908
909 netif_stop_queue(ndev);
910 napi_disable(&priv->napi);
911 xcan_chip_stop(ndev);
912 clk_disable_unprepare(priv->bus_clk);
913 clk_disable_unprepare(priv->can_clk);
914 free_irq(ndev->irq, ndev);
915 close_candev(ndev);
916
917 can_led_event(ndev, CAN_LED_EVENT_STOP);
918
919 return 0;
920}
921
922/**
923 * xcan_get_berr_counter - error counter routine
924 * @ndev: Pointer to net_device structure
925 * @bec: Pointer to can_berr_counter structure
926 *
927 * This is the driver's error counter routine.
928 * Return: 0 on success and failure value on error
929 */
930static int xcan_get_berr_counter(const struct net_device *ndev,
931 struct can_berr_counter *bec)
932{
933 struct xcan_priv *priv = netdev_priv(ndev);
934 int ret;
935
936 ret = clk_prepare_enable(priv->can_clk);
937 if (ret)
938 goto err;
939
940 ret = clk_prepare_enable(priv->bus_clk);
941 if (ret)
942 goto err_clk;
943
944 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
945 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
946 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
947
948 clk_disable_unprepare(priv->bus_clk);
949 clk_disable_unprepare(priv->can_clk);
950
951 return 0;
952
953err_clk:
954 clk_disable_unprepare(priv->can_clk);
955err:
956 return ret;
957}
958
959
960static const struct net_device_ops xcan_netdev_ops = {
961 .ndo_open = xcan_open,
962 .ndo_stop = xcan_close,
963 .ndo_start_xmit = xcan_start_xmit,
964};
965
966/**
967 * xcan_suspend - Suspend method for the driver
968 * @dev: Address of the platform_device structure
969 *
970 * Put the driver into low power mode.
971 * Return: 0 always
972 */
973static int __maybe_unused xcan_suspend(struct device *dev)
974{
975 struct platform_device *pdev = dev_get_drvdata(dev);
976 struct net_device *ndev = platform_get_drvdata(pdev);
977 struct xcan_priv *priv = netdev_priv(ndev);
978
979 if (netif_running(ndev)) {
980 netif_stop_queue(ndev);
981 netif_device_detach(ndev);
982 }
983
984 priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
985 priv->can.state = CAN_STATE_SLEEPING;
986
987 clk_disable(priv->bus_clk);
988 clk_disable(priv->can_clk);
989
990 return 0;
991}
992
993/**
994 * xcan_resume - Resume from suspend
995 * @dev: Address of the platform_device structure
996 *
997 * Resume operation after suspend.
998 * Return: 0 on success and failure value on error
999 */
1000static int __maybe_unused xcan_resume(struct device *dev)
1001{
1002 struct platform_device *pdev = dev_get_drvdata(dev);
1003 struct net_device *ndev = platform_get_drvdata(pdev);
1004 struct xcan_priv *priv = netdev_priv(ndev);
1005 int ret;
1006
1007 ret = clk_enable(priv->bus_clk);
1008 if (ret) {
1009		dev_err(dev, "Cannot enable bus clock.\n");
1010 return ret;
1011 }
1012 ret = clk_enable(priv->can_clk);
1013 if (ret) {
1014		dev_err(dev, "Cannot enable device clock.\n");
1015 clk_disable_unprepare(priv->bus_clk);
1016 return ret;
1017 }
1018
1019 priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
1020 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
1021 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1022
1023 if (netif_running(ndev)) {
1024 netif_device_attach(ndev);
1025 netif_start_queue(ndev);
1026 }
1027
1028 return 0;
1029}
1030
1031static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
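/* SIMPLE_DEV_PM_OPS() ties the two callbacks above into a struct dev_pm_ops
 * covering all of the system-sleep hooks. Simplified from <linux/pm.h>, the
 * line above is roughly equivalent to (illustrative expansion, renamed to
 * avoid clashing with the real symbol):
 */
static const struct dev_pm_ops xcan_dev_pm_ops_expanded = {
	.suspend	= xcan_suspend,
	.resume		= xcan_resume,
	.freeze		= xcan_suspend,
	.thaw		= xcan_resume,
	.poweroff	= xcan_suspend,
	.restore	= xcan_resume,
};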
1032
1033/**
1034 * xcan_probe - Platform registration call
1035 * @pdev: Handle to the platform device structure
1036 *
1037 * This function does all the memory allocation and registration for the CAN
1038 * device.
1039 *
1040 * Return: 0 on success and failure value on error
1041 */
1042static int xcan_probe(struct platform_device *pdev)
1043{
1044 struct resource *res; /* IO mem resources */
1045 struct net_device *ndev;
1046 struct xcan_priv *priv;
1047 void __iomem *addr;
1048 int ret, rx_max, tx_max;
1049
1050 /* Get the virtual base address for the device */
1051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1052 addr = devm_ioremap_resource(&pdev->dev, res);
1053 if (IS_ERR(addr)) {
1054 ret = PTR_ERR(addr);
1055 goto err;
1056 }
1057
1058 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
1059 if (ret < 0)
1060 goto err;
1061
1062 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
1063 if (ret < 0)
1064 goto err;
1065
1066 /* Create a CAN device instance */
1067 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1068 if (!ndev)
1069 return -ENOMEM;
1070
1071 priv = netdev_priv(ndev);
1072 priv->dev = ndev;
1073 priv->can.bittiming_const = &xcan_bittiming_const;
1074 priv->can.do_set_mode = xcan_do_set_mode;
1075 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1076 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1077 CAN_CTRLMODE_BERR_REPORTING;
1078 priv->reg_base = addr;
1079 priv->tx_max = tx_max;
1080
1081 /* Get IRQ for the device */
1082 ndev->irq = platform_get_irq(pdev, 0);
1083 ndev->flags |= IFF_ECHO; /* We support local echo */
1084
1085 platform_set_drvdata(pdev, ndev);
1086 SET_NETDEV_DEV(ndev, &pdev->dev);
1087 ndev->netdev_ops = &xcan_netdev_ops;
1088
1089	/* Get the CAN clock (can_clk) */
1090 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1091 if (IS_ERR(priv->can_clk)) {
1092 dev_err(&pdev->dev, "Device clock not found.\n");
1093 ret = PTR_ERR(priv->can_clk);
1094 goto err_free;
1095 }
1096 /* Check for type of CAN device */
1097 if (of_device_is_compatible(pdev->dev.of_node,
1098 "xlnx,zynq-can-1.0")) {
1099 priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
1100 if (IS_ERR(priv->bus_clk)) {
1101 dev_err(&pdev->dev, "bus clock not found\n");
1102 ret = PTR_ERR(priv->bus_clk);
1103 goto err_free;
1104 }
1105 } else {
1106 priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1107 if (IS_ERR(priv->bus_clk)) {
1108 dev_err(&pdev->dev, "bus clock not found\n");
1109 ret = PTR_ERR(priv->bus_clk);
1110 goto err_free;
1111 }
1112 }
1113
1114 ret = clk_prepare_enable(priv->can_clk);
1115 if (ret) {
1116 dev_err(&pdev->dev, "unable to enable device clock\n");
1117 goto err_free;
1118 }
1119
1120 ret = clk_prepare_enable(priv->bus_clk);
1121 if (ret) {
1122 dev_err(&pdev->dev, "unable to enable bus clock\n");
1123 goto err_unprepare_disable_dev;
1124 }
1125
1126 priv->write_reg = xcan_write_reg_le;
1127 priv->read_reg = xcan_read_reg_le;
1128
1129 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1130 priv->write_reg = xcan_write_reg_be;
1131 priv->read_reg = xcan_read_reg_be;
1132 }
1133
1134 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1135
1136 netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1137
1138 ret = register_candev(ndev);
1139 if (ret) {
1140 dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
1141 goto err_unprepare_disable_busclk;
1142 }
1143
1144 devm_can_led_init(ndev);
1145 clk_disable_unprepare(priv->bus_clk);
1146 clk_disable_unprepare(priv->can_clk);
1147 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
1148 priv->reg_base, ndev->irq, priv->can.clock.freq,
1149 priv->tx_max);
1150
1151 return 0;
1152
1153err_unprepare_disable_busclk:
1154 clk_disable_unprepare(priv->bus_clk);
1155err_unprepare_disable_dev:
1156 clk_disable_unprepare(priv->can_clk);
1157err_free:
1158 free_candev(ndev);
1159err:
1160 return ret;
1161}
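/* The XCAN_SR_OFFSET check in the probe above doubles as an endianness
 * probe: after reset the status register must report configuration mode, so
 * reading something other than XCAN_SR_CONFIG_MASK through the little-endian
 * accessors means the IP is wired big-endian. The accessor pairs are
 * presumably defined near the top of this file along these lines (sketch
 * with illustrative names; the _be variants would use ioread32be() and
 * iowrite32be() instead):
 */
static void xcan_write_reg_le_sketch(const struct xcan_priv *priv,
				     u32 reg, u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

static u32 xcan_read_reg_le_sketch(const struct xcan_priv *priv, u32 reg)
{
	return ioread32(priv->reg_base + reg);
}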
1162
1163/**
1164 * xcan_remove - Unregister the device after releasing the resources
1165 * @pdev: Handle to the platform device structure
1166 *
1167 * This function frees all the resources allocated to the device.
1168 * Return: 0 always
1169 */
1170static int xcan_remove(struct platform_device *pdev)
1171{
1172 struct net_device *ndev = platform_get_drvdata(pdev);
1173 struct xcan_priv *priv = netdev_priv(ndev);
1174
1175 if (set_reset_mode(ndev) < 0)
1176 netdev_err(ndev, "mode resetting failed!\n");
1177
1178 unregister_candev(ndev);
1179 netif_napi_del(&priv->napi);
1180 free_candev(ndev);
1181
1182 return 0;
1183}
1184
1185/* Match table for OF platform binding */
1186static struct of_device_id xcan_of_match[] = {
1187 { .compatible = "xlnx,zynq-can-1.0", },
1188 { .compatible = "xlnx,axi-can-1.00.a", },
1189 { /* end of list */ },
1190};
1191MODULE_DEVICE_TABLE(of, xcan_of_match);
1192
1193static struct platform_driver xcan_driver = {
1194 .probe = xcan_probe,
1195 .remove = xcan_remove,
1196 .driver = {
1197 .owner = THIS_MODULE,
1198 .name = DRIVER_NAME,
1199 .pm = &xcan_dev_pm_ops,
1200 .of_match_table = xcan_of_match,
1201 },
1202};
1203
1204module_platform_driver(xcan_driver);
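/* module_platform_driver() expands to the usual registration boilerplate;
 * the single line above is roughly equivalent to (illustrative expansion):
 */
static int __init xcan_driver_init(void)
{
	return platform_driver_register(&xcan_driver);
}
module_init(xcan_driver_init);

static void __exit xcan_driver_exit(void)
{
	platform_driver_unregister(&xcan_driver);
}
module_exit(xcan_driver_exit);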
1205
1206MODULE_LICENSE("GPL");
1207MODULE_AUTHOR("Xilinx Inc");
1208MODULE_DESCRIPTION("Xilinx CAN interface");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
289 289
290static int mv88e6123_61_65_setup(struct dsa_switch *ds) 290static int mv88e6123_61_65_setup(struct dsa_switch *ds)
291{ 291{
292 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 292 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
293 int i; 293 int i;
294 int ret; 294 int ret;
295 295
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
155 155
156static int mv88e6131_setup_port(struct dsa_switch *ds, int p) 156static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
157{ 157{
158 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 158 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
159 int addr = REG_PORT(p); 159 int addr = REG_PORT(p);
160 u16 val; 160 u16 val;
161 161
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
274 274
275static int mv88e6131_setup(struct dsa_switch *ds) 275static int mv88e6131_setup(struct dsa_switch *ds)
276{ 276{
277 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 277 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
278 int i; 278 int i;
279 int ret; 279 int ret;
280 280
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
74 74
75int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) 75int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
76{ 76{
77 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 77 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
78 int ret; 78 int ret;
79 79
80 mutex_lock(&ps->smi_mutex); 80 mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
118 118
119int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) 119int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
120{ 120{
121 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 121 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
122 int ret; 122 int ret;
123 123
124 mutex_lock(&ps->smi_mutex); 124 mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
256 256
257static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) 257static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
258{ 258{
259 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 259 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
260 int ret; 260 int ret;
261 261
262 mutex_lock(&ps->ppu_mutex); 262 mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
283 283
284static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds) 284static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
285{ 285{
286 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 286 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
287 287
288 /* Schedule a timer to re-enable the PHY polling unit. */ 288 /* Schedule a timer to re-enable the PHY polling unit. */
289 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); 289 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
292 292
293void mv88e6xxx_ppu_state_init(struct dsa_switch *ds) 293void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
294{ 294{
295 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 295 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
296 296
297 mutex_init(&ps->ppu_mutex); 297 mutex_init(&ps->ppu_mutex);
298 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); 298 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
463 int nr_stats, struct mv88e6xxx_hw_stat *stats, 463 int nr_stats, struct mv88e6xxx_hw_stat *stats,
464 int port, uint64_t *data) 464 int port, uint64_t *data)
465{ 465{
466 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); 466 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
467 int ret; 467 int ret;
468 int i; 468 int i;
469 469
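The mv88e6xxx hunks above replace open-coded pointer arithmetic with a named
helper. Assuming ds_to_priv() is the trivial inline the conversion suggests
(a sketch; the helper itself lives in the shared DSA header, not in these
hunks):

static inline void *ds_to_priv(struct dsa_switch *ds)
{
	/* driver-private state is allocated immediately after the switch */
	return (void *)(ds + 1);
}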
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9e6848..a968654b631d 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
534 /* The EL3-specific entries in the device structure. */ 534 /* The EL3-specific entries in the device structure. */
535 dev->netdev_ops = &netdev_ops; 535 dev->netdev_ops = &netdev_ops;
536 dev->watchdog_timeo = TX_TIMEOUT; 536 dev->watchdog_timeo = TX_TIMEOUT;
537 SET_ETHTOOL_OPS(dev, &ethtool_ops); 537 dev->ethtool_ops = &ethtool_ops;
538 538
539 err = register_netdev(dev); 539 err = register_netdev(dev);
540 if (err) { 540 if (err) {
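This hunk, and the matching ones below, replace the SET_ETHTOOL_OPS() macro
with a direct assignment. The macro was never more than shorthand for that
assignment; from <linux/netdevice.h> it is roughly:

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

With no side effects to hide, the macro adds nothing over the plain
assignment, hence the tree-wide removal.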
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e037f2..f18647c23559 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
218 dev->netdev_ops = &el3_netdev_ops; 218 dev->netdev_ops = &el3_netdev_ops;
219 dev->watchdog_timeo = TX_TIMEOUT; 219 dev->watchdog_timeo = TX_TIMEOUT;
220 220
221 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 221 dev->ethtool_ops = &netdev_ethtool_ops;
222 222
223 return tc589_config(link); 223 return tc589_config(link);
224} 224}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc7108d8a..e13b04624ded 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2435 netif_napi_add(dev, &tp->napi, typhoon_poll, 16); 2435 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2436 dev->watchdog_timeo = TX_TIMEOUT; 2436 dev->watchdog_timeo = TX_TIMEOUT;
2437 2437
2438 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops); 2438 dev->ethtool_ops = &typhoon_ethtool_ops;
2439 2439
2440 /* We can handle scatter gather, up to 16 entries, and 2440 /* We can handle scatter gather, up to 16 entries, and
2441 * we can do IP checksumming (only version 4, doh...) 2441 * we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c399b52..1d162ccb4733 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
157 157
158 /* This check _should_not_ be necessary, omit eventually. */ 158 /* This check _should_not_ be necessary, omit eventually. */
159 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 159 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
160 if (jiffies - reset_start_time > 2 * HZ / 100) { 160 if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
161 netdev_warn(dev, "%s: did not complete.\n", __func__); 161 netdev_warn(dev, "%s: did not complete.\n", __func__);
162 break; 162 break;
163 } 163 }
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
293 dma_start = jiffies; 293 dma_start = jiffies;
294 294
295 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 295 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
296 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ 296 if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
297 netdev_warn(dev, "timeout waiting for Tx RDC.\n"); 297 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
298 ax_reset_8390(dev); 298 ax_reset_8390(dev);
299 ax_NS8390_init(dev, 1); 299 ax_NS8390_init(dev, 1);
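The ax88796 hunks convert raw jiffies arithmetic to time_after(), which
stays correct when the jiffies counter wraps around. Stripped of its
typecheck() guards, the macro from <linux/jiffies.h> reduces to a signed
comparison:

#define time_after(a, b)	((long)((b) - (a)) < 0)

Because the subtraction happens in unsigned arithmetic and is then
reinterpreted as signed, (b) - (a) comes out negative whenever a is logically
later than b, even across a rollover, unlike the open-coded
"jiffies - start > timeout" form it replaces.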
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 051349458462..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
68source "drivers/net/ethernet/faraday/Kconfig" 68source "drivers/net/ethernet/faraday/Kconfig"
69source "drivers/net/ethernet/freescale/Kconfig" 69source "drivers/net/ethernet/freescale/Kconfig"
70source "drivers/net/ethernet/fujitsu/Kconfig" 70source "drivers/net/ethernet/fujitsu/Kconfig"
71source "drivers/net/ethernet/hisilicon/Kconfig"
71source "drivers/net/ethernet/hp/Kconfig" 72source "drivers/net/ethernet/hp/Kconfig"
72source "drivers/net/ethernet/ibm/Kconfig" 73source "drivers/net/ethernet/ibm/Kconfig"
73source "drivers/net/ethernet/intel/Kconfig" 74source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e36c456..58de3339ab3c 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
31obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ 31obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
32obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ 32obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
33obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ 33obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
34obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
34obj-$(CONFIG_NET_VENDOR_HP) += hp/ 35obj-$(CONFIG_NET_VENDOR_HP) += hp/
35obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ 36obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
36obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ 37obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c1d3c2..40dbbf740331 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
784 784
785 dev->netdev_ops = &netdev_ops; 785 dev->netdev_ops = &netdev_ops;
786 dev->watchdog_timeo = TX_TIMEOUT; 786 dev->watchdog_timeo = TX_TIMEOUT;
787 SET_ETHTOOL_OPS(dev, &ethtool_ops); 787 dev->ethtool_ops = &ethtool_ops;
788 788
789 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work); 789 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
790 790
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df5ba1..9a6991be9749 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
476 dev->watchdog_timeo = 5*HZ; 476 dev->watchdog_timeo = 5*HZ;
477 477
478 dev->netdev_ops = &ace_netdev_ops; 478 dev->netdev_ops = &ace_netdev_ops;
479 SET_ETHTOOL_OPS(dev, &ace_ethtool_ops); 479 dev->ethtool_ops = &ace_ethtool_ops;
480 480
481 /* we only display this string ONCE */ 481 /* we only display this string ONCE */
482 if (!boards_found) 482 if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f451cf..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
353 353
354 struct sgdma_descrip __iomem *cdesc = &descbase[0]; 354 struct sgdma_descrip __iomem *cdesc = &descbase[0];
355 struct sgdma_descrip __iomem *ndesc = &descbase[1]; 355 struct sgdma_descrip __iomem *ndesc = &descbase[1];
356
357 struct tse_buffer *rxbuffer = NULL; 356 struct tse_buffer *rxbuffer = NULL;
358 357
359 if (!sgdma_rxbusy(priv)) { 358 if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25eff7952..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
271 271
272void altera_tse_set_ethtool_ops(struct net_device *netdev) 272void altera_tse_set_ethtool_ops(struct net_device *netdev)
273{ 273{
274 SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops); 274 netdev->ethtool_ops = &tse_ethtool_ops;
275} 275}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46e0a82..bbaf36d9f5e1 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
7 default y 7 default y
8 depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \ 8 depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
9 SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \ 9 SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
10 (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA 10 (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
11 ---help--- 11 ---help---
12 If you have a network (Ethernet) chipset belonging to this class, 12 If you have a network (Ethernet) chipset belonging to this class,
13 say Y. 13 say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
177 To compile this driver as a module, choose M here: the module 177 To compile this driver as a module, choose M here: the module
178 will be called sunlance. 178 will be called sunlance.
179 179
180config AMD_XGBE
181 tristate "AMD 10GbE Ethernet driver"
182 depends on OF_NET
183 select PHYLIB
184 select AMD_XGBE_PHY
185 ---help---
186 This driver supports the AMD 10GbE Ethernet device found on an
187 AMD SoC.
188
189 To compile this driver as a module, choose M here: the module
190 will be called amd-xgbe.
191
180endif # NET_VENDOR_AMD 192endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301a973d..a38a2dce3eb3 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
17obj-$(CONFIG_PCNET32) += pcnet32.o 17obj-$(CONFIG_PCNET32) += pcnet32.o
18obj-$(CONFIG_SUN3LANCE) += sun3lance.o 18obj-$(CONFIG_SUN3LANCE) += sun3lance.o
19obj-$(CONFIG_SUNLANCE) += sunlance.o 19obj-$(CONFIG_SUNLANCE) += sunlance.o
20obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa5e73f..068dc7cad5fa 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
1900 1900
1901 /* Initialize driver entry points */ 1901 /* Initialize driver entry points */
1902 dev->netdev_ops = &amd8111e_netdev_ops; 1902 dev->netdev_ops = &amd8111e_netdev_ops;
1903 SET_ETHTOOL_OPS(dev, &ops); 1903 dev->ethtool_ops = &ops;
1904 dev->irq =pdev->irq; 1904 dev->irq =pdev->irq;
1905 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; 1905 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1906 netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); 1906 netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b31b8b..968b7bfac8fc 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
718 unsigned long mem_start = board + ARIADNE_RAM; 718 unsigned long mem_start = board + ARIADNE_RAM;
719 struct resource *r1, *r2; 719 struct resource *r1, *r2;
720 struct net_device *dev; 720 struct net_device *dev;
721 struct ariadne_private *priv;
722 u32 serial; 721 u32 serial;
723 int err; 722 int err;
724 723
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
738 return -ENOMEM; 737 return -ENOMEM;
739 } 738 }
740 739
741 priv = netdev_priv(dev);
742
743 r1->name = dev->name; 740 r1->name = dev->name;
744 r2->name = dev->name; 741 r2->name = dev->name;
745 742
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e3d302..a78e4c136959 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
1229 dev->base_addr = base->start; 1229 dev->base_addr = base->start;
1230 dev->irq = irq; 1230 dev->irq = irq;
1231 dev->netdev_ops = &au1000_netdev_ops; 1231 dev->netdev_ops = &au1000_netdev_ops;
1232 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); 1232 dev->ethtool_ops = &au1000_ethtool_ops;
1233 dev->watchdog_timeo = ETH_TX_TIMEOUT; 1233 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1234 1234
1235 /* 1235 /*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
27 27
28#include "hplance.h" 28#include "hplance.h"
29 29
30/* We have 16834 bytes of RAM for the init block and buffers. This places 30/* We have 16392 bytes of RAM for the init block and buffers. This places
31 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx 31 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
32 * buffers and 2 Tx buffers. 32 * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
33 */ 33 */
34#define LANCE_LOG_TX_BUFFERS 1 34#define LANCE_LOG_TX_BUFFERS 1
35#define LANCE_LOG_RX_BUFFERS 3 35#define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/mvme147hw.h> 27#include <asm/mvme147hw.h>
28 28
29/* We have 16834 bytes of RAM for the init block and buffers. This places 29/* We have 32K of RAM for the init block and buffers. This places
30 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx 30 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
31 * buffers and 2 Tx buffers. 31 * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
32 */ 32 */
33#define LANCE_LOG_TX_BUFFERS 1 33#define LANCE_LOG_TX_BUFFERS 1
34#define LANCE_LOG_RX_BUFFERS 3 34#define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
111 dev->dev_addr); 111 dev->dev_addr);
112 112
113 lp = netdev_priv(dev); 113 lp = netdev_priv(dev);
114 lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ 114 lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
115 if (!lp->ram) { 115 if (!lp->ram) {
116 printk("%s: No memory for LANCE buffers\n", dev->name); 116 printk("%s: No memory for LANCE buffers\n", dev->name);
117 free_netdev(dev); 117 free_netdev(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe2b182..abf3b1581c82 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
457 lp->tx_free_frames=AM2150_MAX_TX_FRAMES; 457 lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
458 458
459 dev->netdev_ops = &mace_netdev_ops; 459 dev->netdev_ops = &mace_netdev_ops;
460 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 460 dev->ethtool_ops = &netdev_ethtool_ops;
461 dev->watchdog_timeo = TX_TIMEOUT; 461 dev->watchdog_timeo = TX_TIMEOUT;
462 462
463 return nmclan_config(link); 463 return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000000..26cf9af1642f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
2
3amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
4 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
5
6amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000000..bf462ee86f5c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#ifndef __XGBE_COMMON_H__
118#define __XGBE_COMMON_H__
119
120/* DMA register offsets */
121#define DMA_MR 0x3000
122#define DMA_SBMR 0x3004
123#define DMA_ISR 0x3008
124#define DMA_AXIARCR 0x3010
125#define DMA_AXIAWCR 0x3018
126#define DMA_DSR0 0x3020
127#define DMA_DSR1 0x3024
128#define DMA_DSR2 0x3028
129#define DMA_DSR3 0x302c
130#define DMA_DSR4 0x3030
131
132/* DMA register entry bit positions and sizes */
133#define DMA_AXIARCR_DRC_INDEX 0
134#define DMA_AXIARCR_DRC_WIDTH 4
135#define DMA_AXIARCR_DRD_INDEX 4
136#define DMA_AXIARCR_DRD_WIDTH 2
137#define DMA_AXIARCR_TEC_INDEX 8
138#define DMA_AXIARCR_TEC_WIDTH 4
139#define DMA_AXIARCR_TED_INDEX 12
140#define DMA_AXIARCR_TED_WIDTH 2
141#define DMA_AXIARCR_THC_INDEX 16
142#define DMA_AXIARCR_THC_WIDTH 4
143#define DMA_AXIARCR_THD_INDEX 20
144#define DMA_AXIARCR_THD_WIDTH 2
145#define DMA_AXIAWCR_DWC_INDEX 0
146#define DMA_AXIAWCR_DWC_WIDTH 4
147#define DMA_AXIAWCR_DWD_INDEX 4
148#define DMA_AXIAWCR_DWD_WIDTH 2
149#define DMA_AXIAWCR_RPC_INDEX 8
150#define DMA_AXIAWCR_RPC_WIDTH 4
151#define DMA_AXIAWCR_RPD_INDEX 12
152#define DMA_AXIAWCR_RPD_WIDTH 2
153#define DMA_AXIAWCR_RHC_INDEX 16
154#define DMA_AXIAWCR_RHC_WIDTH 4
155#define DMA_AXIAWCR_RHD_INDEX 20
156#define DMA_AXIAWCR_RHD_WIDTH 2
157#define DMA_AXIAWCR_TDC_INDEX 24
158#define DMA_AXIAWCR_TDC_WIDTH 4
159#define DMA_AXIAWCR_TDD_INDEX 28
160#define DMA_AXIAWCR_TDD_WIDTH 2
161#define DMA_DSR0_RPS_INDEX 8
162#define DMA_DSR0_RPS_WIDTH 4
163#define DMA_DSR0_TPS_INDEX 12
164#define DMA_DSR0_TPS_WIDTH 4
165#define DMA_ISR_MACIS_INDEX 17
166#define DMA_ISR_MACIS_WIDTH 1
167#define DMA_ISR_MTLIS_INDEX 16
168#define DMA_ISR_MTLIS_WIDTH 1
169#define DMA_MR_SWR_INDEX 0
170#define DMA_MR_SWR_WIDTH 1
171#define DMA_SBMR_EAME_INDEX 11
172#define DMA_SBMR_EAME_WIDTH 1
173#define DMA_SBMR_UNDEF_INDEX 0
174#define DMA_SBMR_UNDEF_WIDTH 1
175
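/* The _INDEX/_WIDTH pairs above (and throughout this header) describe
 * register bit fields generically. They are presumably consumed by get/set
 * helpers along these lines (a sketch; the driver's real accessor macros are
 * defined further down this header, outside this excerpt):
 */
#define GET_BITS_SKETCH(var, index, width)				\
	(((var) >> (index)) & ((0x1 << (width)) - 1))

#define SET_BITS_SKETCH(var, index, width, val)				\
do {									\
	(var) &= ~(((0x1 << (width)) - 1) << (index));			\
	(var) |= (((val) & ((0x1 << (width)) - 1)) << (index));		\
} while (0)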
176/* DMA channel register offsets
177 * Multiple channels can be active. The first channel has registers
178 * that begin at 0x3100. Each subsequent channel has registers that
179 * are accessed using an offset of 0x80 from the previous channel.
180 */
181#define DMA_CH_BASE 0x3100
182#define DMA_CH_INC 0x80
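/* A per-channel register address is just the base plus a channel stride plus
 * the per-register offset, e.g. (illustrative helper, not part of this
 * header):
 */
static inline unsigned int xgbe_dma_ch_reg_sketch(unsigned int channel,
						  unsigned int reg)
{
	/* channel 2's IER: 0x3100 + 2 * 0x80 + 0x38 = 0x3238 */
	return DMA_CH_BASE + channel * DMA_CH_INC + reg;
}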
183
184#define DMA_CH_CR 0x00
185#define DMA_CH_TCR 0x04
186#define DMA_CH_RCR 0x08
187#define DMA_CH_TDLR_HI 0x10
188#define DMA_CH_TDLR_LO 0x14
189#define DMA_CH_RDLR_HI 0x18
190#define DMA_CH_RDLR_LO 0x1c
191#define DMA_CH_TDTR_LO 0x24
192#define DMA_CH_RDTR_LO 0x2c
193#define DMA_CH_TDRLR 0x30
194#define DMA_CH_RDRLR 0x34
195#define DMA_CH_IER 0x38
196#define DMA_CH_RIWT 0x3c
197#define DMA_CH_CATDR_LO 0x44
198#define DMA_CH_CARDR_LO 0x4c
199#define DMA_CH_CATBR_HI 0x50
200#define DMA_CH_CATBR_LO 0x54
201#define DMA_CH_CARBR_HI 0x58
202#define DMA_CH_CARBR_LO 0x5c
203#define DMA_CH_SR 0x60
204
205/* DMA channel register entry bit positions and sizes */
206#define DMA_CH_CR_PBLX8_INDEX 16
207#define DMA_CH_CR_PBLX8_WIDTH 1
208#define DMA_CH_IER_AIE_INDEX 15
209#define DMA_CH_IER_AIE_WIDTH 1
210#define DMA_CH_IER_FBEE_INDEX 12
211#define DMA_CH_IER_FBEE_WIDTH 1
212#define DMA_CH_IER_NIE_INDEX 16
213#define DMA_CH_IER_NIE_WIDTH 1
214#define DMA_CH_IER_RBUE_INDEX 7
215#define DMA_CH_IER_RBUE_WIDTH 1
216#define DMA_CH_IER_RIE_INDEX 6
217#define DMA_CH_IER_RIE_WIDTH 1
218#define DMA_CH_IER_RSE_INDEX 8
219#define DMA_CH_IER_RSE_WIDTH 1
220#define DMA_CH_IER_TBUE_INDEX 2
221#define DMA_CH_IER_TBUE_WIDTH 1
222#define DMA_CH_IER_TIE_INDEX 0
223#define DMA_CH_IER_TIE_WIDTH 1
224#define DMA_CH_IER_TXSE_INDEX 1
225#define DMA_CH_IER_TXSE_WIDTH 1
226#define DMA_CH_RCR_PBL_INDEX 16
227#define DMA_CH_RCR_PBL_WIDTH 6
228#define DMA_CH_RCR_RBSZ_INDEX 1
229#define DMA_CH_RCR_RBSZ_WIDTH 14
230#define DMA_CH_RCR_SR_INDEX 0
231#define DMA_CH_RCR_SR_WIDTH 1
232#define DMA_CH_RIWT_RWT_INDEX 0
233#define DMA_CH_RIWT_RWT_WIDTH 8
234#define DMA_CH_SR_FBE_INDEX 12
235#define DMA_CH_SR_FBE_WIDTH 1
236#define DMA_CH_SR_RBU_INDEX 7
237#define DMA_CH_SR_RBU_WIDTH 1
238#define DMA_CH_SR_RI_INDEX 6
239#define DMA_CH_SR_RI_WIDTH 1
240#define DMA_CH_SR_RPS_INDEX 8
241#define DMA_CH_SR_RPS_WIDTH 1
242#define DMA_CH_SR_TBU_INDEX 2
243#define DMA_CH_SR_TBU_WIDTH 1
244#define DMA_CH_SR_TI_INDEX 0
245#define DMA_CH_SR_TI_WIDTH 1
246#define DMA_CH_SR_TPS_INDEX 1
247#define DMA_CH_SR_TPS_WIDTH 1
248#define DMA_CH_TCR_OSP_INDEX 4
249#define DMA_CH_TCR_OSP_WIDTH 1
250#define DMA_CH_TCR_PBL_INDEX 16
251#define DMA_CH_TCR_PBL_WIDTH 6
252#define DMA_CH_TCR_ST_INDEX 0
253#define DMA_CH_TCR_ST_WIDTH 1
254#define DMA_CH_TCR_TSE_INDEX 12
255#define DMA_CH_TCR_TSE_WIDTH 1
256
257/* DMA channel register values */
258#define DMA_OSP_DISABLE 0x00
259#define DMA_OSP_ENABLE 0x01
260#define DMA_PBL_1 1
261#define DMA_PBL_2 2
262#define DMA_PBL_4 4
263#define DMA_PBL_8 8
264#define DMA_PBL_16 16
265#define DMA_PBL_32 32
266#define DMA_PBL_64 64 /* 8 x 8 */
267#define DMA_PBL_128 128 /* 8 x 16 */
268#define DMA_PBL_256 256 /* 8 x 32 */
269#define DMA_PBL_X8_DISABLE 0x00
270#define DMA_PBL_X8_ENABLE 0x01
271
272
273/* MAC register offsets */
274#define MAC_TCR 0x0000
275#define MAC_RCR 0x0004
276#define MAC_PFR 0x0008
277#define MAC_WTR 0x000c
278#define MAC_HTR0 0x0010
279#define MAC_HTR1 0x0014
280#define MAC_HTR2 0x0018
281#define MAC_HTR3 0x001c
282#define MAC_HTR4 0x0020
283#define MAC_HTR5 0x0024
284#define MAC_HTR6 0x0028
285#define MAC_HTR7 0x002c
286#define MAC_VLANTR 0x0050
287#define MAC_VLANHTR 0x0058
288#define MAC_VLANIR 0x0060
289#define MAC_IVLANIR 0x0064
290#define MAC_RETMR 0x006c
291#define MAC_Q0TFCR 0x0070
292#define MAC_RFCR 0x0090
293#define MAC_RQC0R 0x00a0
294#define MAC_RQC1R 0x00a4
295#define MAC_RQC2R 0x00a8
296#define MAC_RQC3R 0x00ac
297#define MAC_ISR 0x00b0
298#define MAC_IER 0x00b4
299#define MAC_RTSR 0x00b8
300#define MAC_PMTCSR 0x00c0
301#define MAC_RWKPFR 0x00c4
302#define MAC_LPICSR 0x00d0
303#define MAC_LPITCR 0x00d4
304#define MAC_VR 0x0110
305#define MAC_DR 0x0114
306#define MAC_HWF0R 0x011c
307#define MAC_HWF1R 0x0120
308#define MAC_HWF2R 0x0124
309#define MAC_GPIOCR 0x0278
310#define MAC_GPIOSR 0x027c
311#define MAC_MACA0HR 0x0300
312#define MAC_MACA0LR 0x0304
313#define MAC_MACA1HR 0x0308
314#define MAC_MACA1LR 0x030c
315
316#define MAC_QTFCR_INC 4
317#define MAC_MACA_INC 4
318
319/* MAC register entry bit positions and sizes */
320#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
321#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
322#define MAC_HWF0R_ARPOFFSEL_INDEX 9
323#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
324#define MAC_HWF0R_EEESEL_INDEX 13
325#define MAC_HWF0R_EEESEL_WIDTH 1
326#define MAC_HWF0R_GMIISEL_INDEX 1
327#define MAC_HWF0R_GMIISEL_WIDTH 1
328#define MAC_HWF0R_MGKSEL_INDEX 7
329#define MAC_HWF0R_MGKSEL_WIDTH 1
330#define MAC_HWF0R_MMCSEL_INDEX 8
331#define MAC_HWF0R_MMCSEL_WIDTH 1
332#define MAC_HWF0R_RWKSEL_INDEX 6
333#define MAC_HWF0R_RWKSEL_WIDTH 1
334#define MAC_HWF0R_RXCOESEL_INDEX 16
335#define MAC_HWF0R_RXCOESEL_WIDTH 1
336#define MAC_HWF0R_SAVLANINS_INDEX 27
337#define MAC_HWF0R_SAVLANINS_WIDTH 1
338#define MAC_HWF0R_SMASEL_INDEX 5
339#define MAC_HWF0R_SMASEL_WIDTH 1
340#define MAC_HWF0R_TSSEL_INDEX 12
341#define MAC_HWF0R_TSSEL_WIDTH 1
342#define MAC_HWF0R_TSSTSSEL_INDEX 25
343#define MAC_HWF0R_TSSTSSEL_WIDTH 2
344#define MAC_HWF0R_TXCOESEL_INDEX 14
345#define MAC_HWF0R_TXCOESEL_WIDTH 1
346#define MAC_HWF0R_VLHASH_INDEX 4
347#define MAC_HWF0R_VLHASH_WIDTH 1
348#define MAC_HWF1R_ADVTHWORD_INDEX 13
349#define MAC_HWF1R_ADVTHWORD_WIDTH 1
350#define MAC_HWF1R_DBGMEMA_INDEX 19
351#define MAC_HWF1R_DBGMEMA_WIDTH 1
352#define MAC_HWF1R_DCBEN_INDEX 16
353#define MAC_HWF1R_DCBEN_WIDTH 1
354#define MAC_HWF1R_HASHTBLSZ_INDEX 24
355#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
356#define MAC_HWF1R_L3L4FNUM_INDEX 27
357#define MAC_HWF1R_L3L4FNUM_WIDTH 4
358#define MAC_HWF1R_RSSEN_INDEX 20
359#define MAC_HWF1R_RSSEN_WIDTH 1
360#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
361#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
362#define MAC_HWF1R_SPHEN_INDEX 17
363#define MAC_HWF1R_SPHEN_WIDTH 1
364#define MAC_HWF1R_TSOEN_INDEX 18
365#define MAC_HWF1R_TSOEN_WIDTH 1
366#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
367#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
368#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
369#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
370#define MAC_HWF2R_PPSOUTNUM_INDEX 24
371#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
372#define MAC_HWF2R_RXCHCNT_INDEX 12
373#define MAC_HWF2R_RXCHCNT_WIDTH 4
374#define MAC_HWF2R_RXQCNT_INDEX 0
375#define MAC_HWF2R_RXQCNT_WIDTH 4
376#define MAC_HWF2R_TXCHCNT_INDEX 18
377#define MAC_HWF2R_TXCHCNT_WIDTH 4
378#define MAC_HWF2R_TXQCNT_INDEX 6
379#define MAC_HWF2R_TXQCNT_WIDTH 4
380#define MAC_ISR_MMCRXIS_INDEX 9
381#define MAC_ISR_MMCRXIS_WIDTH 1
382#define MAC_ISR_MMCTXIS_INDEX 10
383#define MAC_ISR_MMCTXIS_WIDTH 1
384#define MAC_ISR_PMTIS_INDEX 4
385#define MAC_ISR_PMTIS_WIDTH 1
386#define MAC_MACA1HR_AE_INDEX 31
387#define MAC_MACA1HR_AE_WIDTH 1
388#define MAC_PFR_HMC_INDEX 2
389#define MAC_PFR_HMC_WIDTH 1
390#define MAC_PFR_HUC_INDEX 1
391#define MAC_PFR_HUC_WIDTH 1
392#define MAC_PFR_PM_INDEX 4
393#define MAC_PFR_PM_WIDTH 1
394#define MAC_PFR_PR_INDEX 0
395#define MAC_PFR_PR_WIDTH 1
396#define MAC_PMTCSR_MGKPKTEN_INDEX 1
397#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
398#define MAC_PMTCSR_PWRDWN_INDEX 0
399#define MAC_PMTCSR_PWRDWN_WIDTH 1
400#define MAC_PMTCSR_RWKFILTRST_INDEX 31
401#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
402#define MAC_PMTCSR_RWKPKTEN_INDEX 2
403#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
404#define MAC_Q0TFCR_PT_INDEX 16
405#define MAC_Q0TFCR_PT_WIDTH 16
406#define MAC_Q0TFCR_TFE_INDEX 1
407#define MAC_Q0TFCR_TFE_WIDTH 1
408#define MAC_RCR_ACS_INDEX 1
409#define MAC_RCR_ACS_WIDTH 1
410#define MAC_RCR_CST_INDEX 2
411#define MAC_RCR_CST_WIDTH 1
412#define MAC_RCR_DCRCC_INDEX 3
413#define MAC_RCR_DCRCC_WIDTH 1
414#define MAC_RCR_IPC_INDEX 9
415#define MAC_RCR_IPC_WIDTH 1
416#define MAC_RCR_JE_INDEX 8
417#define MAC_RCR_JE_WIDTH 1
418#define MAC_RCR_LM_INDEX 10
419#define MAC_RCR_LM_WIDTH 1
420#define MAC_RCR_RE_INDEX 0
421#define MAC_RCR_RE_WIDTH 1
422#define MAC_RFCR_RFE_INDEX 0
423#define MAC_RFCR_RFE_WIDTH 1
424#define MAC_RQC0R_RXQ0EN_INDEX 0
425#define MAC_RQC0R_RXQ0EN_WIDTH 2
426#define MAC_TCR_SS_INDEX 29
427#define MAC_TCR_SS_WIDTH 2
428#define MAC_TCR_TE_INDEX 0
429#define MAC_TCR_TE_WIDTH 1
430#define MAC_VLANTR_DOVLTC_INDEX 20
431#define MAC_VLANTR_DOVLTC_WIDTH 1
432#define MAC_VLANTR_ERSVLM_INDEX 19
433#define MAC_VLANTR_ERSVLM_WIDTH 1
434#define MAC_VLANTR_ESVL_INDEX 18
435#define MAC_VLANTR_ESVL_WIDTH 1
436#define MAC_VLANTR_EVLS_INDEX 21
437#define MAC_VLANTR_EVLS_WIDTH 2
438#define MAC_VLANTR_EVLRXS_INDEX 24
439#define MAC_VLANTR_EVLRXS_WIDTH 1
440#define MAC_VR_DEVID_INDEX 8
441#define MAC_VR_DEVID_WIDTH 8
442#define MAC_VR_SNPSVER_INDEX 0
443#define MAC_VR_SNPSVER_WIDTH 8
444#define MAC_VR_USERVER_INDEX 16
445#define MAC_VR_USERVER_WIDTH 8
446
447/* MMC register offsets */
448#define MMC_CR 0x0800
449#define MMC_RISR 0x0804
450#define MMC_TISR 0x0808
451#define MMC_RIER 0x080c
452#define MMC_TIER 0x0810
453#define MMC_TXOCTETCOUNT_GB_LO 0x0814
454#define MMC_TXOCTETCOUNT_GB_HI 0x0818
455#define MMC_TXFRAMECOUNT_GB_LO 0x081c
456#define MMC_TXFRAMECOUNT_GB_HI 0x0820
457#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
458#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
459#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
460#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
461#define MMC_TX64OCTETS_GB_LO 0x0834
462#define MMC_TX64OCTETS_GB_HI 0x0838
463#define MMC_TX65TO127OCTETS_GB_LO 0x083c
464#define MMC_TX65TO127OCTETS_GB_HI 0x0840
465#define MMC_TX128TO255OCTETS_GB_LO 0x0844
466#define MMC_TX128TO255OCTETS_GB_HI 0x0848
467#define MMC_TX256TO511OCTETS_GB_LO 0x084c
468#define MMC_TX256TO511OCTETS_GB_HI 0x0850
469#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
470#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
471#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
472#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
473#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
474#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
475#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
476#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
477#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
478#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
479#define MMC_TXUNDERFLOWERROR_LO 0x087c
480#define MMC_TXUNDERFLOWERROR_HI 0x0880
481#define MMC_TXOCTETCOUNT_G_LO 0x0884
482#define MMC_TXOCTETCOUNT_G_HI 0x0888
483#define MMC_TXFRAMECOUNT_G_LO 0x088c
484#define MMC_TXFRAMECOUNT_G_HI 0x0890
485#define MMC_TXPAUSEFRAMES_LO 0x0894
486#define MMC_TXPAUSEFRAMES_HI 0x0898
487#define MMC_TXVLANFRAMES_G_LO 0x089c
488#define MMC_TXVLANFRAMES_G_HI 0x08a0
489#define MMC_RXFRAMECOUNT_GB_LO 0x0900
490#define MMC_RXFRAMECOUNT_GB_HI 0x0904
491#define MMC_RXOCTETCOUNT_GB_LO 0x0908
492#define MMC_RXOCTETCOUNT_GB_HI 0x090c
493#define MMC_RXOCTETCOUNT_G_LO 0x0910
494#define MMC_RXOCTETCOUNT_G_HI 0x0914
495#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
496#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
497#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
498#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
499#define MMC_RXCRCERROR_LO 0x0928
500#define MMC_RXCRCERROR_HI 0x092c
501#define MMC_RXRUNTERROR 0x0930
502#define MMC_RXJABBERERROR 0x0934
503#define MMC_RXUNDERSIZE_G 0x0938
504#define MMC_RXOVERSIZE_G 0x093c
505#define MMC_RX64OCTETS_GB_LO 0x0940
506#define MMC_RX64OCTETS_GB_HI 0x0944
507#define MMC_RX65TO127OCTETS_GB_LO 0x0948
508#define MMC_RX65TO127OCTETS_GB_HI 0x094c
509#define MMC_RX128TO255OCTETS_GB_LO 0x0950
510#define MMC_RX128TO255OCTETS_GB_HI 0x0954
511#define MMC_RX256TO511OCTETS_GB_LO 0x0958
512#define MMC_RX256TO511OCTETS_GB_HI 0x095c
513#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
514#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
515#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
516#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
517#define MMC_RXUNICASTFRAMES_G_LO 0x0970
518#define MMC_RXUNICASTFRAMES_G_HI 0x0974
519#define MMC_RXLENGTHERROR_LO 0x0978
520#define MMC_RXLENGTHERROR_HI 0x097c
521#define MMC_RXOUTOFRANGETYPE_LO 0x0980
522#define MMC_RXOUTOFRANGETYPE_HI 0x0984
523#define MMC_RXPAUSEFRAMES_LO 0x0988
524#define MMC_RXPAUSEFRAMES_HI 0x098c
525#define MMC_RXFIFOOVERFLOW_LO 0x0990
526#define MMC_RXFIFOOVERFLOW_HI 0x0994
527#define MMC_RXVLANFRAMES_GB_LO 0x0998
528#define MMC_RXVLANFRAMES_GB_HI 0x099c
529#define MMC_RXWATCHDOGERROR 0x09a0
530
531/* MMC register entry bit positions and sizes */
532#define MMC_CR_CR_INDEX 0
533#define MMC_CR_CR_WIDTH 1
534#define MMC_CR_CSR_INDEX 1
535#define MMC_CR_CSR_WIDTH 1
536#define MMC_CR_ROR_INDEX 2
537#define MMC_CR_ROR_WIDTH 1
538#define MMC_CR_MCF_INDEX 3
539#define MMC_CR_MCF_WIDTH 1
540#define MMC_CR_MCT_INDEX 4
541#define MMC_CR_MCT_WIDTH 2
542#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
543#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
544#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
545#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
546#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
547#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
548#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
549#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
550#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
551#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
552#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
553#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
554#define MMC_RISR_RXCRCERROR_INDEX 5
555#define MMC_RISR_RXCRCERROR_WIDTH 1
556#define MMC_RISR_RXRUNTERROR_INDEX 6
557#define MMC_RISR_RXRUNTERROR_WIDTH 1
558#define MMC_RISR_RXJABBERERROR_INDEX 7
559#define MMC_RISR_RXJABBERERROR_WIDTH 1
560#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
561#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
562#define MMC_RISR_RXOVERSIZE_G_INDEX 9
563#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
564#define MMC_RISR_RX64OCTETS_GB_INDEX 10
565#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
566#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
567#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
568#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
569#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
570#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
571#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
572#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
573#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
574#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
575#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
576#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
577#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
578#define MMC_RISR_RXLENGTHERROR_INDEX 17
579#define MMC_RISR_RXLENGTHERROR_WIDTH 1
580#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
581#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
582#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
583#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
584#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
585#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
586#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
587#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
588#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
589#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
590#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
591#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
592#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
593#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
594#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
595#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
596#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
597#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
598#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
599#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
600#define MMC_TISR_TX64OCTETS_GB_INDEX 4
601#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
602#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
603#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
604#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
605#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
606#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
607#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
608#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
609#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
610#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
611#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
612#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
613#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
614#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
615#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
616#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
617#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
618#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
619#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
620#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
621#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
622#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
623#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
624#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
625#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
626#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
627#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
628
629/* MTL register offsets */
630#define MTL_OMR 0x1000
631#define MTL_FDCR 0x1008
632#define MTL_FDSR 0x100c
633#define MTL_FDDR 0x1010
634#define MTL_ISR 0x1020
635#define MTL_RQDCM0R 0x1030
636#define MTL_TCPM0R 0x1040
637#define MTL_TCPM1R 0x1044
638
639#define MTL_RQDCM_INC 4
640#define MTL_RQDCM_Q_PER_REG 4
641
642/* MTL register entry bit positions and sizes */
643#define MTL_OMR_ETSALG_INDEX 5
644#define MTL_OMR_ETSALG_WIDTH 2
645#define MTL_OMR_RAA_INDEX 2
646#define MTL_OMR_RAA_WIDTH 1
647
648/* MTL queue register offsets
649 * Multiple queues can be active. The first queue has registers
650 * that begin at 0x1100. Each subsequent queue has registers that
651 * are accessed using an offset of 0x80 from the previous queue.
652 */
653#define MTL_Q_BASE 0x1100
654#define MTL_Q_INC 0x80
655
656#define MTL_Q_TQOMR 0x00
657#define MTL_Q_TQUR 0x04
658#define MTL_Q_TQDR 0x08
659#define MTL_Q_TCECR 0x10
660#define MTL_Q_TCESR 0x14
661#define MTL_Q_TCQWR 0x18
662#define MTL_Q_RQOMR 0x40
663#define MTL_Q_RQMPOCR 0x44
664#define MTL_Q_RQDR 0x4c
665#define MTL_Q_IER 0x70
666#define MTL_Q_ISR 0x74
667
668/* MTL queue register entry bit positions and sizes */
669#define MTL_Q_TCQWR_QW_INDEX 0
670#define MTL_Q_TCQWR_QW_WIDTH 21
671#define MTL_Q_RQOMR_EHFC_INDEX 7
672#define MTL_Q_RQOMR_EHFC_WIDTH 1
673#define MTL_Q_RQOMR_RFA_INDEX 8
674#define MTL_Q_RQOMR_RFA_WIDTH 3
675#define MTL_Q_RQOMR_RFD_INDEX 13
676#define MTL_Q_RQOMR_RFD_WIDTH 3
677#define MTL_Q_RQOMR_RQS_INDEX 16
678#define MTL_Q_RQOMR_RQS_WIDTH 9
679#define MTL_Q_RQOMR_RSF_INDEX 5
680#define MTL_Q_RQOMR_RSF_WIDTH 1
681#define MTL_Q_RQOMR_RTC_INDEX 0
682#define MTL_Q_RQOMR_RTC_WIDTH 2
683#define MTL_Q_TQOMR_FTQ_INDEX 0
684#define MTL_Q_TQOMR_FTQ_WIDTH 1
685#define MTL_Q_TQOMR_TQS_INDEX 16
686#define MTL_Q_TQOMR_TQS_WIDTH 10
687#define MTL_Q_TQOMR_TSF_INDEX 1
688#define MTL_Q_TQOMR_TSF_WIDTH 1
689#define MTL_Q_TQOMR_TTC_INDEX 4
690#define MTL_Q_TQOMR_TTC_WIDTH 3
691#define MTL_Q_TQOMR_TXQEN_INDEX 2
692#define MTL_Q_TQOMR_TXQEN_WIDTH 2
693
694/* MTL queue register value */
695#define MTL_RSF_DISABLE 0x00
696#define MTL_RSF_ENABLE 0x01
697#define MTL_TSF_DISABLE 0x00
698#define MTL_TSF_ENABLE 0x01
699
700#define MTL_RX_THRESHOLD_64 0x00
701#define MTL_RX_THRESHOLD_96 0x02
702#define MTL_RX_THRESHOLD_128 0x03
703#define MTL_TX_THRESHOLD_32 0x01
704#define MTL_TX_THRESHOLD_64 0x00
705#define MTL_TX_THRESHOLD_96 0x02
706#define MTL_TX_THRESHOLD_128 0x03
707#define MTL_TX_THRESHOLD_192 0x04
708#define MTL_TX_THRESHOLD_256 0x05
709#define MTL_TX_THRESHOLD_384 0x06
710#define MTL_TX_THRESHOLD_512 0x07
711
712#define MTL_ETSALG_WRR 0x00
713#define MTL_ETSALG_WFQ 0x01
714#define MTL_ETSALG_DWRR 0x02
715#define MTL_RAA_SP 0x00
716#define MTL_RAA_WSP 0x01
717
718#define MTL_Q_DISABLED 0x00
719#define MTL_Q_ENABLED 0x02
720
721
722/* MTL traffic class register offsets
723 * Multiple traffic classes can be active. The first class has registers
724 * that begin at 0x1100. Each subsequent queue has registers that
725 * are accessed using an offset of 0x80 from the previous queue.
726 */
727#define MTL_TC_BASE MTL_Q_BASE
728#define MTL_TC_INC MTL_Q_INC
729
730#define MTL_TC_ETSCR 0x10
731
732/* MTL traffic class register entry bit positions and sizes */
733#define MTL_TC_ETSCR_TSA_INDEX 0
734#define MTL_TC_ETSCR_TSA_WIDTH 2
735
736/* MTL traffic class register value */
737#define MTL_TSA_SP 0x00
738#define MTL_TSA_ETS 0x02
739
740
741/* PCS MMD select register offset
742 * The MMD select register is used for accessing PCS registers
743 * when the underlying APB3 interface is using indirect addressing.
744 * Indirect addressing requires accessing registers in two phases,
745 * an address phase and a data phase. The address phase requires
746 * writing an address selection value to the MMD select register.
747 */
748#define PCS_MMD_SELECT 0xff
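/* A hedged sketch of the two-phase sequence just described (the shift
 * and mask layout here is assumed for illustration; the authoritative
 * sequence lives in the driver's MDIO read/write helpers):
 *
 *   XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);   address phase
 *   mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);     data phase
 */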
749
750
751/* Descriptor/Packet entry bit positions and sizes */
752#define RX_PACKET_ERRORS_CRC_INDEX 2
753#define RX_PACKET_ERRORS_CRC_WIDTH 1
754#define RX_PACKET_ERRORS_FRAME_INDEX 3
755#define RX_PACKET_ERRORS_FRAME_WIDTH 1
756#define RX_PACKET_ERRORS_LENGTH_INDEX 0
757#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
758#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
759#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
760
761#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
762#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
763#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
764#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
765#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
766#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
767
768#define RX_NORMAL_DESC0_OVT_INDEX 0
769#define RX_NORMAL_DESC0_OVT_WIDTH 16
770#define RX_NORMAL_DESC3_ES_INDEX 15
771#define RX_NORMAL_DESC3_ES_WIDTH 1
772#define RX_NORMAL_DESC3_ETLT_INDEX 16
773#define RX_NORMAL_DESC3_ETLT_WIDTH 4
774#define RX_NORMAL_DESC3_INTE_INDEX 30
775#define RX_NORMAL_DESC3_INTE_WIDTH 1
776#define RX_NORMAL_DESC3_LD_INDEX 28
777#define RX_NORMAL_DESC3_LD_WIDTH 1
778#define RX_NORMAL_DESC3_OWN_INDEX 31
779#define RX_NORMAL_DESC3_OWN_WIDTH 1
780#define RX_NORMAL_DESC3_PL_INDEX 0
781#define RX_NORMAL_DESC3_PL_WIDTH 14
782
783#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
784#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
785#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
786#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
787#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
788#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
789
790#define TX_CONTEXT_DESC2_MSS_INDEX 0
791#define TX_CONTEXT_DESC2_MSS_WIDTH 15
792#define TX_CONTEXT_DESC3_CTXT_INDEX 30
793#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
794#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
795#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
796#define TX_CONTEXT_DESC3_VLTV_INDEX 16
797#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
798#define TX_CONTEXT_DESC3_VT_INDEX 0
799#define TX_CONTEXT_DESC3_VT_WIDTH 16
800
801#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
802#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
803#define TX_NORMAL_DESC2_IC_INDEX 31
804#define TX_NORMAL_DESC2_IC_WIDTH 1
805#define TX_NORMAL_DESC2_VTIR_INDEX 14
806#define TX_NORMAL_DESC2_VTIR_WIDTH 2
807#define TX_NORMAL_DESC3_CIC_INDEX 16
808#define TX_NORMAL_DESC3_CIC_WIDTH 2
809#define TX_NORMAL_DESC3_CPC_INDEX 26
810#define TX_NORMAL_DESC3_CPC_WIDTH 2
811#define TX_NORMAL_DESC3_CTXT_INDEX 30
812#define TX_NORMAL_DESC3_CTXT_WIDTH 1
813#define TX_NORMAL_DESC3_FD_INDEX 29
814#define TX_NORMAL_DESC3_FD_WIDTH 1
815#define TX_NORMAL_DESC3_FL_INDEX 0
816#define TX_NORMAL_DESC3_FL_WIDTH 15
817#define TX_NORMAL_DESC3_LD_INDEX 28
818#define TX_NORMAL_DESC3_LD_WIDTH 1
819#define TX_NORMAL_DESC3_OWN_INDEX 31
820#define TX_NORMAL_DESC3_OWN_WIDTH 1
821#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
822#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
823#define TX_NORMAL_DESC3_TCPPL_INDEX 0
824#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
825#define TX_NORMAL_DESC3_TSE_INDEX 18
826#define TX_NORMAL_DESC3_TSE_WIDTH 1
827
828#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
829
830/* MDIO undefined or vendor specific registers */
831#ifndef MDIO_AN_COMP_STAT
832#define MDIO_AN_COMP_STAT 0x0030
833#endif
834
835
836/* Bit setting and getting macros
837 * The get macro will extract the current bit field value from within
838 * the variable
839 *
840 * The set macro will clear the current bit field value within the
841 * variable and then set the bit field of the variable to the
842 * specified value
843 */
844#define GET_BITS(_var, _index, _width) \
845 (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
846
847#define SET_BITS(_var, _index, _width, _val) \
848do { \
849 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
850 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
851} while (0)
852
853#define GET_BITS_LE(_var, _index, _width) \
854 ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
855
856#define SET_BITS_LE(_var, _index, _width, _val) \
857do { \
858 (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
859 (_var) |= cpu_to_le32((((_val) & \
860 ((0x1 << (_width)) - 1)) << (_index))); \
861} while (0)
862
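/* A short usage sketch (the local variable is hypothetical): set and
 * read back the 9-bit RQS field defined earlier in this header.
 *
 *   unsigned int rqomr = 0;
 *
 *   SET_BITS(rqomr, MTL_Q_RQOMR_RQS_INDEX, MTL_Q_RQOMR_RQS_WIDTH, 0x40);
 *   GET_BITS(rqomr, MTL_Q_RQOMR_RQS_INDEX, MTL_Q_RQOMR_RQS_WIDTH);
 *
 * GET_BITS shifts right by 16 and masks with ((0x1 << 9) - 1),
 * recovering the 0x40 written by SET_BITS.
 */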
863
864/* Bit setting and getting macros based on register fields
865 * The get macro uses the bit field definitions formed using the input
866 * names to extract the current bit field value from within the
867 * variable
868 *
869 * The set macro uses the bit field definitions formed using the input
870 * names to set the bit field of the variable to the specified value
871 */
872#define XGMAC_GET_BITS(_var, _prefix, _field) \
873 GET_BITS((_var), \
874 _prefix##_##_field##_INDEX, \
875 _prefix##_##_field##_WIDTH)
876
877#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
878 SET_BITS((_var), \
879 _prefix##_##_field##_INDEX, \
880 _prefix##_##_field##_WIDTH, (_val))
881
882#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
883 GET_BITS_LE((_var), \
884 _prefix##_##_field##_INDEX, \
885 _prefix##_##_field##_WIDTH)
886
887#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
888 SET_BITS_LE((_var), \
889 _prefix##_##_field##_INDEX, \
890 _prefix##_##_field##_WIDTH, (_val))
891
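/* For illustration: a call such as
 *
 *   XGMAC_SET_BITS(var, MTL_Q_RQOMR, RQS, val)
 *
 * pastes its arguments into
 *
 *   SET_BITS(var, MTL_Q_RQOMR_RQS_INDEX, MTL_Q_RQOMR_RQS_WIDTH, val)
 *
 * so fields are always addressed by symbolic name instead of
 * hand-written shifts and masks.
 */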
892
893/* Macros for reading or writing registers
894 * The ioread macros will get bit fields or full values using the
895 * register definitions formed using the input names
896 *
897 * The iowrite macros will set bit fields or full values using the
898 * register definitions formed using the input names
899 */
900#define XGMAC_IOREAD(_pdata, _reg) \
901 ioread32((_pdata)->xgmac_regs + _reg)
902
903#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
904 GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
905 _reg##_##_field##_INDEX, \
906 _reg##_##_field##_WIDTH)
907
908#define XGMAC_IOWRITE(_pdata, _reg, _val) \
909 iowrite32((_val), (_pdata)->xgmac_regs + _reg)
910
911#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
912do { \
913 u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
914 SET_BITS(reg_val, \
915 _reg##_##_field##_INDEX, \
916 _reg##_##_field##_WIDTH, (_val)); \
917 XGMAC_IOWRITE((_pdata), _reg, reg_val); \
918} while (0)
919
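/* Example of the read-modify-write the iowrite macro performs (the
 * flow control code later in the driver uses this same pattern):
 *
 *   XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
 *
 * expands to an ioread32 of MAC_RFCR, a SET_BITS on the RFE field and
 * an iowrite32 of the result, leaving all other fields untouched.
 */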
920
921/* Macros for reading or writing MTL queue or traffic class registers
922 * Similar to the standard read and write macros except that the
923 * base register value is calculated by the queue or traffic class number
924 */
925#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
926 ioread32((_pdata)->xgmac_regs + \
927 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
928
929#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
930 GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
931 _reg##_##_field##_INDEX, \
932 _reg##_##_field##_WIDTH)
933
934#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
935 iowrite32((_val), (_pdata)->xgmac_regs + \
936 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
937
938#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
939do { \
940 u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
941 SET_BITS(reg_val, \
942 _reg##_##_field##_INDEX, \
943 _reg##_##_field##_WIDTH, (_val)); \
944 XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
945} while (0)
946
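/* For illustration:
 *
 *   XGMAC_MTL_IOREAD_BITS(pdata, 2, MTL_Q_RQOMR, RQS)
 *
 * reads the register at xgmac_regs + 0x1100 + (2 * 0x80) + 0x40 and
 * extracts the RQS field, so per-queue code never computes MTL
 * addresses by hand.
 */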
947
948/* Macros for reading or writing DMA channel registers
949 * Similar to the standard read and write macros except that the
950 * base register value is obtained from the ring
951 */
952#define XGMAC_DMA_IOREAD(_channel, _reg) \
953 ioread32((_channel)->dma_regs + _reg)
954
955#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
956 GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
957 _reg##_##_field##_INDEX, \
958 _reg##_##_field##_WIDTH)
959
960#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
961 iowrite32((_val), (_channel)->dma_regs + _reg)
962
963#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
964do { \
965 u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
966 SET_BITS(reg_val, \
967 _reg##_##_field##_INDEX, \
968 _reg##_##_field##_WIDTH, (_val)); \
969 XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
970} while (0)
971
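/* Example (mirroring the Rx coalescing code later in the driver):
 *
 *   XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, pdata->rx_riwt);
 *
 * reads DMA_CH_RIWT from channel->dma_regs, updates the RWT field and
 * writes the register back.
 */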
972
973/* Macros for building, reading or writing register values or bits
974 * within the register values of XPCS registers.
975 */
976#define XPCS_IOWRITE(_pdata, _off, _val) \
977 iowrite32(_val, (_pdata)->xpcs_regs + (_off))
978
979#define XPCS_IOREAD(_pdata, _off) \
980 ioread32((_pdata)->xpcs_regs + (_off))
981
982
983/* Macros for building, reading or writing register values or bits
984 * using MDIO. Different from above because of the use of standardized
985 * Linux include values. No shifting is performed with the bit
986 * operations, everything works on mask values.
987 */
988#define XMDIO_READ(_pdata, _mmd, _reg) \
989 ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
990 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
991
992#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
993 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
994
995#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
996 ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
997 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
998
999#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
1000do { \
1001 u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
1002 mmd_val &= ~_mask; \
1003 mmd_val |= (_val); \
1004 XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
1005} while (0)
1006
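/* A hedged example using the standard Linux MDIO constants:
 *
 *   XMDIO_WRITE_BITS(pdata, MDIO_MMD_AN, MDIO_CTRL1,
 *                    MDIO_AN_CTRL1_ENABLE, MDIO_AN_CTRL1_ENABLE);
 *
 * reads the AN control register, clears the masked bit and ORs the
 * already-positioned value back in; no shifting is performed.
 */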
1007#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000000..6bb76d5c817b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/debugfs.h>
118#include <linux/module.h>
119#include <linux/slab.h>
120
121#include "xgbe.h"
122#include "xgbe-common.h"
123
124
125static ssize_t xgbe_common_read(char __user *buffer, size_t count,
126 loff_t *ppos, unsigned int value)
127{
128 char *buf;
129 ssize_t len;
130
131 if (*ppos != 0)
132 return 0;
133
134 buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
135 if (!buf)
136 return -ENOMEM;
137
138 if (count < strlen(buf)) {
139 kfree(buf);
140 return -ENOSPC;
141 }
142
143 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
144 kfree(buf);
145
146 return len;
147}
148
149static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
150 loff_t *ppos, unsigned int *value)
151{
152 char workarea[32];
153 ssize_t len;
154 unsigned int scan_value;
155
156 if (*ppos != 0)
157 return 0;
158
159 if (count >= sizeof(workarea))
160 return -ENOSPC;
161
162 len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
163 buffer, count);
164 if (len < 0)
165 return len;
166
167 workarea[len] = '\0';
168 if (sscanf(workarea, "%x", &scan_value) == 1)
169 *value = scan_value;
170 else
171 return -EIO;
172
173 return len;
174}
175
176static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
177 size_t count, loff_t *ppos)
178{
179 struct xgbe_prv_data *pdata = filp->private_data;
180
181 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
182}
183
184static ssize_t xgmac_reg_addr_write(struct file *filp,
185 const char __user *buffer,
186 size_t count, loff_t *ppos)
187{
188 struct xgbe_prv_data *pdata = filp->private_data;
189
190 return xgbe_common_write(buffer, count, ppos,
191 &pdata->debugfs_xgmac_reg);
192}
193
194static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
195 size_t count, loff_t *ppos)
196{
197 struct xgbe_prv_data *pdata = filp->private_data;
198 unsigned int value;
199
200 value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
201
202 return xgbe_common_read(buffer, count, ppos, value);
203}
204
205static ssize_t xgmac_reg_value_write(struct file *filp,
206 const char __user *buffer,
207 size_t count, loff_t *ppos)
208{
209 struct xgbe_prv_data *pdata = filp->private_data;
210 unsigned int value;
211 ssize_t len;
212
213 len = xgbe_common_write(buffer, count, ppos, &value);
214 if (len < 0)
215 return len;
216
217 XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
218
219 return len;
220}
221
222static const struct file_operations xgmac_reg_addr_fops = {
223 .owner = THIS_MODULE,
224 .open = simple_open,
225 .read = xgmac_reg_addr_read,
226 .write = xgmac_reg_addr_write,
227};
228
229static const struct file_operations xgmac_reg_value_fops = {
230 .owner = THIS_MODULE,
231 .open = simple_open,
232 .read = xgmac_reg_value_read,
233 .write = xgmac_reg_value_write,
234};
235
236static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
237 size_t count, loff_t *ppos)
238{
239 struct xgbe_prv_data *pdata = filp->private_data;
240
241 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
242}
243
244static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
245 size_t count, loff_t *ppos)
246{
247 struct xgbe_prv_data *pdata = filp->private_data;
248
249 return xgbe_common_write(buffer, count, ppos,
250 &pdata->debugfs_xpcs_mmd);
251}
252
253static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
254 size_t count, loff_t *ppos)
255{
256 struct xgbe_prv_data *pdata = filp->private_data;
257
258 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
259}
260
261static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
262 size_t count, loff_t *ppos)
263{
264 struct xgbe_prv_data *pdata = filp->private_data;
265
266 return xgbe_common_write(buffer, count, ppos,
267 &pdata->debugfs_xpcs_reg);
268}
269
270static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
271 size_t count, loff_t *ppos)
272{
273 struct xgbe_prv_data *pdata = filp->private_data;
274 unsigned int value;
275
276 value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
277 pdata->debugfs_xpcs_reg);
278
279 return xgbe_common_read(buffer, count, ppos, value);
280}
281
282static ssize_t xpcs_reg_value_write(struct file *filp,
283 const char __user *buffer,
284 size_t count, loff_t *ppos)
285{
286 struct xgbe_prv_data *pdata = filp->private_data;
287 unsigned int value;
288 ssize_t len;
289
290 len = xgbe_common_write(buffer, count, ppos, &value);
291 if (len < 0)
292 return len;
293
294 pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
295 pdata->debugfs_xpcs_reg, value);
296
297 return len;
298}
299
300static const struct file_operations xpcs_mmd_fops = {
301 .owner = THIS_MODULE,
302 .open = simple_open,
303 .read = xpcs_mmd_read,
304 .write = xpcs_mmd_write,
305};
306
307static const struct file_operations xpcs_reg_addr_fops = {
308 .owner = THIS_MODULE,
309 .open = simple_open,
310 .read = xpcs_reg_addr_read,
311 .write = xpcs_reg_addr_write,
312};
313
314static const struct file_operations xpcs_reg_value_fops = {
315 .owner = THIS_MODULE,
316 .open = simple_open,
317 .read = xpcs_reg_value_read,
318 .write = xpcs_reg_value_write,
319};
320
321void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
322{
323 struct dentry *pfile;
324 char *buf;
325
326 /* Set defaults */
327 pdata->debugfs_xgmac_reg = 0;
328 pdata->debugfs_xpcs_mmd = 1;
329 pdata->debugfs_xpcs_reg = 0;
330
331 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
332 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
333 if (pdata->xgbe_debugfs == NULL) {
334 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
335 return;
336 }
337
338 pfile = debugfs_create_file("xgmac_register", 0600,
339 pdata->xgbe_debugfs, pdata,
340 &xgmac_reg_addr_fops);
341 if (!pfile)
342 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
343
344 pfile = debugfs_create_file("xgmac_register_value", 0600,
345 pdata->xgbe_debugfs, pdata,
346 &xgmac_reg_value_fops);
347 if (!pfile)
348 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
349
350 pfile = debugfs_create_file("xpcs_mmd", 0600,
351 pdata->xgbe_debugfs, pdata,
352 &xpcs_mmd_fops);
353 if (!pfile)
354 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
355
356 pfile = debugfs_create_file("xpcs_register", 0600,
357 pdata->xgbe_debugfs, pdata,
358 &xpcs_reg_addr_fops);
359 if (!pfile)
360 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
361
362 pfile = debugfs_create_file("xpcs_register_value", 0600,
363 pdata->xgbe_debugfs, pdata,
364 &xpcs_reg_value_fops);
365 if (!pfile)
366 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
367
368 kfree(buf);
369}
370
371void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
372{
373 debugfs_remove_recursive(pdata->xgbe_debugfs);
374 pdata->xgbe_debugfs = NULL;
375}
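/* Usage sketch (interface name assumed to be eth0): the address/value
 * file pairs created above are driven from user space as, e.g.,
 *
 *   echo 0x1000 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
 *   cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
 *
 * i.e. write a register offset to the *_register file, then read or
 * write *_register_value to access that register.
 */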
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000000..6f1c85956d50
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include "xgbe.h"
118#include "xgbe-common.h"
119
120
121static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
122
123static void xgbe_free_ring(struct xgbe_prv_data *pdata,
124 struct xgbe_ring *ring)
125{
126 struct xgbe_ring_data *rdata;
127 unsigned int i;
128
129 if (!ring)
130 return;
131
132 if (ring->rdata) {
133 for (i = 0; i < ring->rdesc_count; i++) {
134 rdata = GET_DESC_DATA(ring, i);
135 xgbe_unmap_skb(pdata, rdata);
136 }
137
138 kfree(ring->rdata);
139 ring->rdata = NULL;
140 }
141
142 if (ring->rdesc) {
143 dma_free_coherent(pdata->dev,
144 (sizeof(struct xgbe_ring_desc) *
145 ring->rdesc_count),
146 ring->rdesc, ring->rdesc_dma);
147 ring->rdesc = NULL;
148 }
149}
150
151static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
152{
153 struct xgbe_channel *channel;
154 unsigned int i;
155
156 DBGPR("-->xgbe_free_ring_resources\n");
157
158 channel = pdata->channel;
159 for (i = 0; i < pdata->channel_count; i++, channel++) {
160 xgbe_free_ring(pdata, channel->tx_ring);
161 xgbe_free_ring(pdata, channel->rx_ring);
162 }
163
164 DBGPR("<--xgbe_free_ring_resources\n");
165}
166
167static int xgbe_init_ring(struct xgbe_prv_data *pdata,
168 struct xgbe_ring *ring, unsigned int rdesc_count)
169{
170 DBGPR("-->xgbe_init_ring\n");
171
172 if (!ring)
173 return 0;
174
175 /* Descriptors */
176 ring->rdesc_count = rdesc_count;
177 ring->rdesc = dma_alloc_coherent(pdata->dev,
178 (sizeof(struct xgbe_ring_desc) *
179 rdesc_count), &ring->rdesc_dma,
180 GFP_KERNEL);
181 if (!ring->rdesc)
182 return -ENOMEM;
183
184 /* Descriptor information */
185 ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
186 GFP_KERNEL);
187 if (!ring->rdata)
188 return -ENOMEM;
189
190 DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
191 ring->rdesc, ring->rdesc_dma, ring->rdata);
192
193 DBGPR("<--xgbe_init_ring\n");
194
195 return 0;
196}
197
198static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
199{
200 struct xgbe_channel *channel;
201 unsigned int i;
202 int ret;
203
204 DBGPR("-->xgbe_alloc_ring_resources\n");
205
206 channel = pdata->channel;
207 for (i = 0; i < pdata->channel_count; i++, channel++) {
208 DBGPR(" %s - tx_ring:\n", channel->name);
209 ret = xgbe_init_ring(pdata, channel->tx_ring,
210 pdata->tx_desc_count);
211 if (ret) {
212 netdev_alert(pdata->netdev,
213 "error initializing Tx ring\n");
214 goto err_ring;
215 }
216
217 DBGPR(" %s - rx_ring:\n", channel->name);
218 ret = xgbe_init_ring(pdata, channel->rx_ring,
219 pdata->rx_desc_count);
220 if (ret) {
221 netdev_alert(pdata->netdev,
222 				     "error initializing Rx ring\n");
223 goto err_ring;
224 }
225 }
226
227 DBGPR("<--xgbe_alloc_ring_resources\n");
228
229 return 0;
230
231err_ring:
232 xgbe_free_ring_resources(pdata);
233
234 return ret;
235}
236
237static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
238{
239 struct xgbe_hw_if *hw_if = &pdata->hw_if;
240 struct xgbe_channel *channel;
241 struct xgbe_ring *ring;
242 struct xgbe_ring_data *rdata;
243 struct xgbe_ring_desc *rdesc;
244 dma_addr_t rdesc_dma;
245 unsigned int i, j;
246
247 DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
248
249 channel = pdata->channel;
250 for (i = 0; i < pdata->channel_count; i++, channel++) {
251 ring = channel->tx_ring;
252 if (!ring)
253 break;
254
255 rdesc = ring->rdesc;
256 rdesc_dma = ring->rdesc_dma;
257
258 for (j = 0; j < ring->rdesc_count; j++) {
259 rdata = GET_DESC_DATA(ring, j);
260
261 rdata->rdesc = rdesc;
262 rdata->rdesc_dma = rdesc_dma;
263
264 rdesc++;
265 rdesc_dma += sizeof(struct xgbe_ring_desc);
266 }
267
268 ring->cur = 0;
269 ring->dirty = 0;
270 ring->tx.queue_stopped = 0;
271
272 hw_if->tx_desc_init(channel);
273 }
274
275 DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
276}
277
278static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
279{
280 struct xgbe_hw_if *hw_if = &pdata->hw_if;
281 struct xgbe_channel *channel;
282 struct xgbe_ring *ring;
283 struct xgbe_ring_desc *rdesc;
284 struct xgbe_ring_data *rdata;
285 dma_addr_t rdesc_dma, skb_dma;
286 struct sk_buff *skb = NULL;
287 unsigned int i, j;
288
289 DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 ring = channel->rx_ring;
294 if (!ring)
295 break;
296
297 rdesc = ring->rdesc;
298 rdesc_dma = ring->rdesc_dma;
299
300 for (j = 0; j < ring->rdesc_count; j++) {
301 rdata = GET_DESC_DATA(ring, j);
302
303 rdata->rdesc = rdesc;
304 rdata->rdesc_dma = rdesc_dma;
305
306 /* Allocate skb & assign to each rdesc */
307 skb = dev_alloc_skb(pdata->rx_buf_size);
308 if (skb == NULL)
309 break;
310 skb_dma = dma_map_single(pdata->dev, skb->data,
311 pdata->rx_buf_size,
312 DMA_FROM_DEVICE);
313 if (dma_mapping_error(pdata->dev, skb_dma)) {
314 netdev_alert(pdata->netdev,
315 					     "DMA mapping failed\n");
316 dev_kfree_skb_any(skb);
317 break;
318 }
319 rdata->skb = skb;
320 rdata->skb_dma = skb_dma;
321 rdata->skb_dma_len = pdata->rx_buf_size;
322
323 rdesc++;
324 rdesc_dma += sizeof(struct xgbe_ring_desc);
325 }
326
327 ring->cur = 0;
328 ring->dirty = 0;
329 ring->rx.realloc_index = 0;
330 ring->rx.realloc_threshold = 0;
331
332 hw_if->rx_desc_init(channel);
333 }
334
335 DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
336}
337
338static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
339 struct xgbe_ring_data *rdata)
340{
341 if (rdata->skb_dma) {
342 if (rdata->mapped_as_page) {
343 dma_unmap_page(pdata->dev, rdata->skb_dma,
344 rdata->skb_dma_len, DMA_TO_DEVICE);
345 } else {
346 dma_unmap_single(pdata->dev, rdata->skb_dma,
347 rdata->skb_dma_len, DMA_TO_DEVICE);
348 }
349 rdata->skb_dma = 0;
350 rdata->skb_dma_len = 0;
351 }
352
353 if (rdata->skb) {
354 dev_kfree_skb_any(rdata->skb);
355 rdata->skb = NULL;
356 }
357
358 rdata->tso_header = 0;
359 rdata->len = 0;
360 rdata->interrupt = 0;
361 rdata->mapped_as_page = 0;
362}
363
364static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
365{
366 struct xgbe_prv_data *pdata = channel->pdata;
367 struct xgbe_ring *ring = channel->tx_ring;
368 struct xgbe_ring_data *rdata;
369 struct xgbe_packet_data *packet;
370 struct skb_frag_struct *frag;
371 dma_addr_t skb_dma;
372 unsigned int start_index, cur_index;
373 unsigned int offset, tso, vlan, datalen, len;
374 unsigned int i;
375
376 DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
377
378 offset = 0;
379 start_index = ring->cur;
380 cur_index = ring->cur;
381
382 packet = &ring->packet_data;
383 packet->rdesc_count = 0;
384 packet->length = 0;
385
386 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
387 TSO_ENABLE);
388 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
389 VLAN_CTAG);
390
391 /* Save space for a context descriptor if needed */
392 if ((tso && (packet->mss != ring->tx.cur_mss)) ||
393 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
394 cur_index++;
395 rdata = GET_DESC_DATA(ring, cur_index);
396
397 if (tso) {
398 DBGPR(" TSO packet\n");
399
400 /* Map the TSO header */
401 skb_dma = dma_map_single(pdata->dev, skb->data,
402 packet->header_len, DMA_TO_DEVICE);
403 if (dma_mapping_error(pdata->dev, skb_dma)) {
404 netdev_alert(pdata->netdev, "dma_map_single failed\n");
405 goto err_out;
406 }
407 rdata->skb_dma = skb_dma;
408 rdata->skb_dma_len = packet->header_len;
409 rdata->tso_header = 1;
410
411 offset = packet->header_len;
412
413 packet->length += packet->header_len;
414
415 cur_index++;
416 rdata = GET_DESC_DATA(ring, cur_index);
417 }
418
419 /* Map the (remainder of the) packet */
420 for (datalen = skb_headlen(skb) - offset; datalen; ) {
421 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
422
423 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
424 DMA_TO_DEVICE);
425 if (dma_mapping_error(pdata->dev, skb_dma)) {
426 netdev_alert(pdata->netdev, "dma_map_single failed\n");
427 goto err_out;
428 }
429 rdata->skb_dma = skb_dma;
430 rdata->skb_dma_len = len;
431 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
432 cur_index, skb_dma, len);
433
434 datalen -= len;
435 offset += len;
436
437 packet->length += len;
438
439 cur_index++;
440 rdata = GET_DESC_DATA(ring, cur_index);
441 }
442
443 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
444 DBGPR(" mapping frag %u\n", i);
445
446 frag = &skb_shinfo(skb)->frags[i];
447 offset = 0;
448
449 for (datalen = skb_frag_size(frag); datalen; ) {
450 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
451
452 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
453 len, DMA_TO_DEVICE);
454 if (dma_mapping_error(pdata->dev, skb_dma)) {
455 netdev_alert(pdata->netdev,
456 "skb_frag_dma_map failed\n");
457 goto err_out;
458 }
459 rdata->skb_dma = skb_dma;
460 rdata->skb_dma_len = len;
461 rdata->mapped_as_page = 1;
462 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
463 cur_index, skb_dma, len);
464
465 datalen -= len;
466 offset += len;
467
468 packet->length += len;
469
470 cur_index++;
471 rdata = GET_DESC_DATA(ring, cur_index);
472 }
473 }
474
475 /* Save the skb address in the last entry */
476 rdata->skb = skb;
477
478 /* Save the number of descriptor entries used */
479 packet->rdesc_count = cur_index - start_index;
480
481 DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
482
483 return packet->rdesc_count;
484
485err_out:
486 while (start_index < cur_index) {
487 rdata = GET_DESC_DATA(ring, start_index++);
488 xgbe_unmap_skb(pdata, rdata);
489 }
490
491 DBGPR("<--xgbe_map_tx_skb: count=0\n");
492
493 return 0;
494}
495
496static void xgbe_realloc_skb(struct xgbe_channel *channel)
497{
498 struct xgbe_prv_data *pdata = channel->pdata;
499 struct xgbe_hw_if *hw_if = &pdata->hw_if;
500 struct xgbe_ring *ring = channel->rx_ring;
501 struct xgbe_ring_data *rdata;
502 struct sk_buff *skb = NULL;
503 dma_addr_t skb_dma;
504 int i;
505
506 DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
507 ring->rx.realloc_index);
508
509 for (i = 0; i < ring->dirty; i++) {
510 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
511
512 /* Reset rdata values */
513 xgbe_unmap_skb(pdata, rdata);
514
515 /* Allocate skb & assign to each rdesc */
516 skb = dev_alloc_skb(pdata->rx_buf_size);
517 if (skb == NULL) {
518 netdev_alert(pdata->netdev,
519 "failed to allocate skb\n");
520 break;
521 }
522 skb_dma = dma_map_single(pdata->dev, skb->data,
523 pdata->rx_buf_size, DMA_FROM_DEVICE);
524 if (dma_mapping_error(pdata->dev, skb_dma)) {
525 netdev_alert(pdata->netdev,
526 				     "DMA mapping failed\n");
527 dev_kfree_skb_any(skb);
528 break;
529 }
530 rdata->skb = skb;
531 rdata->skb_dma = skb_dma;
532 rdata->skb_dma_len = pdata->rx_buf_size;
533
534 hw_if->rx_desc_reset(rdata);
535
536 ring->rx.realloc_index++;
537 }
538 ring->dirty = 0;
539
540 DBGPR("<--xgbe_realloc_skb\n");
541}
542
543void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
544{
545 DBGPR("-->xgbe_init_function_ptrs_desc\n");
546
547 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
548 desc_if->free_ring_resources = xgbe_free_ring_resources;
549 desc_if->map_tx_skb = xgbe_map_tx_skb;
550 desc_if->realloc_skb = xgbe_realloc_skb;
551 desc_if->unmap_skb = xgbe_unmap_skb;
552 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
553 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
554
555 DBGPR("<--xgbe_init_function_ptrs_desc\n");
556}
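/* Call-site sketch (the pdata field name is assumed): the rest of the
 * driver reaches these helpers only through the ops struct, e.g.
 *
 *   xgbe_init_function_ptrs_desc(&pdata->desc_if);
 *   ret = pdata->desc_if.alloc_ring_resources(pdata);
 *
 * keeping the descriptor implementation swappable behind
 * struct xgbe_desc_if.
 */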
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000000..002293b0819d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/phy.h>
118#include <linux/clk.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
125 unsigned int usec)
126{
127 unsigned long rate;
128 unsigned int ret;
129
130 DBGPR("-->xgbe_usec_to_riwt\n");
131
132 rate = clk_get_rate(pdata->sysclock);
133
134 /*
135 * Convert the input usec value to the watchdog timer value. Each
136 * watchdog timer value is equivalent to 256 clock cycles.
137 * Calculate the required value as:
138	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
139 */
140 ret = (usec * (rate / 1000000)) / 256;
141
142 DBGPR("<--xgbe_usec_to_riwt\n");
143
144 return ret;
145}
146
147static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
148 unsigned int riwt)
149{
150 unsigned long rate;
151 unsigned int ret;
152
153 DBGPR("-->xgbe_riwt_to_usec\n");
154
155 rate = clk_get_rate(pdata->sysclock);
156
157 /*
158 * Convert the input watchdog timer value to the usec value. Each
159 * watchdog timer value is equivalent to 256 clock cycles.
160 * Calculate the required value as:
161	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
162 */
163 ret = (riwt * 256) / (rate / 1000000);
164
165 DBGPR("<--xgbe_riwt_to_usec\n");
166
167 return ret;
168}
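/* Worked example (assuming a 125 MHz system clock): 30 usec converts
 * to (30 * (125000000 / 1000000)) / 256 = 3750 / 256 = 14 watchdog
 * units, and 14 units convert back to (14 * 256) / 125 = 28 usec; the
 * 256-cycle granularity makes the round trip lossy.
 */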
169
170static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
171{
172 struct xgbe_channel *channel;
173 unsigned int i;
174
175 channel = pdata->channel;
176 for (i = 0; i < pdata->channel_count; i++, channel++)
177 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
178 pdata->pblx8);
179
180 return 0;
181}
182
183static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
184{
185 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
186}
187
188static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
189{
190 struct xgbe_channel *channel;
191 unsigned int i;
192
193 channel = pdata->channel;
194 for (i = 0; i < pdata->channel_count; i++, channel++) {
195 if (!channel->tx_ring)
196 break;
197
198 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
199 pdata->tx_pbl);
200 }
201
202 return 0;
203}
204
205static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
206{
207 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
208}
209
210static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
211{
212 struct xgbe_channel *channel;
213 unsigned int i;
214
215 channel = pdata->channel;
216 for (i = 0; i < pdata->channel_count; i++, channel++) {
217 if (!channel->rx_ring)
218 break;
219
220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
221 pdata->rx_pbl);
222 }
223
224 return 0;
225}
226
227static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
228{
229 struct xgbe_channel *channel;
230 unsigned int i;
231
232 channel = pdata->channel;
233 for (i = 0; i < pdata->channel_count; i++, channel++) {
234 if (!channel->tx_ring)
235 break;
236
237 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
238 pdata->tx_osp_mode);
239 }
240
241 return 0;
242}
243
244static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
245{
246 unsigned int i;
247
248 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
249 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
250
251 return 0;
252}
253
254static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
255{
256 unsigned int i;
257
258 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
259 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
260
261 return 0;
262}
263
264static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
265 unsigned int val)
266{
267 unsigned int i;
268
269 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
270 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
271
272 return 0;
273}
274
275static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
276 unsigned int val)
277{
278 unsigned int i;
279
280 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
281 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
282
283 return 0;
284}
285
286static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
287{
288 struct xgbe_channel *channel;
289 unsigned int i;
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 if (!channel->rx_ring)
294 break;
295
296 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
297 pdata->rx_riwt);
298 }
299
300 return 0;
301}
302
303static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
304{
305 return 0;
306}
307
308static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 channel = pdata->channel;
314 for (i = 0; i < pdata->channel_count; i++, channel++) {
315 if (!channel->rx_ring)
316 break;
317
318 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
319 pdata->rx_buf_size);
320 }
321}
322
323static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
324{
325 struct xgbe_channel *channel;
326 unsigned int i;
327
328 channel = pdata->channel;
329 for (i = 0; i < pdata->channel_count; i++, channel++) {
330 if (!channel->tx_ring)
331 break;
332
333 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
334 }
335}
336
337static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
338{
339 unsigned int max_q_count, q_count;
340 unsigned int reg, reg_val;
341 unsigned int i;
342
343 /* Clear MTL flow control */
344 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
345 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
346
347 /* Clear MAC flow control */
348 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
349 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
350 reg = MAC_Q0TFCR;
351 for (i = 0; i < q_count; i++) {
352 reg_val = XGMAC_IOREAD(pdata, reg);
353 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
354 XGMAC_IOWRITE(pdata, reg, reg_val);
355
356 reg += MAC_QTFCR_INC;
357 }
358
359 return 0;
360}
361
362static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
363{
364 unsigned int max_q_count, q_count;
365 unsigned int reg, reg_val;
366 unsigned int i;
367
368 /* Set MTL flow control */
369 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
370 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
371
372 /* Set MAC flow control */
373 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
374 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
375 reg = MAC_Q0TFCR;
376 for (i = 0; i < q_count; i++) {
377 reg_val = XGMAC_IOREAD(pdata, reg);
378
379 /* Enable transmit flow control */
380 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
381 /* Set pause time */
382 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
383
384 XGMAC_IOWRITE(pdata, reg, reg_val);
385
386 reg += MAC_QTFCR_INC;
387 }
388
389 return 0;
390}
391
392static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
393{
394 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
395
396 return 0;
397}
398
399static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
400{
401 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
402
403 return 0;
404}
405
406static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
407{
408 if (pdata->tx_pause)
409 xgbe_enable_tx_flow_control(pdata);
410 else
411 xgbe_disable_tx_flow_control(pdata);
412
413 return 0;
414}
415
416static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
417{
418 if (pdata->rx_pause)
419 xgbe_enable_rx_flow_control(pdata);
420 else
421 xgbe_disable_rx_flow_control(pdata);
422
423 return 0;
424}
425
426static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
427{
428 xgbe_config_tx_flow_control(pdata);
429 xgbe_config_rx_flow_control(pdata);
430}
431
432static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_channel *channel;
435 unsigned int dma_ch_isr, dma_ch_ier;
436 unsigned int i;
437
438 channel = pdata->channel;
439 for (i = 0; i < pdata->channel_count; i++, channel++) {
440 /* Clear all the interrupts which are set */
441 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
442 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
443
444 /* Clear all interrupt enable bits */
445 dma_ch_ier = 0;
446
447		/* Enable the following interrupts
448 * NIE - Normal Interrupt Summary Enable
449 * AIE - Abnormal Interrupt Summary Enable
450 * FBEE - Fatal Bus Error Enable
451 */
452 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
453 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
454 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
455
456 if (channel->tx_ring) {
457 /* Enable the following Tx interrupts
458 * TIE - Transmit Interrupt Enable (unless polling)
459 */
460 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
461 }
462 if (channel->rx_ring) {
463			/* Enable the following Rx interrupts
464 * RBUE - Receive Buffer Unavailable Enable
465 * RIE - Receive Interrupt Enable
466 */
467 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
468 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
469 }
470
471 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
472 }
473}
474
475static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
476{
477 unsigned int mtl_q_isr;
478 unsigned int q_count, i;
479
480 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
481 for (i = 0; i < q_count; i++) {
482 /* Clear all the interrupts which are set */
483 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
484 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
485
486 /* No MTL interrupts to be enabled */
487 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
488 }
489}
490
491static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
492{
493 /* No MAC interrupts to be enabled */
494 XGMAC_IOWRITE(pdata, MAC_IER, 0);
495
496 /* Enable all counter interrupts */
497 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
498 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
499}
500
501static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
502{
503 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
504
505 return 0;
506}
507
508static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
509{
510 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
511
512 return 0;
513}
514
515static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
516{
517 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
518
519 return 0;
520}
521
522static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
523 unsigned int enable)
524{
525 unsigned int val = enable ? 1 : 0;
526
527 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
528 return 0;
529
530 DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
531 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
532
533 return 0;
534}
535
536static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
537 unsigned int enable)
538{
539 unsigned int val = enable ? 1 : 0;
540
541 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
542 return 0;
543
544 DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
545 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
546
547 return 0;
548}
549
550static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
551 unsigned int am_mode)
552{
553 struct netdev_hw_addr *ha;
554 unsigned int mac_reg;
555 unsigned int mac_addr_hi, mac_addr_lo;
556 u8 *mac_addr;
557 unsigned int i;
558
559 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
560 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
561
562 i = 0;
563 mac_reg = MAC_MACA1HR;
564
565 netdev_for_each_uc_addr(ha, pdata->netdev) {
566 mac_addr_lo = 0;
567 mac_addr_hi = 0;
568 mac_addr = (u8 *)&mac_addr_lo;
569 mac_addr[0] = ha->addr[0];
570 mac_addr[1] = ha->addr[1];
571 mac_addr[2] = ha->addr[2];
572 mac_addr[3] = ha->addr[3];
573 mac_addr = (u8 *)&mac_addr_hi;
574 mac_addr[0] = ha->addr[4];
575 mac_addr[1] = ha->addr[5];
576
577 DBGPR(" adding unicast address %pM at 0x%04x\n",
578 ha->addr, mac_reg);
579
580 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
581
582 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
583 mac_reg += MAC_MACA_INC;
584 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
585 mac_reg += MAC_MACA_INC;
586
587 i++;
588 }
589
590 if (!am_mode) {
591 netdev_for_each_mc_addr(ha, pdata->netdev) {
592 mac_addr_lo = 0;
593 mac_addr_hi = 0;
594 mac_addr = (u8 *)&mac_addr_lo;
595 mac_addr[0] = ha->addr[0];
596 mac_addr[1] = ha->addr[1];
597 mac_addr[2] = ha->addr[2];
598 mac_addr[3] = ha->addr[3];
599 mac_addr = (u8 *)&mac_addr_hi;
600 mac_addr[0] = ha->addr[4];
601 mac_addr[1] = ha->addr[5];
602
603 DBGPR(" adding multicast address %pM at 0x%04x\n",
604 ha->addr, mac_reg);
605
606 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
607
608 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
609 mac_reg += MAC_MACA_INC;
610 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
611 mac_reg += MAC_MACA_INC;
612
613 i++;
614 }
615 }
616
617 /* Clear remaining additional MAC address entries */
618 for (; i < pdata->hw_feat.addn_mac; i++) {
619 XGMAC_IOWRITE(pdata, mac_reg, 0);
620 mac_reg += MAC_MACA_INC;
621 XGMAC_IOWRITE(pdata, mac_reg, 0);
622 mac_reg += MAC_MACA_INC;
623 }
624
625 return 0;
626}
627
628static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
629{
630 unsigned int mac_addr_hi, mac_addr_lo;
631
632 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
633 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
634 (addr[1] << 8) | (addr[0] << 0);
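	/* Illustrative example: for the address 01:02:03:04:05:06 this
	 * packs mac_addr_hi = 0x00000605 and mac_addr_lo = 0x04030201,
	 * i.e. the bytes land least-significant-first across the two
	 * registers.
	 */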
635
636 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
637 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
638
639 return 0;
640}
641
642static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
643 int mmd_reg)
644{
645 unsigned int mmd_address;
646 int mmd_data;
647
648 if (mmd_reg & MII_ADDR_C45)
649 mmd_address = mmd_reg & ~MII_ADDR_C45;
650 else
651 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
652
653 /* The PCS registers are accessed using mmio. The underlying APB3
654 * management interface uses indirect addressing to access the MMD
655	 * register sets. This requires the PCS registers to be accessed in
656	 * two phases, an address phase and a data phase.
657 *
658 * The mmio interface is based on 32-bit offsets and values. All
659 * register offsets must therefore be adjusted by left shifting the
660 * offset 2 bits and reading 32 bits of data.
661 */
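	/* Illustrative example: for clause-45 register 0x0001 in MMD 1,
	 * mmd_address is (1 << 16) | 0x0001; the address phase writes
	 * 0x0100 to the MMD select register and the data phase reads
	 * from mmio offset (0x01 << 2).
	 */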
662 mutex_lock(&pdata->xpcs_mutex);
663 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
664 mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
665 mutex_unlock(&pdata->xpcs_mutex);
666
667 return mmd_data;
668}
669
670static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
671 int mmd_reg, int mmd_data)
672{
673 unsigned int mmd_address;
674
675 if (mmd_reg & MII_ADDR_C45)
676 mmd_address = mmd_reg & ~MII_ADDR_C45;
677 else
678 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
679
680 /* The PCS registers are accessed using mmio. The underlying APB3
681 * management interface uses indirect addressing to access the MMD
682	 * register sets. This requires the PCS registers to be accessed in
683	 * two phases, an address phase and a data phase.
684	 *
685	 * The mmio interface is based on 32-bit offsets and values. All
686	 * register offsets must therefore be adjusted by left shifting the
687	 * offset 2 bits and writing 32 bits of data.
688 */
689 mutex_lock(&pdata->xpcs_mutex);
690 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
691 XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
692 mutex_unlock(&pdata->xpcs_mutex);
693}
694
695static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
696{
697 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
698}
699
700static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
701{
702 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
703
704 return 0;
705}
706
707static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
708{
709 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
710
711 return 0;
712}
713
714static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
715{
716 /* Put the VLAN tag in the Rx descriptor */
717 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
718
719 /* Don't check the VLAN type */
720 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
721
722 /* Check only C-TAG (0x8100) packets */
723 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
724
725 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
726 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
727
728 /* Enable VLAN tag stripping */
729 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
730
731 return 0;
732}
733
734static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
735{
736 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
737
738 return 0;
739}
740
741static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
742{
743 struct xgbe_ring_desc *rdesc = rdata->rdesc;
744
745	/* Reset the Tx descriptor
746	 *   Set buffer 1 (lo) address to zero
747	 *   Set buffer 1 (hi) address to zero
748	 *   Reset all control bits in descriptor 2 (IC, TTSE, B2L & B1L)
749	 *   Reset all control bits in descriptor 3 (OWN, CTXT, FD, LD, CPC, CIC, etc)
750	 */
751 rdesc->desc0 = 0;
752 rdesc->desc1 = 0;
753 rdesc->desc2 = 0;
754 rdesc->desc3 = 0;
755}
756
757static void xgbe_tx_desc_init(struct xgbe_channel *channel)
758{
759 struct xgbe_ring *ring = channel->tx_ring;
760 struct xgbe_ring_data *rdata;
761 struct xgbe_ring_desc *rdesc;
762 int i;
763 int start_index = ring->cur;
764
765 DBGPR("-->tx_desc_init\n");
766
767	/* Initialize all descriptors */
768 for (i = 0; i < ring->rdesc_count; i++) {
769 rdata = GET_DESC_DATA(ring, i);
770 rdesc = rdata->rdesc;
771
772 /* Initialize Tx descriptor
773 * Set buffer 1 (lo) address to zero
774 * Set buffer 1 (hi) address to zero
775		 * Reset all control bits in descriptor 2 (IC, TTSE, B2L & B1L)
776		 * Reset all control bits in descriptor 3 (OWN, CTXT, FD, LD,
777		 * CPC, CIC, etc)
778 */
779 rdesc->desc0 = 0;
780 rdesc->desc1 = 0;
781 rdesc->desc2 = 0;
782 rdesc->desc3 = 0;
783 }
784
785 /* Make sure everything is written to the descriptor(s) before
786 * telling the device about them
787 */
788 wmb();
789
790 /* Update the total number of Tx descriptors */
791 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
792
793 /* Update the starting address of descriptor ring */
794 rdata = GET_DESC_DATA(ring, start_index);
795 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
796 upper_32_bits(rdata->rdesc_dma));
797 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
798 lower_32_bits(rdata->rdesc_dma));
799
800 DBGPR("<--tx_desc_init\n");
801}
802
803static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
804{
805 struct xgbe_ring_desc *rdesc = rdata->rdesc;
806
807	/* Reset the Rx descriptor
808	 *   Set buffer 1 (lo) address to dma address (lo)
809	 *   Set buffer 1 (hi) address to dma address (hi)
810	 *   Set buffer 2 (lo) address to zero
811	 *   Set buffer 2 (hi) address to zero and set control bits
812	 *   OWN and INTE
813	 */
814 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
815 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
816 rdesc->desc2 = 0;
817
818 rdesc->desc3 = 0;
819 if (rdata->interrupt)
820 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
821
822 /* Since the Rx DMA engine is likely running, make sure everything
823 * is written to the descriptor(s) before setting the OWN bit
824 * for the descriptor
825 */
826 wmb();
827
828 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
829
830 /* Make sure ownership is written to the descriptor */
831 wmb();
832}
833
834static void xgbe_rx_desc_init(struct xgbe_channel *channel)
835{
836 struct xgbe_prv_data *pdata = channel->pdata;
837 struct xgbe_ring *ring = channel->rx_ring;
838 struct xgbe_ring_data *rdata;
839 struct xgbe_ring_desc *rdesc;
840 unsigned int start_index = ring->cur;
841 unsigned int rx_coalesce, rx_frames;
842 unsigned int i;
843
844 DBGPR("-->rx_desc_init\n");
845
846 rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
847 rx_frames = pdata->rx_frames;
848
849 /* Initialize all descriptors */
850 for (i = 0; i < ring->rdesc_count; i++) {
851 rdata = GET_DESC_DATA(ring, i);
852 rdesc = rdata->rdesc;
853
854 /* Initialize Rx descriptor
855 * Set buffer 1 (lo) address to dma address (lo)
856 * Set buffer 1 (hi) address to dma address (hi)
857 * Set buffer 2 (lo) address to zero
858 * Set buffer 2 (hi) address to zero and set control
859		 * bits OWN and INTE appropriately
860 */
861 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
862 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
863 rdesc->desc2 = 0;
864 rdesc->desc3 = 0;
865 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
866 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
867 rdata->interrupt = 1;
868 if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
869 /* Clear interrupt on completion bit */
870 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
871 0);
872 rdata->interrupt = 0;
873 }
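		/* Net effect (illustrative): with rx_frames = 4, INTE stays
		 * set only on every fourth descriptor, so receive completion
		 * is signalled at most once per four frames.
		 */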
874 }
875
876 /* Make sure everything is written to the descriptors before
877 * telling the device about them
878 */
879 wmb();
880
881 /* Update the total number of Rx descriptors */
882 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
883
884 /* Update the starting address of descriptor ring */
885 rdata = GET_DESC_DATA(ring, start_index);
886 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
887 upper_32_bits(rdata->rdesc_dma));
888 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
889 lower_32_bits(rdata->rdesc_dma));
890
891 /* Update the Rx Descriptor Tail Pointer */
892 rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
893 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
894 lower_32_bits(rdata->rdesc_dma));
895
896 DBGPR("<--rx_desc_init\n");
897}
898
899static void xgbe_pre_xmit(struct xgbe_channel *channel)
900{
901 struct xgbe_prv_data *pdata = channel->pdata;
902 struct xgbe_ring *ring = channel->tx_ring;
903 struct xgbe_ring_data *rdata;
904 struct xgbe_ring_desc *rdesc;
905 struct xgbe_packet_data *packet = &ring->packet_data;
906 unsigned int csum, tso, vlan;
907 unsigned int tso_context, vlan_context;
908 unsigned int tx_coalesce, tx_frames;
909 int start_index = ring->cur;
910 int i;
911
912 DBGPR("-->xgbe_pre_xmit\n");
913
914 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
915 CSUM_ENABLE);
916 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
917 TSO_ENABLE);
918 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
919 VLAN_CTAG);
920
921 if (tso && (packet->mss != ring->tx.cur_mss))
922 tso_context = 1;
923 else
924 tso_context = 0;
925
926 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
927 vlan_context = 1;
928 else
929 vlan_context = 0;
930
931 tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
932 tx_frames = pdata->tx_frames;
933 if (tx_coalesce && !channel->tx_timer_active)
934 ring->coalesce_count = 0;
935
936 rdata = GET_DESC_DATA(ring, ring->cur);
937 rdesc = rdata->rdesc;
938
939	/* Create a context descriptor if a TSO or VLAN context is needed */
940 if (tso_context || vlan_context) {
941 if (tso_context) {
942 DBGPR(" TSO context descriptor, mss=%u\n",
943 packet->mss);
944
945 /* Set the MSS size */
946 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
947 MSS, packet->mss);
948
949 /* Mark it as a CONTEXT descriptor */
950 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
951 CTXT, 1);
952
953 /* Indicate this descriptor contains the MSS */
954 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
955 TCMSSV, 1);
956
957 ring->tx.cur_mss = packet->mss;
958 }
959
960 if (vlan_context) {
961 DBGPR(" VLAN context descriptor, ctag=%u\n",
962 packet->vlan_ctag);
963
964 /* Mark it as a CONTEXT descriptor */
965 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
966 CTXT, 1);
967
968 /* Set the VLAN tag */
969 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
970 VT, packet->vlan_ctag);
971
972 /* Indicate this descriptor contains the VLAN tag */
973 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
974 VLTV, 1);
975
976 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
977 }
978
979 ring->cur++;
980 rdata = GET_DESC_DATA(ring, ring->cur);
981 rdesc = rdata->rdesc;
982 }
983
984 /* Update buffer address (for TSO this is the header) */
985 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
986 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
987
988 /* Update the buffer length */
989 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
990 rdata->skb_dma_len);
991
992 /* VLAN tag insertion check */
993 if (vlan)
994 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
995 TX_NORMAL_DESC2_VLAN_INSERT);
996
997 /* Set IC bit based on Tx coalescing settings */
998 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
999 if (tx_coalesce && (!tx_frames ||
1000 (++ring->coalesce_count % tx_frames)))
1001 /* Clear IC bit */
1002 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
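	/* As on the Rx side (illustrative): with tx_frames = 8, IC stays
	 * set only on every eighth descriptor, so roughly one Tx completion
	 * interrupt is raised per eight frames, with the coalescing timer
	 * below acting as a backstop.
	 */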
1003
1004 /* Mark it as First Descriptor */
1005 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1006
1007 /* Mark it as a NORMAL descriptor */
1008 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1009
1010 /* Set OWN bit if not the first descriptor */
1011 if (ring->cur != start_index)
1012 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1013
1014 if (tso) {
1015 /* Enable TSO */
1016 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1017 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1018 packet->tcp_payload_len);
1019 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1020 packet->tcp_header_len / 4);
1021 } else {
1022 /* Enable CRC and Pad Insertion */
1023 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1024
1025 /* Enable HW CSUM */
1026 if (csum)
1027 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1028 CIC, 0x3);
1029
1030 /* Set the total length to be transmitted */
1031 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1032 packet->length);
1033 }
1034
1035 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
1036 ring->cur++;
1037 rdata = GET_DESC_DATA(ring, ring->cur);
1038 rdesc = rdata->rdesc;
1039
1040 /* Update buffer address */
1041 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1042 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1043
1044 /* Update the buffer length */
1045 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1046 rdata->skb_dma_len);
1047
1048 /* Set IC bit based on Tx coalescing settings */
1049 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1050 if (tx_coalesce && (!tx_frames ||
1051 (++ring->coalesce_count % tx_frames)))
1052 /* Clear IC bit */
1053 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1054
1055 /* Set OWN bit */
1056 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1057
1058 /* Mark it as NORMAL descriptor */
1059 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1060
1061 /* Enable HW CSUM */
1062 if (csum)
1063 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1064 CIC, 0x3);
1065 }
1066
1067 /* Set LAST bit for the last descriptor */
1068 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1069
1070 /* In case the Tx DMA engine is running, make sure everything
1071 * is written to the descriptor(s) before setting the OWN bit
1072 * for the first descriptor
1073 */
1074 wmb();
1075
1076 /* Set OWN bit for the first descriptor */
1077 rdata = GET_DESC_DATA(ring, start_index);
1078 rdesc = rdata->rdesc;
1079 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
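	/* The first descriptor's OWN bit is deliberately set last; the DMA
	 * engine will not consume any descriptor of this packet until then,
	 * so the multi-descriptor update appears atomic to the hardware.
	 */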
1080
1081#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1082 xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
1083#endif
1084
1085 /* Make sure ownership is written to the descriptor */
1086 wmb();
1087
1088	/* Issue a poll command to the Tx DMA by writing the address
1089	 * of the next free descriptor */
1090 ring->cur++;
1091 rdata = GET_DESC_DATA(ring, ring->cur);
1092 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1093 lower_32_bits(rdata->rdesc_dma));
1094
1095 /* Start the Tx coalescing timer */
1096 if (tx_coalesce && !channel->tx_timer_active) {
1097 channel->tx_timer_active = 1;
1098 hrtimer_start(&channel->tx_timer,
1099 ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
1100 HRTIMER_MODE_REL);
1101 }
1102
1103 DBGPR(" %s: descriptors %u to %u written\n",
1104 channel->name, start_index & (ring->rdesc_count - 1),
1105 (ring->cur - 1) & (ring->rdesc_count - 1));
1106
1107 DBGPR("<--xgbe_pre_xmit\n");
1108}
1109
1110static int xgbe_dev_read(struct xgbe_channel *channel)
1111{
1112 struct xgbe_ring *ring = channel->rx_ring;
1113 struct xgbe_ring_data *rdata;
1114 struct xgbe_ring_desc *rdesc;
1115 struct xgbe_packet_data *packet = &ring->packet_data;
1116 unsigned int err, etlt;
1117
1118 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1119
1120 rdata = GET_DESC_DATA(ring, ring->cur);
1121 rdesc = rdata->rdesc;
1122
1123 /* Check for data availability */
1124 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1125 return 1;
1126
1127#ifdef XGMAC_ENABLE_RX_DESC_DUMP
1128 xgbe_dump_rx_desc(ring, rdesc, ring->cur);
1129#endif
1130
1131 /* Get the packet length */
1132 rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1133
1134 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1135 /* Not all the data has been transferred for this packet */
1136 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1137 INCOMPLETE, 1);
1138 return 0;
1139 }
1140
1141 /* This is the last of the data for this packet */
1142 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1143 INCOMPLETE, 0);
1144
1145 /* Set checksum done indicator as appropriate */
1146 if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
1147 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1148 CSUM_DONE, 1);
1149
1150 /* Check for errors (only valid in last descriptor) */
1151 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1152 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1153 DBGPR(" err=%u, etlt=%#x\n", err, etlt);
1154
1155	if (!err || !etlt) {
1156 if (etlt == 0x09) {
1157 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1158 VLAN_CTAG, 1);
1159 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1160 RX_NORMAL_DESC0,
1161 OVT);
1162 DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
1163 }
1164 } else {
1165 if ((etlt == 0x05) || (etlt == 0x06))
1166 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1167 CSUM_DONE, 0);
1168 else
1169 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1170 FRAME, 1);
1171 }
1172
1173 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1174 ring->cur & (ring->rdesc_count - 1), ring->cur);
1175
1176 return 0;
1177}
1178
1179static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1180{
1181 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1182 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1183}
1184
1185static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1186{
1187 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1188 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1189}
1190
1191static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
1192 enum xgbe_int_state int_state)
1193{
1194 unsigned int dma_ch_ier;
1195
1196 if (int_state == XGMAC_INT_STATE_SAVE) {
1197 channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1198 channel->saved_ier &= DMA_INTERRUPT_MASK;
1199 } else {
1200 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1201 dma_ch_ier |= channel->saved_ier;
1202 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1203 }
1204}
1205
1206static int xgbe_enable_int(struct xgbe_channel *channel,
1207 enum xgbe_int int_id)
1208{
1209 switch (int_id) {
1210 case XGMAC_INT_DMA_ISR_DC0IS:
1211 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1212 break;
1213 case XGMAC_INT_DMA_CH_SR_TI:
1214 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1215 break;
1216 case XGMAC_INT_DMA_CH_SR_TPS:
1217 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
1218 break;
1219 case XGMAC_INT_DMA_CH_SR_TBU:
1220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
1221 break;
1222 case XGMAC_INT_DMA_CH_SR_RI:
1223 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
1224 break;
1225 case XGMAC_INT_DMA_CH_SR_RBU:
1226 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
1227 break;
1228 case XGMAC_INT_DMA_CH_SR_RPS:
1229 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
1230 break;
1231 case XGMAC_INT_DMA_CH_SR_FBE:
1232 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
1233 break;
1234 case XGMAC_INT_DMA_ALL:
1235 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
1236 break;
1237 default:
1238 return -1;
1239 }
1240
1241 return 0;
1242}
1243
1244static int xgbe_disable_int(struct xgbe_channel *channel,
1245 enum xgbe_int int_id)
1246{
1247 unsigned int dma_ch_ier;
1248
1249 switch (int_id) {
1250 case XGMAC_INT_DMA_ISR_DC0IS:
1251 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1252 break;
1253 case XGMAC_INT_DMA_CH_SR_TI:
1254 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1255 break;
1256 case XGMAC_INT_DMA_CH_SR_TPS:
1257 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
1258 break;
1259 case XGMAC_INT_DMA_CH_SR_TBU:
1260 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
1261 break;
1262 case XGMAC_INT_DMA_CH_SR_RI:
1263 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
1264 break;
1265 case XGMAC_INT_DMA_CH_SR_RBU:
1266 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
1267 break;
1268 case XGMAC_INT_DMA_CH_SR_RPS:
1269 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
1270 break;
1271 case XGMAC_INT_DMA_CH_SR_FBE:
1272 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
1273 break;
1274 case XGMAC_INT_DMA_ALL:
1275 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
1276
1277 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1278 dma_ch_ier &= ~DMA_INTERRUPT_MASK;
1279 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1280 break;
1281 default:
1282 return -1;
1283 }
1284
1285 return 0;
1286}
1287
1288static int xgbe_exit(struct xgbe_prv_data *pdata)
1289{
1290 unsigned int count = 2000;
1291
1292 DBGPR("-->xgbe_exit\n");
1293
1294 /* Issue a software reset */
1295 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
1296 usleep_range(10, 15);
1297
1298	/* Poll until the software reset completes */
1299 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1300 usleep_range(500, 600);
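	/* Worst case: 2000 iterations at 500-600us per poll gives the reset
	 * roughly 1.0-1.2s to complete before -EBUSY is returned.
	 */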
1301
1302 if (!count)
1303 return -EBUSY;
1304
1305 DBGPR("<--xgbe_exit\n");
1306
1307 return 0;
1308}
1309
1310static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1311{
1312 unsigned int i, count;
1313
1314 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1315 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
1316
1317	/* Poll until each Tx queue flush completes */
1318 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
1319 count = 2000;
1320 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
1321 MTL_Q_TQOMR, FTQ))
1322 usleep_range(500, 600);
1323
1324 if (!count)
1325 return -EBUSY;
1326 }
1327
1328 return 0;
1329}
1330
1331static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
1332{
1333 /* Set enhanced addressing mode */
1334 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
1335
1336 /* Set the System Bus mode */
1337 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
1338}
1339
1340static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
1341{
1342 unsigned int arcache, awcache;
1343
1344 arcache = 0;
1345 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
1346 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
1347 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
1348 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
1349 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
1350 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
1351 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
1352
1353 awcache = 0;
1354 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
1355 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
1356 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
1357 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
1358 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
1359 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
1360 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
1361 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
1362 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
1363}
1364
1365static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
1366{
1367 unsigned int i;
1368
1369	/* Set Tx to the weighted round robin scheduling algorithm (when a
1370	 * traffic class is using the ETS algorithm)
1371 */
1372 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1373
1374 /* Set Tx traffic classes to strict priority algorithm */
1375 for (i = 0; i < XGBE_TC_CNT; i++)
1376 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
1377
1378 /* Set Rx to strict priority algorithm */
1379 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
1380}
1381
1382static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
1383 unsigned char queue_count)
1384{
1385 unsigned int q_fifo_size = 0;
1386 enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1387
1388 /* Calculate Tx/Rx fifo share per queue */
1389 switch (fifo_size) {
1390 case 0:
1391 q_fifo_size = FIFO_SIZE_B(128);
1392 break;
1393 case 1:
1394 q_fifo_size = FIFO_SIZE_B(256);
1395 break;
1396 case 2:
1397 q_fifo_size = FIFO_SIZE_B(512);
1398 break;
1399 case 3:
1400 q_fifo_size = FIFO_SIZE_KB(1);
1401 break;
1402 case 4:
1403 q_fifo_size = FIFO_SIZE_KB(2);
1404 break;
1405 case 5:
1406 q_fifo_size = FIFO_SIZE_KB(4);
1407 break;
1408 case 6:
1409 q_fifo_size = FIFO_SIZE_KB(8);
1410 break;
1411 case 7:
1412 q_fifo_size = FIFO_SIZE_KB(16);
1413 break;
1414 case 8:
1415 q_fifo_size = FIFO_SIZE_KB(32);
1416 break;
1417 case 9:
1418 q_fifo_size = FIFO_SIZE_KB(64);
1419 break;
1420 case 10:
1421 q_fifo_size = FIFO_SIZE_KB(128);
1422 break;
1423 case 11:
1424 q_fifo_size = FIFO_SIZE_KB(256);
1425 break;
1426 }
1427 q_fifo_size = q_fifo_size / queue_count;
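	/* Worked example (illustrative): a fifo_size encoding of 7 (16KB)
	 * shared across four queues yields 4KB per queue, which maps to
	 * XGMAC_MTL_FIFO_SIZE_4K below.
	 */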
1428
1429	/* Convert the per-queue fifo size to its programmable register value */
1430 if (q_fifo_size >= FIFO_SIZE_KB(256))
1431 p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
1432 else if (q_fifo_size >= FIFO_SIZE_KB(128))
1433 p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
1434 else if (q_fifo_size >= FIFO_SIZE_KB(64))
1435 p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
1436 else if (q_fifo_size >= FIFO_SIZE_KB(32))
1437 p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
1438 else if (q_fifo_size >= FIFO_SIZE_KB(16))
1439 p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
1440 else if (q_fifo_size >= FIFO_SIZE_KB(8))
1441 p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
1442 else if (q_fifo_size >= FIFO_SIZE_KB(4))
1443 p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
1444 else if (q_fifo_size >= FIFO_SIZE_KB(2))
1445 p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
1446 else if (q_fifo_size >= FIFO_SIZE_KB(1))
1447 p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
1448 else if (q_fifo_size >= FIFO_SIZE_B(512))
1449 p_fifo = XGMAC_MTL_FIFO_SIZE_512;
1450 else if (q_fifo_size >= FIFO_SIZE_B(256))
1451 p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1452
1453 return p_fifo;
1454}
1455
1456static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
1457{
1458 enum xgbe_mtl_fifo_size fifo_size;
1459 unsigned int i;
1460
1461 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
1462 pdata->hw_feat.tx_q_cnt);
1463
1464 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1465 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
1466
1467 netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
1468 pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
1469}
1470
1471static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
1472{
1473 enum xgbe_mtl_fifo_size fifo_size;
1474 unsigned int i;
1475
1476 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
1477 pdata->hw_feat.rx_q_cnt);
1478
1479 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1480 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
1481
1482 netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
1483 pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
1484}
1485
1486static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
1487{
1488 unsigned int i, reg, reg_val;
1489 unsigned int q_count = pdata->hw_feat.rx_q_cnt;
1490
1491 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1492 reg = MTL_RQDCM0R;
1493 reg_val = 0;
1494 for (i = 0; i < q_count;) {
1495 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
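		/* Each queue occupies one byte of an RQDCM register; writing
		 * 0x80 into that byte is assumed to set the bit selecting
		 * dynamic DMA channel mapping for the queue.
		 */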
1496
1497 if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
1498 continue;
1499
1500 XGMAC_IOWRITE(pdata, reg, reg_val);
1501
1502 reg += MTL_RQDCM_INC;
1503 reg_val = 0;
1504 }
1505}
1506
1507static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
1512 /* Activate flow control when less than 4k left in fifo */
1513 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
1514
1515 /* De-activate flow control when more than 6k left in fifo */
1516 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
1517 }
1518}
1519
1520static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
1521{
1522 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
1523}
1524
1525static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
1526{
1527 unsigned int val;
1528
1529 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
1530
1531 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1532}
1533
1534static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
1535{
1536 if (pdata->netdev->features & NETIF_F_RXCSUM)
1537 xgbe_enable_rx_csum(pdata);
1538 else
1539 xgbe_disable_rx_csum(pdata);
1540}
1541
1542static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
1543{
1544 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1545 xgbe_enable_rx_vlan_stripping(pdata);
1546 else
1547 xgbe_disable_rx_vlan_stripping(pdata);
1548}
1549
1550static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
1551{
1552 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1553 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
1554
1555 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
1556 stats->txoctetcount_gb +=
1557 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1558
1559 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
1560 stats->txframecount_gb +=
1561 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1562
1563 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
1564 stats->txbroadcastframes_g +=
1565 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1566
1567 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
1568 stats->txmulticastframes_g +=
1569 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1570
1571 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
1572 stats->tx64octets_gb +=
1573 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1574
1575 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
1576 stats->tx65to127octets_gb +=
1577 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1578
1579 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
1580 stats->tx128to255octets_gb +=
1581 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1582
1583 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
1584 stats->tx256to511octets_gb +=
1585 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1586
1587 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
1588 stats->tx512to1023octets_gb +=
1589 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1590
1591 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
1592 stats->tx1024tomaxoctets_gb +=
1593 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1594
1595 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
1596 stats->txunicastframes_gb +=
1597 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1598
1599 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
1600 stats->txmulticastframes_gb +=
1601 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1602
1603 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
1604 stats->txbroadcastframes_g +=
1605 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1606
1607 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
1608 stats->txunderflowerror +=
1609 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1610
1611 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
1612 stats->txoctetcount_g +=
1613 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1614
1615 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
1616 stats->txframecount_g +=
1617 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1618
1619 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
1620 stats->txpauseframes +=
1621 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1622
1623 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
1624 stats->txvlanframes_g +=
1625 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1626}
1627
1628static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
1629{
1630 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1631 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
1632
1633 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
1634 stats->rxframecount_gb +=
1635 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1636
1637 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
1638 stats->rxoctetcount_gb +=
1639 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1640
1641 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
1642 stats->rxoctetcount_g +=
1643 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1644
1645 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
1646 stats->rxbroadcastframes_g +=
1647 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1648
1649 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
1650 stats->rxmulticastframes_g +=
1651 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1652
1653 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
1654 stats->rxcrcerror +=
1655 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1656
1657 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
1658 stats->rxrunterror +=
1659 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1660
1661 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
1662 stats->rxjabbererror +=
1663 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1664
1665 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
1666 stats->rxundersize_g +=
1667 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1668
1669 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
1670 stats->rxoversize_g +=
1671 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1672
1673 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
1674 stats->rx64octets_gb +=
1675 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1676
1677 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
1678 stats->rx65to127octets_gb +=
1679 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1680
1681 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
1682 stats->rx128to255octets_gb +=
1683 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1684
1685 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
1686 stats->rx256to511octets_gb +=
1687 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1688
1689 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
1690 stats->rx512to1023octets_gb +=
1691 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1692
1693 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
1694 stats->rx1024tomaxoctets_gb +=
1695 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1696
1697 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
1698 stats->rxunicastframes_g +=
1699 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1700
1701 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
1702 stats->rxlengtherror +=
1703 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1704
1705 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
1706 stats->rxoutofrangetype +=
1707 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1708
1709 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
1710 stats->rxpauseframes +=
1711 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1712
1713 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
1714 stats->rxfifooverflow +=
1715 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1716
1717 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
1718 stats->rxvlanframes_gb +=
1719 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1720
1721 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
1722 stats->rxwatchdogerror +=
1723 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1724}
1725
1726static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
1727{
1728 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1729
1730 /* Freeze counters */
1731 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
1732
1733 stats->txoctetcount_gb +=
1734 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1735
1736 stats->txframecount_gb +=
1737 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1738
1739 stats->txbroadcastframes_g +=
1740 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1741
1742 stats->txmulticastframes_g +=
1743 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1744
1745 stats->tx64octets_gb +=
1746 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1747
1748 stats->tx65to127octets_gb +=
1749 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1750
1751 stats->tx128to255octets_gb +=
1752 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1753
1754 stats->tx256to511octets_gb +=
1755 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1756
1757 stats->tx512to1023octets_gb +=
1758 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1759
1760 stats->tx1024tomaxoctets_gb +=
1761 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1762
1763 stats->txunicastframes_gb +=
1764 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1765
1766 stats->txmulticastframes_gb +=
1767 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1768
1769 stats->txbroadcastframes_g +=
1770 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1771
1772 stats->txunderflowerror +=
1773 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1774
1775 stats->txoctetcount_g +=
1776 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1777
1778 stats->txframecount_g +=
1779 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1780
1781 stats->txpauseframes +=
1782 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1783
1784 stats->txvlanframes_g +=
1785 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1786
1787 stats->rxframecount_gb +=
1788 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1789
1790 stats->rxoctetcount_gb +=
1791 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1792
1793 stats->rxoctetcount_g +=
1794 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1795
1796 stats->rxbroadcastframes_g +=
1797 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1798
1799 stats->rxmulticastframes_g +=
1800 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1801
1802 stats->rxcrcerror +=
1803 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1804
1805 stats->rxrunterror +=
1806 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1807
1808 stats->rxjabbererror +=
1809 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1810
1811 stats->rxundersize_g +=
1812 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1813
1814 stats->rxoversize_g +=
1815 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1816
1817 stats->rx64octets_gb +=
1818 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1819
1820 stats->rx65to127octets_gb +=
1821 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1822
1823 stats->rx128to255octets_gb +=
1824 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1825
1826 stats->rx256to511octets_gb +=
1827 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1828
1829 stats->rx512to1023octets_gb +=
1830 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1831
1832 stats->rx1024tomaxoctets_gb +=
1833 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1834
1835 stats->rxunicastframes_g +=
1836 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1837
1838 stats->rxlengtherror +=
1839 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1840
1841 stats->rxoutofrangetype +=
1842 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1843
1844 stats->rxpauseframes +=
1845 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1846
1847 stats->rxfifooverflow +=
1848 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1849
1850 stats->rxvlanframes_gb +=
1851 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1852
1853 stats->rxwatchdogerror +=
1854 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1855
1856 /* Un-freeze counters */
1857 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1858}
1859
1860static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
1861{
1862 /* Set counters to reset on read */
1863 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
1864
1865 /* Reset the counters */
1866 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
1867}
1868
1869static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
1870{
1871 struct xgbe_channel *channel;
1872 unsigned int i;
1873
1874 /* Enable each Tx DMA channel */
1875 channel = pdata->channel;
1876 for (i = 0; i < pdata->channel_count; i++, channel++) {
1877 if (!channel->tx_ring)
1878 break;
1879
1880 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1881 }
1882
1883 /* Enable each Tx queue */
1884 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1885 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
1886 MTL_Q_ENABLED);
1887
1888 /* Enable MAC Tx */
1889 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1890}
1891
1892static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
1893{
1894 struct xgbe_channel *channel;
1895 unsigned int i;
1896
1897 /* Disable MAC Tx */
1898 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1899
1900 /* Disable each Tx queue */
1901 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1902 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
1903
1904 /* Disable each Tx DMA channel */
1905 channel = pdata->channel;
1906 for (i = 0; i < pdata->channel_count; i++, channel++) {
1907 if (!channel->tx_ring)
1908 break;
1909
1910 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1911 }
1912}
1913
1914static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
1915{
1916 struct xgbe_channel *channel;
1917 unsigned int reg_val, i;
1918
1919 /* Enable each Rx DMA channel */
1920 channel = pdata->channel;
1921 for (i = 0; i < pdata->channel_count; i++, channel++) {
1922 if (!channel->rx_ring)
1923 break;
1924
1925 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
1926 }
1927
1928 /* Enable each Rx queue */
1929 reg_val = 0;
1930 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1931 reg_val |= (0x02 << (i << 1));
1932 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
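	/* MAC_RQC0R holds a two-bit enable field per queue; 0x2 is assumed
	 * to mean "enabled for generic/DCB traffic" (as opposed to 0x1 for
	 * AV traffic).
	 */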
1933
1934 /* Enable MAC Rx */
1935 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
1936 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
1937 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
1938 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
1939}
1940
1941static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
1942{
1943 struct xgbe_channel *channel;
1944 unsigned int i;
1945
1946 /* Disable MAC Rx */
1947 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
1948 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
1949 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
1950 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
1951
1952 /* Disable each Rx queue */
1953 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
1954
1955 /* Disable each Rx DMA channel */
1956 channel = pdata->channel;
1957 for (i = 0; i < pdata->channel_count; i++, channel++) {
1958 if (!channel->rx_ring)
1959 break;
1960
1961 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
1962 }
1963}
1964
1965static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
1966{
1967 struct xgbe_channel *channel;
1968 unsigned int i;
1969
1970 /* Enable each Tx DMA channel */
1971 channel = pdata->channel;
1972 for (i = 0; i < pdata->channel_count; i++, channel++) {
1973 if (!channel->tx_ring)
1974 break;
1975
1976 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1977 }
1978
1979 /* Enable MAC Tx */
1980 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1981}
1982
1983static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
1984{
1985 struct xgbe_channel *channel;
1986 unsigned int i;
1987
1988 /* Disable MAC Tx */
1989 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1990
1991 /* Disable each Tx DMA channel */
1992 channel = pdata->channel;
1993 for (i = 0; i < pdata->channel_count; i++, channel++) {
1994 if (!channel->tx_ring)
1995 break;
1996
1997 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1998 }
1999}
2000
2001static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
2002{
2003 struct xgbe_channel *channel;
2004 unsigned int i;
2005
2006 /* Enable each Rx DMA channel */
2007 channel = pdata->channel;
2008 for (i = 0; i < pdata->channel_count; i++, channel++) {
2009 if (!channel->rx_ring)
2010 break;
2011
2012 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2013 }
2014}
2015
2016static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
2017{
2018 struct xgbe_channel *channel;
2019 unsigned int i;
2020
2021 /* Disable each Rx DMA channel */
2022 channel = pdata->channel;
2023 for (i = 0; i < pdata->channel_count; i++, channel++) {
2024 if (!channel->rx_ring)
2025 break;
2026
2027 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2028 }
2029}
2030
2031static int xgbe_init(struct xgbe_prv_data *pdata)
2032{
2033 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2034 int ret;
2035
2036 DBGPR("-->xgbe_init\n");
2037
2038 /* Flush Tx queues */
2039 ret = xgbe_flush_tx_queues(pdata);
2040 if (ret)
2041 return ret;
2042
2043 /*
2044 * Initialize DMA related features
2045 */
2046 xgbe_config_dma_bus(pdata);
2047 xgbe_config_dma_cache(pdata);
2048 xgbe_config_osp_mode(pdata);
2049 xgbe_config_pblx8(pdata);
2050 xgbe_config_tx_pbl_val(pdata);
2051 xgbe_config_rx_pbl_val(pdata);
2052 xgbe_config_rx_coalesce(pdata);
2053 xgbe_config_tx_coalesce(pdata);
2054 xgbe_config_rx_buffer_size(pdata);
2055 xgbe_config_tso_mode(pdata);
2056 desc_if->wrapper_tx_desc_init(pdata);
2057 desc_if->wrapper_rx_desc_init(pdata);
2058 xgbe_enable_dma_interrupts(pdata);
2059
2060 /*
2061 * Initialize MTL related features
2062 */
2063 xgbe_config_mtl_mode(pdata);
2064 xgbe_config_rx_queue_mapping(pdata);
2065	/* TODO: Program the priorities mapped to the selected Traffic Classes
2066	   in the MTL_TC_Prty_Map0-3 registers */
2067 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
2068 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
2069 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
2070 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
2071 xgbe_config_tx_fifo_size(pdata);
2072 xgbe_config_rx_fifo_size(pdata);
2073 xgbe_config_flow_control_threshold(pdata);
2074	/* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
2075	/* TODO: Error packet and undersized good packet forwarding enable
2076	   (FEP and FUP)
2077	 */
2078 xgbe_enable_mtl_interrupts(pdata);
2079
2080 /* Transmit Class Weight */
2081 XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
2082
2083 /*
2084 * Initialize MAC related features
2085 */
2086 xgbe_config_mac_address(pdata);
2087 xgbe_config_jumbo_enable(pdata);
2088 xgbe_config_flow_control(pdata);
2089 xgbe_config_checksum_offload(pdata);
2090 xgbe_config_vlan_support(pdata);
2091 xgbe_config_mmc(pdata);
2092 xgbe_enable_mac_interrupts(pdata);
2093
2094 DBGPR("<--xgbe_init\n");
2095
2096 return 0;
2097}
2098
2099void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
2100{
2101 DBGPR("-->xgbe_init_function_ptrs\n");
2102
2103 hw_if->tx_complete = xgbe_tx_complete;
2104
2105 hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
2106 hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
2107 hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
2108 hw_if->set_mac_address = xgbe_set_mac_address;
2109
2110 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
2111 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
2112
2113 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
2114 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
2115
2116 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
2117 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
2118
2119 hw_if->set_gmii_speed = xgbe_set_gmii_speed;
2120 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
2121 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
2122
2123 hw_if->enable_tx = xgbe_enable_tx;
2124 hw_if->disable_tx = xgbe_disable_tx;
2125 hw_if->enable_rx = xgbe_enable_rx;
2126 hw_if->disable_rx = xgbe_disable_rx;
2127
2128 hw_if->powerup_tx = xgbe_powerup_tx;
2129 hw_if->powerdown_tx = xgbe_powerdown_tx;
2130 hw_if->powerup_rx = xgbe_powerup_rx;
2131 hw_if->powerdown_rx = xgbe_powerdown_rx;
2132
2133 hw_if->pre_xmit = xgbe_pre_xmit;
2134 hw_if->dev_read = xgbe_dev_read;
2135 hw_if->enable_int = xgbe_enable_int;
2136 hw_if->disable_int = xgbe_disable_int;
2137 hw_if->init = xgbe_init;
2138 hw_if->exit = xgbe_exit;
2139
2140	/* Descriptor related sequences have to be initialized here */
2141 hw_if->tx_desc_init = xgbe_tx_desc_init;
2142 hw_if->rx_desc_init = xgbe_rx_desc_init;
2143 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
2144 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
2145 hw_if->is_last_desc = xgbe_is_last_desc;
2146 hw_if->is_context_desc = xgbe_is_context_desc;
2147
2148 /* For FLOW ctrl */
2149 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
2150 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
2151
2152 /* For RX coalescing */
2153 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
2154 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
2155 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
2156 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
2157
2158 /* For RX and TX threshold config */
2159 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
2160 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
2161
2162 /* For RX and TX Store and Forward Mode config */
2163 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
2164 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
2165
2166 /* For TX DMA Operating on Second Frame config */
2167 hw_if->config_osp_mode = xgbe_config_osp_mode;
2168
2169 /* For RX and TX PBL config */
2170 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
2171 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
2172 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
2173 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
2174 hw_if->config_pblx8 = xgbe_config_pblx8;
2175
2176 /* For MMC statistics support */
2177 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
2178 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
2179 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
2180
2181 DBGPR("<--xgbe_init_function_ptrs\n");
2182}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000000..cfe3d93b5f52
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/tcp.h>
119#include <linux/if_vlan.h>
120#include <linux/phy.h>
121#include <net/busy_poll.h>
122#include <linux/clk.h>
123#include <linux/if_ether.h>
124
125#include "xgbe.h"
126#include "xgbe-common.h"
127
128
129static int xgbe_poll(struct napi_struct *, int);
130static void xgbe_set_rx_mode(struct net_device *);
131
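/* Return the number of Tx descriptors available for use: the ring
 * size less the descriptors still in flight (cur - dirty)
 */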
132static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
133{
134 return (ring->rdesc_count - (ring->cur - ring->dirty));
135}
136
137static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
138{
139 unsigned int rx_buf_size;
140
141 if (mtu > XGMAC_JUMBO_PACKET_MTU) {
142 netdev_alert(netdev, "MTU exceeds maximum supported value\n");
143 return -EINVAL;
144 }
145
146 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
147 if (rx_buf_size < RX_MIN_BUF_SIZE)
148 rx_buf_size = RX_MIN_BUF_SIZE;
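	/* Round the buffer size up to the next RX_BUF_ALIGN boundary */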
149 rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
150
151 return rx_buf_size;
152}
153
154static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
155{
156 struct xgbe_hw_if *hw_if = &pdata->hw_if;
157 struct xgbe_channel *channel;
158 unsigned int i;
159
160 channel = pdata->channel;
161 for (i = 0; i < pdata->channel_count; i++, channel++) {
162 if (channel->tx_ring)
163 hw_if->enable_int(channel,
164 XGMAC_INT_DMA_CH_SR_TI);
165 if (channel->rx_ring)
166 hw_if->enable_int(channel,
167 XGMAC_INT_DMA_CH_SR_RI);
168 }
169}
170
171static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
172{
173 struct xgbe_hw_if *hw_if = &pdata->hw_if;
174 struct xgbe_channel *channel;
175 unsigned int i;
176
177 channel = pdata->channel;
178 for (i = 0; i < pdata->channel_count; i++, channel++) {
179 if (channel->tx_ring)
180 hw_if->disable_int(channel,
181 XGMAC_INT_DMA_CH_SR_TI);
182 if (channel->rx_ring)
183 hw_if->disable_int(channel,
184 XGMAC_INT_DMA_CH_SR_RI);
185 }
186}
187
188static irqreturn_t xgbe_isr(int irq, void *data)
189{
190 struct xgbe_prv_data *pdata = data;
191 struct xgbe_hw_if *hw_if = &pdata->hw_if;
192 struct xgbe_channel *channel;
193 unsigned int dma_isr, dma_ch_isr;
194 unsigned int mac_isr;
195 unsigned int i;
196
197	/* The DMA interrupt status register also reports MAC and MTL
198	 * interrupts, so a single check of this register for a non-zero
199	 * value is sufficient
200	 */
201 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
202 if (!dma_isr)
203 goto isr_done;
204
205 DBGPR("-->xgbe_isr\n");
206
207 DBGPR(" DMA_ISR = %08x\n", dma_isr);
208 DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
209 DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
210
211 for (i = 0; i < pdata->channel_count; i++) {
212 if (!(dma_isr & (1 << i)))
213 continue;
214
215 channel = pdata->channel + i;
216
217 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
218 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
219
220 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
221 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
222 if (napi_schedule_prep(&pdata->napi)) {
223 /* Disable Tx and Rx interrupts */
224 xgbe_disable_rx_tx_ints(pdata);
225
226 /* Turn on polling */
227 __napi_schedule(&pdata->napi);
228 }
229 }
230
231 /* Restart the device on a Fatal Bus Error */
232 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
233 schedule_work(&pdata->restart_work);
234
235 /* Clear all interrupt signals */
236 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
237 }
238
239 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
240 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
241
242 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
243 hw_if->tx_mmc_int(pdata);
244
245 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
246 hw_if->rx_mmc_int(pdata);
247 }
248
249 DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
250
251 DBGPR("<--xgbe_isr\n");
252
253isr_done:
254 return IRQ_HANDLED;
255}
256
257static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
258{
259 struct xgbe_channel *channel = container_of(timer,
260 struct xgbe_channel,
261 tx_timer);
262 struct xgbe_ring *ring = channel->tx_ring;
263 struct xgbe_prv_data *pdata = channel->pdata;
264 unsigned long flags;
265
266 DBGPR("-->xgbe_tx_timer\n");
267
268 spin_lock_irqsave(&ring->lock, flags);
269
270 if (napi_schedule_prep(&pdata->napi)) {
271 /* Disable Tx and Rx interrupts */
272 xgbe_disable_rx_tx_ints(pdata);
273
274 /* Turn on polling */
275 __napi_schedule(&pdata->napi);
276 }
277
278 channel->tx_timer_active = 0;
279
280 spin_unlock_irqrestore(&ring->lock, flags);
281
282 DBGPR("<--xgbe_tx_timer\n");
283
284 return HRTIMER_NORESTART;
285}
286
287static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
288{
289 struct xgbe_channel *channel;
290 unsigned int i;
291
292 DBGPR("-->xgbe_init_tx_timers\n");
293
294 channel = pdata->channel;
295 for (i = 0; i < pdata->channel_count; i++, channel++) {
296 if (!channel->tx_ring)
297 break;
298
299 DBGPR(" %s adding tx timer\n", channel->name);
300 hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
301 HRTIMER_MODE_REL);
302 channel->tx_timer.function = xgbe_tx_timer;
303 }
304
305 DBGPR("<--xgbe_init_tx_timers\n");
306}
307
308static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 DBGPR("-->xgbe_stop_tx_timers\n");
314
315 channel = pdata->channel;
316 for (i = 0; i < pdata->channel_count; i++, channel++) {
317 if (!channel->tx_ring)
318 break;
319
320 DBGPR(" %s deleting tx timer\n", channel->name);
321 channel->tx_timer_active = 0;
322 hrtimer_cancel(&channel->tx_timer);
323 }
324
325 DBGPR("<--xgbe_stop_tx_timers\n");
326}
327
328void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
329{
330 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
331 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
332
333 DBGPR("-->xgbe_get_all_hw_features\n");
334
335 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
336 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
337 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
338
339 memset(hw_feat, 0, sizeof(*hw_feat));
340
341 /* Hardware feature register 0 */
342 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
343 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
344 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
345 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
346 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
347 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
348 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
349 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
350 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
351 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
352 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
353 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
354 ADDMACADRSEL);
355 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
356 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
357
358 /* Hardware feature register 1 */
359 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
360 RXFIFOSIZE);
361 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
362 TXFIFOSIZE);
363 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
364 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
365 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
366 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
367 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
368 HASHTBLSZ);
369 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
370 L3L4FNUM);
371
372 /* Hardware feature register 2 */
373 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
374 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
375 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
376 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
377 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
378 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
379
380 /* The Queue and Channel counts are zero based so increment them
381 * to get the actual number
382 */
383 hw_feat->rx_q_cnt++;
384 hw_feat->tx_q_cnt++;
385 hw_feat->rx_ch_cnt++;
386 hw_feat->tx_ch_cnt++;
387
388 DBGPR("<--xgbe_get_all_hw_features\n");
389}
390
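/* Enable NAPI processing; the NAPI instance is registered with the
 * stack only when "add" is set (xgbe_start passes 1, xgbe_powerup 0)
 */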
391static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
392{
393 if (add)
394 netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
395 NAPI_POLL_WEIGHT);
396 napi_enable(&pdata->napi);
397}
398
399static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
400{
401 napi_disable(&pdata->napi);
402}
403
404void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
405{
406 struct xgbe_hw_if *hw_if = &pdata->hw_if;
407
408 DBGPR("-->xgbe_init_tx_coalesce\n");
409
410 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
411 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
412
413 hw_if->config_tx_coalesce(pdata);
414
415 DBGPR("<--xgbe_init_tx_coalesce\n");
416}
417
418void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
419{
420 struct xgbe_hw_if *hw_if = &pdata->hw_if;
421
422 DBGPR("-->xgbe_init_rx_coalesce\n");
423
424 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
425 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
426
427 hw_if->config_rx_coalesce(pdata);
428
429 DBGPR("<--xgbe_init_rx_coalesce\n");
430}
431
432static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_desc_if *desc_if = &pdata->desc_if;
435 struct xgbe_channel *channel;
436 struct xgbe_ring *ring;
437 struct xgbe_ring_data *rdata;
438 unsigned int i, j;
439
440 DBGPR("-->xgbe_free_tx_skbuff\n");
441
442 channel = pdata->channel;
443 for (i = 0; i < pdata->channel_count; i++, channel++) {
444 ring = channel->tx_ring;
445 if (!ring)
446 break;
447
448 for (j = 0; j < ring->rdesc_count; j++) {
449 rdata = GET_DESC_DATA(ring, j);
450 desc_if->unmap_skb(pdata, rdata);
451 }
452 }
453
454 DBGPR("<--xgbe_free_tx_skbuff\n");
455}
456
457static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
458{
459 struct xgbe_desc_if *desc_if = &pdata->desc_if;
460 struct xgbe_channel *channel;
461 struct xgbe_ring *ring;
462 struct xgbe_ring_data *rdata;
463 unsigned int i, j;
464
465 DBGPR("-->xgbe_free_rx_skbuff\n");
466
467 channel = pdata->channel;
468 for (i = 0; i < pdata->channel_count; i++, channel++) {
469 ring = channel->rx_ring;
470 if (!ring)
471 break;
472
473 for (j = 0; j < ring->rdesc_count; j++) {
474 rdata = GET_DESC_DATA(ring, j);
475 desc_if->unmap_skb(pdata, rdata);
476 }
477 }
478
479 DBGPR("<--xgbe_free_rx_skbuff\n");
480}
481
482int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
483{
484 struct xgbe_prv_data *pdata = netdev_priv(netdev);
485 struct xgbe_hw_if *hw_if = &pdata->hw_if;
486 unsigned long flags;
487
488 DBGPR("-->xgbe_powerdown\n");
489
490 if (!netif_running(netdev) ||
491 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
492 netdev_alert(netdev, "Device is already powered down\n");
493 DBGPR("<--xgbe_powerdown\n");
494 return -EINVAL;
495 }
496
497 phy_stop(pdata->phydev);
498
499 spin_lock_irqsave(&pdata->lock, flags);
500
501 if (caller == XGMAC_DRIVER_CONTEXT)
502 netif_device_detach(netdev);
503
504 netif_tx_stop_all_queues(netdev);
505 xgbe_napi_disable(pdata);
506
507 /* Powerdown Tx/Rx */
508 hw_if->powerdown_tx(pdata);
509 hw_if->powerdown_rx(pdata);
510
511 pdata->power_down = 1;
512
513 spin_unlock_irqrestore(&pdata->lock, flags);
514
515 DBGPR("<--xgbe_powerdown\n");
516
517 return 0;
518}
519
520int xgbe_powerup(struct net_device *netdev, unsigned int caller)
521{
522 struct xgbe_prv_data *pdata = netdev_priv(netdev);
523 struct xgbe_hw_if *hw_if = &pdata->hw_if;
524 unsigned long flags;
525
526 DBGPR("-->xgbe_powerup\n");
527
528 if (!netif_running(netdev) ||
529 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
530 netdev_alert(netdev, "Device is already powered up\n");
531 DBGPR("<--xgbe_powerup\n");
532 return -EINVAL;
533 }
534
535 spin_lock_irqsave(&pdata->lock, flags);
536
537 pdata->power_down = 0;
538
539 phy_start(pdata->phydev);
540
541 /* Enable Tx/Rx */
542 hw_if->powerup_tx(pdata);
543 hw_if->powerup_rx(pdata);
544
545 if (caller == XGMAC_DRIVER_CONTEXT)
546 netif_device_attach(netdev);
547
548 xgbe_napi_enable(pdata, 0);
549 netif_tx_start_all_queues(netdev);
550
551 spin_unlock_irqrestore(&pdata->lock, flags);
552
553 DBGPR("<--xgbe_powerup\n");
554
555 return 0;
556}
557
558static int xgbe_start(struct xgbe_prv_data *pdata)
559{
560 struct xgbe_hw_if *hw_if = &pdata->hw_if;
561 struct net_device *netdev = pdata->netdev;
562
563 DBGPR("-->xgbe_start\n");
564
565 xgbe_set_rx_mode(netdev);
566
567 hw_if->init(pdata);
568
569 phy_start(pdata->phydev);
570
571 hw_if->enable_tx(pdata);
572 hw_if->enable_rx(pdata);
573
574 xgbe_init_tx_timers(pdata);
575
576 xgbe_napi_enable(pdata, 1);
577 netif_tx_start_all_queues(netdev);
578
579 DBGPR("<--xgbe_start\n");
580
581 return 0;
582}
583
584static void xgbe_stop(struct xgbe_prv_data *pdata)
585{
586 struct xgbe_hw_if *hw_if = &pdata->hw_if;
587 struct net_device *netdev = pdata->netdev;
588
589 DBGPR("-->xgbe_stop\n");
590
591 phy_stop(pdata->phydev);
592
593 netif_tx_stop_all_queues(netdev);
594 xgbe_napi_disable(pdata);
595
596 xgbe_stop_tx_timers(pdata);
597
598 hw_if->disable_tx(pdata);
599 hw_if->disable_rx(pdata);
600
601 DBGPR("<--xgbe_stop\n");
602}
603
604static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
605{
606 struct xgbe_hw_if *hw_if = &pdata->hw_if;
607
608 DBGPR("-->xgbe_restart_dev\n");
609
610 /* If not running, "restart" will happen on open */
611 if (!netif_running(pdata->netdev))
612 return;
613
614 xgbe_stop(pdata);
615 synchronize_irq(pdata->irq_number);
616
617 xgbe_free_tx_skbuff(pdata);
618 xgbe_free_rx_skbuff(pdata);
619
620 /* Issue software reset to device if requested */
621 if (reset)
622 hw_if->exit(pdata);
623
624 xgbe_start(pdata);
625
626 DBGPR("<--xgbe_restart_dev\n");
627}
628
629static void xgbe_restart(struct work_struct *work)
630{
631 struct xgbe_prv_data *pdata = container_of(work,
632 struct xgbe_prv_data,
633 restart_work);
634
635 rtnl_lock();
636
637 xgbe_restart_dev(pdata, 1);
638
639 rtnl_unlock();
640}
641
642static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
643{
644 if (vlan_tx_tag_present(skb))
645 packet->vlan_ctag = vlan_tx_tag_get(skb);
646}
647
648static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
649{
650 int ret;
651
652 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
653 TSO_ENABLE))
654 return 0;
655
656 ret = skb_cow_head(skb, 0);
657 if (ret)
658 return ret;
659
660 packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
661 packet->tcp_header_len = tcp_hdrlen(skb);
662 packet->tcp_payload_len = skb->len - packet->header_len;
663 packet->mss = skb_shinfo(skb)->gso_size;
664 DBGPR(" packet->header_len=%u\n", packet->header_len);
665 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
666 packet->tcp_header_len, packet->tcp_payload_len);
667 DBGPR(" packet->mss=%u\n", packet->mss);
668
669 return 0;
670}
671
672static int xgbe_is_tso(struct sk_buff *skb)
673{
674 if (skb->ip_summed != CHECKSUM_PARTIAL)
675 return 0;
676
677 if (!skb_is_gso(skb))
678 return 0;
679
680 DBGPR(" TSO packet to be processed\n");
681
682 return 1;
683}
684
685static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
686 struct xgbe_packet_data *packet)
687{
688 struct skb_frag_struct *frag;
689 unsigned int context_desc;
690 unsigned int len;
691 unsigned int i;
692
693 context_desc = 0;
694 packet->rdesc_count = 0;
695
696 if (xgbe_is_tso(skb)) {
697		/* TSO requires an extra descriptor if mss is different */
698 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
699 context_desc = 1;
700 packet->rdesc_count++;
701 }
702
703		/* TSO requires an extra descriptor for TSO header */
704 packet->rdesc_count++;
705
706 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
707 TSO_ENABLE, 1);
708 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
709 CSUM_ENABLE, 1);
710 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
711 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
712 CSUM_ENABLE, 1);
713
714 if (vlan_tx_tag_present(skb)) {
715 /* VLAN requires an extra descriptor if tag is different */
716 if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
717 /* We can share with the TSO context descriptor */
718 if (!context_desc) {
719 context_desc = 1;
720 packet->rdesc_count++;
721 }
722
723 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
724 VLAN_CTAG, 1);
725 }
726
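	/* One descriptor is needed per TX_MAX_BUF_SIZE chunk of the
	 * linear data and, below, of each fragment
	 */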
727 for (len = skb_headlen(skb); len;) {
728 packet->rdesc_count++;
729 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
730 }
731
732 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
733 frag = &skb_shinfo(skb)->frags[i];
734 for (len = skb_frag_size(frag); len; ) {
735 packet->rdesc_count++;
736 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
737 }
738 }
739}
740
741static int xgbe_open(struct net_device *netdev)
742{
743 struct xgbe_prv_data *pdata = netdev_priv(netdev);
744 struct xgbe_hw_if *hw_if = &pdata->hw_if;
745 struct xgbe_desc_if *desc_if = &pdata->desc_if;
746 int ret;
747
748 DBGPR("-->xgbe_open\n");
749
750 /* Enable the clock */
751 ret = clk_prepare_enable(pdata->sysclock);
752 if (ret) {
753 netdev_alert(netdev, "clk_prepare_enable failed\n");
754 return ret;
755 }
756
757 /* Calculate the Rx buffer size before allocating rings */
758 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
759 if (ret < 0)
760 goto err_clk;
761 pdata->rx_buf_size = ret;
762
763 /* Allocate the ring descriptors and buffers */
764 ret = desc_if->alloc_ring_resources(pdata);
765 if (ret)
766 goto err_clk;
767
768 /* Initialize the device restart work struct */
769 INIT_WORK(&pdata->restart_work, xgbe_restart);
770
771 /* Request interrupts */
772 ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
773 netdev->name, pdata);
774 if (ret) {
775		netdev_alert(netdev, "error requesting irq %d\n",
776			     netdev->irq);
777 goto err_irq;
778 }
779 pdata->irq_number = netdev->irq;
780
781 ret = xgbe_start(pdata);
782 if (ret)
783 goto err_start;
784
785 DBGPR("<--xgbe_open\n");
786
787 return 0;
788
789err_start:
790 hw_if->exit(pdata);
791
792 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
793 pdata->irq_number = 0;
794
795err_irq:
796 desc_if->free_ring_resources(pdata);
797
798err_clk:
799 clk_disable_unprepare(pdata->sysclock);
800
801 return ret;
802}
803
804static int xgbe_close(struct net_device *netdev)
805{
806 struct xgbe_prv_data *pdata = netdev_priv(netdev);
807 struct xgbe_hw_if *hw_if = &pdata->hw_if;
808 struct xgbe_desc_if *desc_if = &pdata->desc_if;
809
810 DBGPR("-->xgbe_close\n");
811
812 /* Stop the device */
813 xgbe_stop(pdata);
814
815 /* Issue software reset to device */
816 hw_if->exit(pdata);
817
818 /* Free all the ring data */
819 desc_if->free_ring_resources(pdata);
820
821 /* Release the interrupt */
822 if (pdata->irq_number != 0) {
823 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
824 pdata->irq_number = 0;
825 }
826
827 /* Disable the clock */
828 clk_disable_unprepare(pdata->sysclock);
829
830 DBGPR("<--xgbe_close\n");
831
832 return 0;
833}
834
835static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
836{
837 struct xgbe_prv_data *pdata = netdev_priv(netdev);
838 struct xgbe_hw_if *hw_if = &pdata->hw_if;
839 struct xgbe_desc_if *desc_if = &pdata->desc_if;
840 struct xgbe_channel *channel;
841 struct xgbe_ring *ring;
842 struct xgbe_packet_data *packet;
843 unsigned long flags;
844 int ret;
845
846 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
847
848 channel = pdata->channel + skb->queue_mapping;
849 ring = channel->tx_ring;
850 packet = &ring->packet_data;
851
852 ret = NETDEV_TX_OK;
853
854 spin_lock_irqsave(&ring->lock, flags);
855
856 if (skb->len == 0) {
857 netdev_err(netdev, "empty skb received from stack\n");
858 dev_kfree_skb_any(skb);
859 goto tx_netdev_return;
860 }
861
862 /* Calculate preliminary packet info */
863 memset(packet, 0, sizeof(*packet));
864 xgbe_packet_info(ring, skb, packet);
865
866 /* Check that there are enough descriptors available */
867 if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
868 DBGPR(" Tx queue stopped, not enough descriptors available\n");
869 netif_stop_subqueue(netdev, channel->queue_index);
870 ring->tx.queue_stopped = 1;
871 ret = NETDEV_TX_BUSY;
872 goto tx_netdev_return;
873 }
874
875 ret = xgbe_prep_tso(skb, packet);
876 if (ret) {
877 netdev_err(netdev, "error processing TSO packet\n");
878 dev_kfree_skb_any(skb);
879 goto tx_netdev_return;
880 }
881 xgbe_prep_vlan(skb, packet);
882
883 if (!desc_if->map_tx_skb(channel, skb)) {
884 dev_kfree_skb_any(skb);
885 goto tx_netdev_return;
886 }
887
888 /* Configure required descriptor fields for transmission */
889 hw_if->pre_xmit(channel);
890
891#ifdef XGMAC_ENABLE_TX_PKT_DUMP
892 xgbe_print_pkt(netdev, skb, true);
893#endif
894
895tx_netdev_return:
896 spin_unlock_irqrestore(&ring->lock, flags);
897
898 DBGPR("<--xgbe_xmit\n");
899
900 return ret;
901}
902
903static void xgbe_set_rx_mode(struct net_device *netdev)
904{
905 struct xgbe_prv_data *pdata = netdev_priv(netdev);
906 struct xgbe_hw_if *hw_if = &pdata->hw_if;
907 unsigned int pr_mode, am_mode;
908
909 DBGPR("-->xgbe_set_rx_mode\n");
910
911 pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
912 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
913
914 if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
915 pr_mode = 1;
916 if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
917 am_mode = 1;
918 if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
919 pdata->hw_feat.addn_mac)
920 pr_mode = 1;
921
922 hw_if->set_promiscuous_mode(pdata, pr_mode);
923 hw_if->set_all_multicast_mode(pdata, am_mode);
924 if (!pr_mode)
925 hw_if->set_addn_mac_addrs(pdata, am_mode);
926
927 DBGPR("<--xgbe_set_rx_mode\n");
928}
929
930static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
931{
932 struct xgbe_prv_data *pdata = netdev_priv(netdev);
933 struct xgbe_hw_if *hw_if = &pdata->hw_if;
934 struct sockaddr *saddr = addr;
935
936 DBGPR("-->xgbe_set_mac_address\n");
937
938 if (!is_valid_ether_addr(saddr->sa_data))
939 return -EADDRNOTAVAIL;
940
941 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
942
943 hw_if->set_mac_address(pdata, netdev->dev_addr);
944
945 DBGPR("<--xgbe_set_mac_address\n");
946
947 return 0;
948}
949
950static int xgbe_change_mtu(struct net_device *netdev, int mtu)
951{
952 struct xgbe_prv_data *pdata = netdev_priv(netdev);
953 int ret;
954
955 DBGPR("-->xgbe_change_mtu\n");
956
957 ret = xgbe_calc_rx_buf_size(netdev, mtu);
958 if (ret < 0)
959 return ret;
960
961 pdata->rx_buf_size = ret;
962 netdev->mtu = mtu;
963
964 xgbe_restart_dev(pdata, 0);
965
966 DBGPR("<--xgbe_change_mtu\n");
967
968 return 0;
969}
970
971static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
972 struct rtnl_link_stats64 *s)
973{
974 struct xgbe_prv_data *pdata = netdev_priv(netdev);
975 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
976
977 DBGPR("-->%s\n", __func__);
978
979 pdata->hw_if.read_mmc_stats(pdata);
980
981 s->rx_packets = pstats->rxframecount_gb;
982 s->rx_bytes = pstats->rxoctetcount_gb;
983 s->rx_errors = pstats->rxframecount_gb -
984 pstats->rxbroadcastframes_g -
985 pstats->rxmulticastframes_g -
986 pstats->rxunicastframes_g;
987 s->multicast = pstats->rxmulticastframes_g;
988 s->rx_length_errors = pstats->rxlengtherror;
989 s->rx_crc_errors = pstats->rxcrcerror;
990 s->rx_fifo_errors = pstats->rxfifooverflow;
991
992 s->tx_packets = pstats->txframecount_gb;
993 s->tx_bytes = pstats->txoctetcount_gb;
994 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
995 s->tx_dropped = netdev->stats.tx_dropped;
996
997 DBGPR("<--%s\n", __func__);
998
999 return s;
1000}
1001
1002#ifdef CONFIG_NET_POLL_CONTROLLER
1003static void xgbe_poll_controller(struct net_device *netdev)
1004{
1005 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1006
1007 DBGPR("-->xgbe_poll_controller\n");
1008
1009 disable_irq(pdata->irq_number);
1010
1011 xgbe_isr(pdata->irq_number, pdata);
1012
1013 enable_irq(pdata->irq_number);
1014
1015 DBGPR("<--xgbe_poll_controller\n");
1016}
1017#endif /* End CONFIG_NET_POLL_CONTROLLER */
1018
1019static int xgbe_set_features(struct net_device *netdev,
1020 netdev_features_t features)
1021{
1022 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1023 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1024 unsigned int rxcsum_enabled, rxvlan_enabled;
1025
1026 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
1027 rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
1028
1029 if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
1030 hw_if->enable_rx_csum(pdata);
1031 netdev_alert(netdev, "state change - rxcsum enabled\n");
1032 } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
1033 hw_if->disable_rx_csum(pdata);
1034 netdev_alert(netdev, "state change - rxcsum disabled\n");
1035 }
1036
1037 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
1038 hw_if->enable_rx_vlan_stripping(pdata);
1039 netdev_alert(netdev, "state change - rxvlan enabled\n");
1040 } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
1041 hw_if->disable_rx_vlan_stripping(pdata);
1042 netdev_alert(netdev, "state change - rxvlan disabled\n");
1043 }
1044
1045 pdata->netdev_features = features;
1046
1047 DBGPR("<--xgbe_set_features\n");
1048
1049 return 0;
1050}
1051
1052static const struct net_device_ops xgbe_netdev_ops = {
1053 .ndo_open = xgbe_open,
1054 .ndo_stop = xgbe_close,
1055 .ndo_start_xmit = xgbe_xmit,
1056 .ndo_set_rx_mode = xgbe_set_rx_mode,
1057 .ndo_set_mac_address = xgbe_set_mac_address,
1058 .ndo_validate_addr = eth_validate_addr,
1059 .ndo_change_mtu = xgbe_change_mtu,
1060 .ndo_get_stats64 = xgbe_get_stats64,
1061#ifdef CONFIG_NET_POLL_CONTROLLER
1062 .ndo_poll_controller = xgbe_poll_controller,
1063#endif
1064 .ndo_set_features = xgbe_set_features,
1065};
1066
1067struct net_device_ops *xgbe_get_netdev_ops(void)
1068{
1069 return (struct net_device_ops *)&xgbe_netdev_ops;
1070}
1071
1072static int xgbe_tx_poll(struct xgbe_channel *channel)
1073{
1074 struct xgbe_prv_data *pdata = channel->pdata;
1075 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1076 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1077 struct xgbe_ring *ring = channel->tx_ring;
1078 struct xgbe_ring_data *rdata;
1079 struct xgbe_ring_desc *rdesc;
1080 struct net_device *netdev = pdata->netdev;
1081 unsigned long flags;
1082 int processed = 0;
1083
1084 DBGPR("-->xgbe_tx_poll\n");
1085
1086 /* Nothing to do if there isn't a Tx ring for this channel */
1087 if (!ring)
1088 return 0;
1089
1090 spin_lock_irqsave(&ring->lock, flags);
1091
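	/* Reclaim descriptors the hardware has finished with, at most
	 * TX_DESC_MAX_PROC per call
	 */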
1092 while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
1093 rdata = GET_DESC_DATA(ring, ring->dirty);
1094 rdesc = rdata->rdesc;
1095
1096 if (!hw_if->tx_complete(rdesc))
1097 break;
1098
1099#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1100 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1101#endif
1102
1103 /* Free the SKB and reset the descriptor for re-use */
1104 desc_if->unmap_skb(pdata, rdata);
1105 hw_if->tx_desc_reset(rdata);
1106
1107 processed++;
1108 ring->dirty++;
1109 }
1110
1111 if ((ring->tx.queue_stopped == 1) &&
1112 (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
1113 ring->tx.queue_stopped = 0;
1114 netif_wake_subqueue(netdev, channel->queue_index);
1115 }
1116
1117 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1118
1119 spin_unlock_irqrestore(&ring->lock, flags);
1120
1121 return processed;
1122}
1123
1124static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1125{
1126 struct xgbe_prv_data *pdata = channel->pdata;
1127 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1128 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1129 struct xgbe_ring *ring = channel->rx_ring;
1130 struct xgbe_ring_data *rdata;
1131 struct xgbe_packet_data *packet;
1132 struct net_device *netdev = pdata->netdev;
1133 struct sk_buff *skb;
1134 unsigned int incomplete, error;
1135 unsigned int cur_len, put_len, max_len;
1136 int received = 0;
1137
1138 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1139
1140 /* Nothing to do if there isn't a Rx ring for this channel */
1141 if (!ring)
1142 return 0;
1143
1144 packet = &ring->packet_data;
1145 while (received < budget) {
1146 DBGPR(" cur = %d\n", ring->cur);
1147
1148 /* Clear the packet data information */
1149 memset(packet, 0, sizeof(*packet));
1150 skb = NULL;
1151 error = 0;
1152 cur_len = 0;
1153
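	/* A packet can span multiple descriptors; jump back here until
	 * the hardware reports the descriptor chain is complete
	 */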
1154read_again:
1155 rdata = GET_DESC_DATA(ring, ring->cur);
1156
1157 if (hw_if->dev_read(channel))
1158 break;
1159
1160 received++;
1161 ring->cur++;
1162 ring->dirty++;
1163
1164 dma_unmap_single(pdata->dev, rdata->skb_dma,
1165 rdata->skb_dma_len, DMA_FROM_DEVICE);
1166 rdata->skb_dma = 0;
1167
1168 incomplete = XGMAC_GET_BITS(packet->attributes,
1169 RX_PACKET_ATTRIBUTES,
1170 INCOMPLETE);
1171
1172 /* Earlier error, just drain the remaining data */
1173 if (incomplete && error)
1174 goto read_again;
1175
1176 if (error || packet->errors) {
1177 if (packet->errors)
1178 DBGPR("Error in received packet\n");
1179 dev_kfree_skb(skb);
1180 continue;
1181 }
1182
1183 put_len = rdata->len - cur_len;
1184 if (skb) {
1185 if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
1186 DBGPR("pskb_expand_head error\n");
1187 if (incomplete) {
1188 error = 1;
1189 goto read_again;
1190 }
1191
1192 dev_kfree_skb(skb);
1193 continue;
1194 }
1195 memcpy(skb_tail_pointer(skb), rdata->skb->data,
1196 put_len);
1197 } else {
1198 skb = rdata->skb;
1199 rdata->skb = NULL;
1200 }
1201 skb_put(skb, put_len);
1202 cur_len += put_len;
1203
1204 if (incomplete)
1205 goto read_again;
1206
1207 /* Be sure we don't exceed the configured MTU */
1208 max_len = netdev->mtu + ETH_HLEN;
1209 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1210 (skb->protocol == htons(ETH_P_8021Q)))
1211 max_len += VLAN_HLEN;
1212
1213 if (skb->len > max_len) {
1214 DBGPR("packet length exceeds configured MTU\n");
1215 dev_kfree_skb(skb);
1216 continue;
1217 }
1218
1219#ifdef XGMAC_ENABLE_RX_PKT_DUMP
1220 xgbe_print_pkt(netdev, skb, false);
1221#endif
1222
1223 skb_checksum_none_assert(skb);
1224 if (XGMAC_GET_BITS(packet->attributes,
1225 RX_PACKET_ATTRIBUTES, CSUM_DONE))
1226 skb->ip_summed = CHECKSUM_UNNECESSARY;
1227
1228 if (XGMAC_GET_BITS(packet->attributes,
1229 RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1230 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1231 packet->vlan_ctag);
1232
1233 skb->dev = netdev;
1234 skb->protocol = eth_type_trans(skb, netdev);
1235 skb_record_rx_queue(skb, channel->queue_index);
1236 skb_mark_napi_id(skb, &pdata->napi);
1237
1238 netdev->last_rx = jiffies;
1239 napi_gro_receive(&pdata->napi, skb);
1240 }
1241
1242 if (received) {
1243 desc_if->realloc_skb(channel);
1244
1245 /* Update the Rx Tail Pointer Register with address of
1246 * the last cleaned entry */
1247 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1248 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1249 lower_32_bits(rdata->rdesc_dma));
1250 }
1251
1252 DBGPR("<--xgbe_rx_poll: received = %d\n", received);
1253
1254 return received;
1255}
1256
1257static int xgbe_poll(struct napi_struct *napi, int budget)
1258{
1259 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1260 napi);
1261 struct xgbe_channel *channel;
1262 int processed;
1263 unsigned int i;
1264
1265 DBGPR("-->xgbe_poll: budget=%d\n", budget);
1266
1267 /* Cleanup Tx ring first */
1268 channel = pdata->channel;
1269 for (i = 0; i < pdata->channel_count; i++, channel++)
1270 xgbe_tx_poll(channel);
1271
1272 /* Process Rx ring next */
1273 processed = 0;
1274 channel = pdata->channel;
1275 for (i = 0; i < pdata->channel_count; i++, channel++)
1276 processed += xgbe_rx_poll(channel, budget - processed);
1277
1278 /* If we processed everything, we are done */
1279 if (processed < budget) {
1280 /* Turn off polling */
1281 napi_complete(napi);
1282
1283 /* Enable Tx and Rx interrupts */
1284 xgbe_enable_rx_tx_ints(pdata);
1285 }
1286
1287 DBGPR("<--xgbe_poll: received = %d\n", processed);
1288
1289 return processed;
1290}
1291
1292void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1293 unsigned int count, unsigned int flag)
1294{
1295 struct xgbe_ring_data *rdata;
1296 struct xgbe_ring_desc *rdesc;
1297
1298 while (count--) {
1299 rdata = GET_DESC_DATA(ring, idx);
1300 rdesc = rdata->rdesc;
1301 DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
1302 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1303 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1304 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1305 idx++;
1306 }
1307}
1308
1309void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1310 unsigned int idx)
1311{
1312 DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1313 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1314 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1315}
1316
1317void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
1318{
1319 struct ethhdr *eth = (struct ethhdr *)skb->data;
1320 unsigned char *buf = skb->data;
1321 unsigned char buffer[128];
1322 unsigned int i, j;
1323
1324 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1325
1326 netdev_alert(netdev, "%s packet of %d bytes\n",
1327 (tx_rx ? "TX" : "RX"), skb->len);
1328
1329 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
1330 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
1331 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
1332
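	/* Dump the packet as hex, 32 bytes per line, with a single
	 * space every 4 bytes and a double space every 16 bytes
	 */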
1333 for (i = 0, j = 0; i < skb->len;) {
1334 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
1335 buf[i++]);
1336
1337 if ((i % 32) == 0) {
1338 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
1339 j = 0;
1340 } else if ((i % 16) == 0) {
1341 buffer[j++] = ' ';
1342 buffer[j++] = ' ';
1343 } else if ((i % 4) == 0) {
1344 buffer[j++] = ' ';
1345 }
1346 }
1347 if (i % 32)
1348 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
1349
1350 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1351}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000000..8909f2b51af1
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/phy.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124struct xgbe_stats {
125 char stat_string[ETH_GSTRING_LEN];
126 int stat_size;
127 int stat_offset;
128};
129
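/* Describe one MMC counter for ethtool: its display name, its size
 * and its offset within struct xgbe_prv_data
 */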
130#define XGMAC_MMC_STAT(_string, _var) \
131 { _string, \
132 FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
133 offsetof(struct xgbe_prv_data, mmc_stats._var), \
134 }
135
136static const struct xgbe_stats xgbe_gstring_stats[] = {
137 XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
138 XGMAC_MMC_STAT("tx_packets", txframecount_gb),
139 XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
140 XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
141 XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
142 XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
143 XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
144 XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
145 XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
146 XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
147 XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
148 XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
149 XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
150 XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
151
152 XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
153 XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
154 XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
155 XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
156 XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
157 XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
158 XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
159 XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
160 XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
161 XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
162 XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
163 XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
164 XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
165 XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
166 XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
167 XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
168 XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
169 XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
170 XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
171 XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
172 XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
173 XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
174};
175#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
176
177static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
178{
179 int i;
180
181 DBGPR("-->%s\n", __func__);
182
183 switch (stringset) {
184 case ETH_SS_STATS:
185 for (i = 0; i < XGBE_STATS_COUNT; i++) {
186 memcpy(data, xgbe_gstring_stats[i].stat_string,
187 ETH_GSTRING_LEN);
188 data += ETH_GSTRING_LEN;
189 }
190 break;
191 }
192
193 DBGPR("<--%s\n", __func__);
194}
195
196static void xgbe_get_ethtool_stats(struct net_device *netdev,
197 struct ethtool_stats *stats, u64 *data)
198{
199 struct xgbe_prv_data *pdata = netdev_priv(netdev);
200 u8 *stat;
201 int i;
202
203 DBGPR("-->%s\n", __func__);
204
205 pdata->hw_if.read_mmc_stats(pdata);
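	/* Each counter is a u64 read from its recorded offset in pdata */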
206 for (i = 0; i < XGBE_STATS_COUNT; i++) {
207 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
208 *data++ = *(u64 *)stat;
209 }
210
211 DBGPR("<--%s\n", __func__);
212}
213
214static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
215{
216 int ret;
217
218 DBGPR("-->%s\n", __func__);
219
220 switch (stringset) {
221 case ETH_SS_STATS:
222 ret = XGBE_STATS_COUNT;
223 break;
224
225 default:
226 ret = -EOPNOTSUPP;
227 }
228
229 DBGPR("<--%s\n", __func__);
230
231 return ret;
232}
233
234static void xgbe_get_pauseparam(struct net_device *netdev,
235 struct ethtool_pauseparam *pause)
236{
237 struct xgbe_prv_data *pdata = netdev_priv(netdev);
238
239 DBGPR("-->xgbe_get_pauseparam\n");
240
241 pause->autoneg = pdata->pause_autoneg;
242 pause->tx_pause = pdata->tx_pause;
243 pause->rx_pause = pdata->rx_pause;
244
245 DBGPR("<--xgbe_get_pauseparam\n");
246}
247
248static int xgbe_set_pauseparam(struct net_device *netdev,
249 struct ethtool_pauseparam *pause)
250{
251 struct xgbe_prv_data *pdata = netdev_priv(netdev);
252 struct phy_device *phydev = pdata->phydev;
253 int ret = 0;
254
255 DBGPR("-->xgbe_set_pauseparam\n");
256
257 DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
258 pause->autoneg, pause->tx_pause, pause->rx_pause);
259
260 pdata->pause_autoneg = pause->autoneg;
261 if (pause->autoneg) {
262 phydev->advertising |= ADVERTISED_Pause;
263 phydev->advertising |= ADVERTISED_Asym_Pause;
264
265 } else {
266 phydev->advertising &= ~ADVERTISED_Pause;
267 phydev->advertising &= ~ADVERTISED_Asym_Pause;
268
269 pdata->tx_pause = pause->tx_pause;
270 pdata->rx_pause = pause->rx_pause;
271 }
272
273 if (netif_running(netdev))
274 ret = phy_start_aneg(phydev);
275
276 DBGPR("<--xgbe_set_pauseparam\n");
277
278 return ret;
279}
280
281static int xgbe_get_settings(struct net_device *netdev,
282 struct ethtool_cmd *cmd)
283{
284 struct xgbe_prv_data *pdata = netdev_priv(netdev);
285 int ret;
286
287 DBGPR("-->xgbe_get_settings\n");
288
289 if (!pdata->phydev)
290 return -ENODEV;
291
292 spin_lock_irq(&pdata->lock);
293
294 ret = phy_ethtool_gset(pdata->phydev, cmd);
295 cmd->transceiver = XCVR_EXTERNAL;
296
297 spin_unlock_irq(&pdata->lock);
298
299 DBGPR("<--xgbe_get_settings\n");
300
301 return ret;
302}
303
304static int xgbe_set_settings(struct net_device *netdev,
305 struct ethtool_cmd *cmd)
306{
307 struct xgbe_prv_data *pdata = netdev_priv(netdev);
308 struct phy_device *phydev = pdata->phydev;
309 u32 speed;
310 int ret;
311
312 DBGPR("-->xgbe_set_settings\n");
313
314 if (!pdata->phydev)
315 return -ENODEV;
316
317 spin_lock_irq(&pdata->lock);
318
319 speed = ethtool_cmd_speed(cmd);
320
321 ret = -EINVAL;
322 if (cmd->phy_address != phydev->addr)
323 goto unlock;
324
325 if ((cmd->autoneg != AUTONEG_ENABLE) &&
326 (cmd->autoneg != AUTONEG_DISABLE))
327 goto unlock;
328
329 if ((cmd->autoneg == AUTONEG_DISABLE) &&
330 (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
331 (cmd->duplex != DUPLEX_FULL)))
332 goto unlock;
333
334 if (cmd->autoneg == AUTONEG_ENABLE) {
335 /* Clear settings needed to force speeds */
336 phydev->supported &= ~SUPPORTED_1000baseT_Full;
337 phydev->supported &= ~SUPPORTED_10000baseT_Full;
338 } else {
339 /* Add settings needed to force speed */
340 phydev->supported |= SUPPORTED_1000baseT_Full;
341 phydev->supported |= SUPPORTED_10000baseT_Full;
342 }
343
344 cmd->advertising &= phydev->supported;
345 if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
346 goto unlock;
347
348 ret = 0;
349 phydev->autoneg = cmd->autoneg;
350 phydev->speed = speed;
351 phydev->duplex = cmd->duplex;
352 phydev->advertising = cmd->advertising;
353
354 if (cmd->autoneg == AUTONEG_ENABLE)
355 phydev->advertising |= ADVERTISED_Autoneg;
356 else
357 phydev->advertising &= ~ADVERTISED_Autoneg;
358
359 if (netif_running(netdev))
360 ret = phy_start_aneg(phydev);
361
362unlock:
363 spin_unlock_irq(&pdata->lock);
364
365 DBGPR("<--xgbe_set_settings\n");
366
367 return ret;
368}
369
370static void xgbe_get_drvinfo(struct net_device *netdev,
371 struct ethtool_drvinfo *drvinfo)
372{
373 struct xgbe_prv_data *pdata = netdev_priv(netdev);
374
375 strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
376 strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
377 strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
378 sizeof(drvinfo->bus_info));
379 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
380 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
381 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
382 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
383 drvinfo->n_stats = XGBE_STATS_COUNT;
384}
385
386static int xgbe_get_coalesce(struct net_device *netdev,
387 struct ethtool_coalesce *ec)
388{
389 struct xgbe_prv_data *pdata = netdev_priv(netdev);
390 struct xgbe_hw_if *hw_if = &pdata->hw_if;
391 unsigned int riwt;
392
393 DBGPR("-->xgbe_get_coalesce\n");
394
395 memset(ec, 0, sizeof(struct ethtool_coalesce));
396
397 riwt = pdata->rx_riwt;
398 ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
399 ec->rx_max_coalesced_frames = pdata->rx_frames;
400
401 ec->tx_coalesce_usecs = pdata->tx_usecs;
402 ec->tx_max_coalesced_frames = pdata->tx_frames;
403
404 DBGPR("<--xgbe_get_coalesce\n");
405
406 return 0;
407}
408
409static int xgbe_set_coalesce(struct net_device *netdev,
410 struct ethtool_coalesce *ec)
411{
412 struct xgbe_prv_data *pdata = netdev_priv(netdev);
413 struct xgbe_hw_if *hw_if = &pdata->hw_if;
414 unsigned int rx_frames, rx_riwt, rx_usecs;
415 unsigned int tx_frames, tx_usecs;
416
417 DBGPR("-->xgbe_set_coalesce\n");
418
419 /* Check for not supported parameters */
420 if ((ec->rx_coalesce_usecs_irq) ||
421 (ec->rx_max_coalesced_frames_irq) ||
422 (ec->tx_coalesce_usecs_irq) ||
423 (ec->tx_max_coalesced_frames_irq) ||
424 (ec->stats_block_coalesce_usecs) ||
425 (ec->use_adaptive_rx_coalesce) ||
426 (ec->use_adaptive_tx_coalesce) ||
427 (ec->pkt_rate_low) ||
428 (ec->rx_coalesce_usecs_low) ||
429 (ec->rx_max_coalesced_frames_low) ||
430 (ec->tx_coalesce_usecs_low) ||
431 (ec->tx_max_coalesced_frames_low) ||
432 (ec->pkt_rate_high) ||
433 (ec->rx_coalesce_usecs_high) ||
434 (ec->rx_max_coalesced_frames_high) ||
435 (ec->tx_coalesce_usecs_high) ||
436 (ec->tx_max_coalesced_frames_high) ||
437 (ec->rate_sample_interval))
438 return -EOPNOTSUPP;
439
440 /* Can only change rx-frames when interface is down (see
441 * rx_descriptor_init in xgbe-dev.c)
442 */
443 rx_frames = pdata->rx_frames;
444 if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
445 netdev_alert(netdev,
446 "interface must be down to change rx-frames\n");
447 return -EINVAL;
448 }
449
450 rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
451 rx_frames = ec->rx_max_coalesced_frames;
452
453 /* Use smallest possible value if conversion resulted in zero */
454 if (ec->rx_coalesce_usecs && !rx_riwt)
455 rx_riwt = 1;
456
457 /* Check the bounds of values for Rx */
458 if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
459 rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
460 netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
461 rx_usecs);
462 return -EINVAL;
463 }
464 if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
465 netdev_alert(netdev, "rx-frames is limited to %d frames\n",
466 pdata->channel->rx_ring->rdesc_count);
467 return -EINVAL;
468 }
469
470 tx_usecs = ec->tx_coalesce_usecs;
471 tx_frames = ec->tx_max_coalesced_frames;
472
473 /* Check the bounds of values for Tx */
474 if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
475 netdev_alert(netdev, "tx-frames is limited to %d frames\n",
476 pdata->channel->tx_ring->rdesc_count);
477 return -EINVAL;
478 }
479
480 pdata->rx_riwt = rx_riwt;
481 pdata->rx_frames = rx_frames;
482 hw_if->config_rx_coalesce(pdata);
483
484 pdata->tx_usecs = tx_usecs;
485 pdata->tx_frames = tx_frames;
486 hw_if->config_tx_coalesce(pdata);
487
488 DBGPR("<--xgbe_set_coalesce\n");
489
490 return 0;
491}
492
493static const struct ethtool_ops xgbe_ethtool_ops = {
494 .get_settings = xgbe_get_settings,
495 .set_settings = xgbe_set_settings,
496 .get_drvinfo = xgbe_get_drvinfo,
497 .get_link = ethtool_op_get_link,
498 .get_coalesce = xgbe_get_coalesce,
499 .set_coalesce = xgbe_set_coalesce,
500 .get_pauseparam = xgbe_get_pauseparam,
501 .set_pauseparam = xgbe_set_pauseparam,
502 .get_strings = xgbe_get_strings,
503 .get_ethtool_stats = xgbe_get_ethtool_stats,
504 .get_sset_count = xgbe_get_sset_count,
505};
506
507struct ethtool_ops *xgbe_get_ethtool_ops(void)
508{
509 return (struct ethtool_ops *)&xgbe_ethtool_ops;
510}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000000..c83584a26713
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/device.h>
119#include <linux/platform_device.h>
120#include <linux/spinlock.h>
121#include <linux/netdevice.h>
122#include <linux/etherdevice.h>
123#include <linux/io.h>
124#include <linux/of.h>
125#include <linux/of_net.h>
126#include <linux/clk.h>
127
128#include "xgbe.h"
129#include "xgbe-common.h"
130
131
132MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
133MODULE_LICENSE("Dual BSD/GPL");
134MODULE_VERSION(XGBE_DRV_VERSION);
135MODULE_DESCRIPTION(XGBE_DRV_DESC);
136
137static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
138{
139 struct xgbe_channel *channel_mem, *channel;
140 struct xgbe_ring *tx_ring, *rx_ring;
141 unsigned int count, i;
142
143 DBGPR("-->xgbe_alloc_rings\n");
144
145 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
146
147 channel_mem = devm_kcalloc(pdata->dev, count,
148 sizeof(struct xgbe_channel), GFP_KERNEL);
149 if (!channel_mem)
150 return NULL;
151
152 tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
153 sizeof(struct xgbe_ring), GFP_KERNEL);
154 if (!tx_ring)
155 return NULL;
156
157 rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
158 sizeof(struct xgbe_ring), GFP_KERNEL);
159 if (!rx_ring)
160 return NULL;
161
162 for (i = 0, channel = channel_mem; i < count; i++, channel++) {
163 snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
164 channel->pdata = pdata;
165 channel->queue_index = i;
166 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
167 (DMA_CH_INC * i);
168
169 if (i < pdata->tx_ring_count) {
170 spin_lock_init(&tx_ring->lock);
171 channel->tx_ring = tx_ring++;
172 }
173
174 if (i < pdata->rx_ring_count) {
175			spin_lock_init(&rx_ring->lock);
176 channel->rx_ring = rx_ring++;
177 }
178
179 DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
180 channel->name, channel->queue_index, channel->dma_regs,
181 channel->tx_ring, channel->rx_ring);
182 }
183
184 pdata->channel_count = count;
185
186 DBGPR("<--xgbe_alloc_rings\n");
187
188 return channel_mem;
189}
190
191static void xgbe_default_config(struct xgbe_prv_data *pdata)
192{
193 DBGPR("-->xgbe_default_config\n");
194
195 pdata->pblx8 = DMA_PBL_X8_ENABLE;
196 pdata->tx_sf_mode = MTL_TSF_ENABLE;
197 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
198 pdata->tx_pbl = DMA_PBL_16;
199 pdata->tx_osp_mode = DMA_OSP_ENABLE;
200 pdata->rx_sf_mode = MTL_RSF_DISABLE;
201 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
202 pdata->rx_pbl = DMA_PBL_16;
203 pdata->pause_autoneg = 1;
204 pdata->tx_pause = 1;
205 pdata->rx_pause = 1;
206 pdata->power_down = 0;
207 pdata->default_autoneg = AUTONEG_ENABLE;
208 pdata->default_speed = SPEED_10000;
209
210 DBGPR("<--xgbe_default_config\n");
211}
212
213static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
214{
215 xgbe_init_function_ptrs_dev(&pdata->hw_if);
216 xgbe_init_function_ptrs_desc(&pdata->desc_if);
217}
218
219static int xgbe_probe(struct platform_device *pdev)
220{
221 struct xgbe_prv_data *pdata;
222 struct xgbe_hw_if *hw_if;
223 struct xgbe_desc_if *desc_if;
224 struct net_device *netdev;
225 struct device *dev = &pdev->dev;
226 struct resource *res;
227 const u8 *mac_addr;
228 int ret;
229
230 DBGPR("--> xgbe_probe\n");
231
232 netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
233 XGBE_MAX_DMA_CHANNELS);
234 if (!netdev) {
235 dev_err(dev, "alloc_etherdev failed\n");
236 ret = -ENOMEM;
237 goto err_alloc;
238 }
239 SET_NETDEV_DEV(netdev, dev);
240 pdata = netdev_priv(netdev);
241 pdata->netdev = netdev;
242 pdata->pdev = pdev;
243 pdata->dev = dev;
244 platform_set_drvdata(pdev, netdev);
245
246 spin_lock_init(&pdata->lock);
247 mutex_init(&pdata->xpcs_mutex);
248
249 /* Set and validate the number of descriptors for a ring */
250 BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
251 pdata->tx_desc_count = TX_DESC_CNT;
252 if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
253 dev_err(dev, "tx descriptor count (%d) is not valid\n",
254 pdata->tx_desc_count);
255 ret = -EINVAL;
256 goto err_io;
257 }
258 BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
259 pdata->rx_desc_count = RX_DESC_CNT;
260 if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
261 dev_err(dev, "rx descriptor count (%d) is not valid\n",
262 pdata->rx_desc_count);
263 ret = -EINVAL;
264 goto err_io;
265 }
266
267 /* Obtain the system clock setting */
268 pdata->sysclock = devm_clk_get(dev, NULL);
269 if (IS_ERR(pdata->sysclock)) {
270 dev_err(dev, "devm_clk_get failed\n");
271 ret = PTR_ERR(pdata->sysclock);
272 goto err_io;
273 }
274
275 /* Obtain the mmio areas for the device */
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
278 if (IS_ERR(pdata->xgmac_regs)) {
279 dev_err(dev, "xgmac ioremap failed\n");
280 ret = PTR_ERR(pdata->xgmac_regs);
281 goto err_io;
282 }
283 DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
284
285 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
286 pdata->xpcs_regs = devm_ioremap_resource(dev, res);
287 if (IS_ERR(pdata->xpcs_regs)) {
288 dev_err(dev, "xpcs ioremap failed\n");
289 ret = PTR_ERR(pdata->xpcs_regs);
290 goto err_io;
291 }
292 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
293
294 /* Set the DMA mask */
295 if (!dev->dma_mask)
296 dev->dma_mask = &dev->coherent_dma_mask;
297 *(dev->dma_mask) = DMA_BIT_MASK(40);
298 dev->coherent_dma_mask = DMA_BIT_MASK(40);
299
300 ret = platform_get_irq(pdev, 0);
301 if (ret < 0) {
302 dev_err(dev, "platform_get_irq failed\n");
303 goto err_io;
304 }
305 netdev->irq = ret;
306 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
307
308 /* Set all the function pointers */
309 xgbe_init_all_fptrs(pdata);
310 hw_if = &pdata->hw_if;
311 desc_if = &pdata->desc_if;
312
313 /* Issue software reset to device */
314 hw_if->exit(pdata);
315
316 /* Populate the hardware features */
317 xgbe_get_all_hw_features(pdata);
318
319 /* Retrieve the MAC address */
320 mac_addr = of_get_mac_address(dev->of_node);
321 if (!mac_addr) {
322 dev_err(dev, "invalid mac address for this device\n");
323 ret = -EINVAL;
324 goto err_io;
325 }
326 memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
327
328 /* Retrieve the PHY mode - it must be "xgmii" */
329 pdata->phy_mode = of_get_phy_mode(dev->of_node);
330 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
331 dev_err(dev, "invalid phy-mode specified for this device\n");
332 ret = -EINVAL;
333 goto err_io;
334 }
335
336 /* Set default configuration data */
337 xgbe_default_config(pdata);
338
339 /* Calculate the number of Tx and Rx rings to be created */
340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
341 pdata->hw_feat.tx_ch_cnt);
342	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
343	if (ret) {
344		dev_err(dev, "error setting real tx queue count\n");
345		goto err_io;
346	}
347 pdata->rx_ring_count = min_t(unsigned int,
348 netif_get_num_default_rss_queues(),
349 pdata->hw_feat.rx_ch_cnt);
350 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
351 if (ret) {
352 dev_err(dev, "error setting real rx queue count\n");
353 goto err_io;
354 }
355
356 /* Allocate the rings for the DMA channels */
357 pdata->channel = xgbe_alloc_rings(pdata);
358 if (!pdata->channel) {
359 dev_err(dev, "ring allocation failed\n");
360 ret = -ENOMEM;
361 goto err_io;
362 }
363
364	/* Prepare to register with MDIO */
365 pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
366 if (!pdata->mii_bus_id) {
367 dev_err(dev, "failed to allocate mii bus id\n");
368 ret = -ENOMEM;
369 goto err_io;
370 }
371 ret = xgbe_mdio_register(pdata);
372 if (ret)
373 goto err_bus_id;
374
375 /* Set network and ethtool operations */
376 netdev->netdev_ops = xgbe_get_netdev_ops();
377 netdev->ethtool_ops = xgbe_get_ethtool_ops();
378
379 /* Set device features */
380 netdev->hw_features = NETIF_F_SG |
381 NETIF_F_IP_CSUM |
382 NETIF_F_IPV6_CSUM |
383 NETIF_F_RXCSUM |
384 NETIF_F_TSO |
385 NETIF_F_TSO6 |
386 NETIF_F_GRO |
387 NETIF_F_HW_VLAN_CTAG_RX |
388 NETIF_F_HW_VLAN_CTAG_TX;
389
390 netdev->vlan_features |= NETIF_F_SG |
391 NETIF_F_IP_CSUM |
392 NETIF_F_IPV6_CSUM |
393 NETIF_F_TSO |
394 NETIF_F_TSO6;
395
396 netdev->features |= netdev->hw_features;
397 pdata->netdev_features = netdev->features;
398
399 xgbe_init_rx_coalesce(pdata);
400 xgbe_init_tx_coalesce(pdata);
401
402 netif_carrier_off(netdev);
403 ret = register_netdev(netdev);
404 if (ret) {
405 dev_err(dev, "net device registration failed\n");
406 goto err_reg_netdev;
407 }
408
409 xgbe_debugfs_init(pdata);
410
411 netdev_notice(netdev, "net device enabled\n");
412
413 DBGPR("<-- xgbe_probe\n");
414
415 return 0;
416
417err_reg_netdev:
418 xgbe_mdio_unregister(pdata);
419
420err_bus_id:
421 kfree(pdata->mii_bus_id);
422
423err_io:
424 free_netdev(netdev);
425
426err_alloc:
427 dev_notice(dev, "net device not enabled\n");
428
429 return ret;
430}
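
/* A hedged sketch, not part of the original patch: the open-coded DMA mask
 * setup in xgbe_probe() above is equivalent to the helper call below, which
 * additionally verifies that the platform can satisfy a 40-bit mask
 * (assumes <linux/dma-mapping.h>).
 */
static inline int example_set_dma_mask(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
}
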
431
432static int xgbe_remove(struct platform_device *pdev)
433{
434 struct net_device *netdev = platform_get_drvdata(pdev);
435 struct xgbe_prv_data *pdata = netdev_priv(netdev);
436
437 DBGPR("-->xgbe_remove\n");
438
439 xgbe_debugfs_exit(pdata);
440
441 unregister_netdev(netdev);
442
443 xgbe_mdio_unregister(pdata);
444
445 kfree(pdata->mii_bus_id);
446
447 free_netdev(netdev);
448
449 DBGPR("<--xgbe_remove\n");
450
451 return 0;
452}
453
454#ifdef CONFIG_PM
455static int xgbe_suspend(struct device *dev)
456{
457 struct net_device *netdev = dev_get_drvdata(dev);
458 int ret;
459
460 DBGPR("-->xgbe_suspend\n");
461
462 if (!netif_running(netdev)) {
463		DBGPR("<--xgbe_suspend\n");
464 return -EINVAL;
465 }
466
467 ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
468
469 DBGPR("<--xgbe_suspend\n");
470
471 return ret;
472}
473
474static int xgbe_resume(struct device *dev)
475{
476 struct net_device *netdev = dev_get_drvdata(dev);
477 int ret;
478
479 DBGPR("-->xgbe_resume\n");
480
481 if (!netif_running(netdev)) {
482		DBGPR("<--xgbe_resume\n");
483 return -EINVAL;
484 }
485
486 ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
487
488 DBGPR("<--xgbe_resume\n");
489
490 return ret;
491}
492#endif /* CONFIG_PM */
493
494static const struct of_device_id xgbe_of_match[] = {
495 { .compatible = "amd,xgbe-seattle-v1a", },
496 {},
497};
498
499MODULE_DEVICE_TABLE(of, xgbe_of_match);
500static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
501
502static struct platform_driver xgbe_driver = {
503 .driver = {
504 .name = "amd-xgbe",
505 .of_match_table = xgbe_of_match,
506 .pm = &xgbe_pm_ops,
507 },
508 .probe = xgbe_probe,
509 .remove = xgbe_remove,
510};
511
512module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000000..ea7a5d6750ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/kmod.h>
119#include <linux/spinlock.h>
120#include <linux/mdio.h>
121#include <linux/phy.h>
122#include <linux/of.h>
123
124#include "xgbe.h"
125#include "xgbe-common.h"
126
127
128static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
129{
130 struct xgbe_prv_data *pdata = mii->priv;
131 struct xgbe_hw_if *hw_if = &pdata->hw_if;
132 int mmd_data;
133
134 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
135 prtad, mmd_reg);
136
137 mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
138
139 DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
140
141 return mmd_data;
142}
143
144static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
145 u16 mmd_val)
146{
147 struct xgbe_prv_data *pdata = mii->priv;
148 struct xgbe_hw_if *hw_if = &pdata->hw_if;
149 int mmd_data = mmd_val;
150
151 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
152 prtad, mmd_reg, mmd_data);
153
154 hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
155
156 DBGPR_MDIO("<--xgbe_mdio_write\n");
157
158 return 0;
159}
160
161static void xgbe_adjust_link(struct net_device *netdev)
162{
163 struct xgbe_prv_data *pdata = netdev_priv(netdev);
164 struct xgbe_hw_if *hw_if = &pdata->hw_if;
165 struct phy_device *phydev = pdata->phydev;
166 unsigned long flags;
167 int new_state = 0;
168
169 if (phydev == NULL)
170 return;
171
172 DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
173 phydev->addr, phydev->link, pdata->phy_link);
174
175 spin_lock_irqsave(&pdata->lock, flags);
176
177 if (phydev->link) {
178 /* Flow control support */
179 if (pdata->pause_autoneg) {
180 if (phydev->pause || phydev->asym_pause) {
181 pdata->tx_pause = 1;
182 pdata->rx_pause = 1;
183 } else {
184 pdata->tx_pause = 0;
185 pdata->rx_pause = 0;
186 }
187 }
188
189 if (pdata->tx_pause != pdata->phy_tx_pause) {
190 hw_if->config_tx_flow_control(pdata);
191 pdata->phy_tx_pause = pdata->tx_pause;
192 }
193
194 if (pdata->rx_pause != pdata->phy_rx_pause) {
195 hw_if->config_rx_flow_control(pdata);
196 pdata->phy_rx_pause = pdata->rx_pause;
197 }
198
199 /* Speed support */
200 if (phydev->speed != pdata->phy_speed) {
201 new_state = 1;
202
203 switch (phydev->speed) {
204 case SPEED_10000:
205 hw_if->set_xgmii_speed(pdata);
206 break;
207
208 case SPEED_2500:
209 hw_if->set_gmii_2500_speed(pdata);
210 break;
211
212 case SPEED_1000:
213 hw_if->set_gmii_speed(pdata);
214 break;
215 }
216 pdata->phy_speed = phydev->speed;
217 }
218
219 if (phydev->link != pdata->phy_link) {
220 new_state = 1;
221 pdata->phy_link = 1;
222 }
223 } else if (pdata->phy_link) {
224 new_state = 1;
225 pdata->phy_link = 0;
226 pdata->phy_speed = SPEED_UNKNOWN;
227 }
228
229 if (new_state)
230 phy_print_status(phydev);
231
232 spin_unlock_irqrestore(&pdata->lock, flags);
233
234 DBGPR_MDIO("<--xgbe_adjust_link\n");
235}
236
237void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
238{
239 struct device *dev = pdata->dev;
240 struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
241 int i;
242
243 dev_alert(dev, "\n************* PHY Reg dump **********************\n");
244
245 dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
246 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
247 dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
248 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
249 dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
250 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
251 dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
252 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
253 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
254 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
255 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
256 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
257
258 dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
259 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
260 dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
261 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
262 dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
263 MDIO_AN_ADVERTISE,
264 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
265 dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
266 MDIO_AN_ADVERTISE + 1,
267 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
268 dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
269 MDIO_AN_ADVERTISE + 2,
270 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
271 dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
272 MDIO_AN_COMP_STAT,
273 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
274
275 dev_alert(dev, "MMD Device Mask = %#x\n",
276 phydev->c45_ids.devices_in_package);
277 for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
278 dev_alert(dev, " MMD %d: ID = %#08x\n", i,
279 phydev->c45_ids.device_ids[i]);
280
281 dev_alert(dev, "\n*************************************************\n");
282}
283
284int xgbe_mdio_register(struct xgbe_prv_data *pdata)
285{
286 struct net_device *netdev = pdata->netdev;
287 struct device_node *phy_node;
288 struct mii_bus *mii;
289 struct phy_device *phydev;
290 int ret = 0;
291
292 DBGPR("-->xgbe_mdio_register\n");
293
294 /* Retrieve the phy-handle */
295 phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
296 if (!phy_node) {
297 dev_err(pdata->dev, "unable to parse phy-handle\n");
298 return -EINVAL;
299 }
300
301 /* Register with the MDIO bus */
302 mii = mdiobus_alloc();
303 if (mii == NULL) {
304 dev_err(pdata->dev, "mdiobus_alloc failed\n");
305 ret = -ENOMEM;
306 goto err_node_get;
307 }
308
309 /* Register on the MDIO bus (don't probe any PHYs) */
310 mii->name = XGBE_PHY_NAME;
311 mii->read = xgbe_mdio_read;
312 mii->write = xgbe_mdio_write;
313 snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
314 mii->priv = pdata;
315 mii->phy_mask = ~0;
316 mii->parent = pdata->dev;
317 ret = mdiobus_register(mii);
318 if (ret) {
319 dev_err(pdata->dev, "mdiobus_register failed\n");
320 goto err_mdiobus_alloc;
321 }
322 DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
323
324 /* Probe the PCS using Clause 45 */
325 phydev = get_phy_device(mii, XGBE_PRTAD, true);
326 if (IS_ERR(phydev) || !phydev ||
327 !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
328 dev_err(pdata->dev, "get_phy_device failed\n");
329		ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENODEV;
330 goto err_mdiobus_register;
331 }
332 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
333 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
334
335 of_node_get(phy_node);
336 phydev->dev.of_node = phy_node;
337 ret = phy_device_register(phydev);
338 if (ret) {
339 dev_err(pdata->dev, "phy_device_register failed\n");
340 of_node_put(phy_node);
341 goto err_phy_device;
342 }
343
344 /* Add a reference to the PHY driver so it can't be unloaded */
345 pdata->phy_module = phydev->dev.driver ?
346 phydev->dev.driver->owner : NULL;
347 if (!try_module_get(pdata->phy_module)) {
348 dev_err(pdata->dev, "try_module_get failed\n");
349 ret = -EIO;
350 goto err_phy_device;
351 }
352
353 pdata->mii = mii;
354 pdata->mdio_mmd = MDIO_MMD_PCS;
355
356 pdata->phy_link = -1;
357 pdata->phy_speed = SPEED_UNKNOWN;
358 pdata->phy_tx_pause = pdata->tx_pause;
359 pdata->phy_rx_pause = pdata->rx_pause;
360
361 ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
362 pdata->phy_mode);
363 if (ret) {
364 netdev_err(netdev, "phy_connect_direct failed\n");
365 goto err_phy_device;
366 }
367
368 if (!phydev->drv || (phydev->drv->phy_id == 0)) {
369 netdev_err(netdev, "phy_id not valid\n");
370 ret = -ENODEV;
371 goto err_phy_connect;
372 }
373 DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
374 dev_name(&phydev->dev), phydev->link);
375
376 phydev->autoneg = pdata->default_autoneg;
377 if (phydev->autoneg == AUTONEG_DISABLE) {
378 /* Add settings needed to force speed */
379 phydev->supported |= SUPPORTED_1000baseT_Full;
380 phydev->supported |= SUPPORTED_10000baseT_Full;
381
382 phydev->speed = pdata->default_speed;
383 phydev->duplex = DUPLEX_FULL;
384
385 phydev->advertising &= ~ADVERTISED_Autoneg;
386 }
387
388 pdata->phydev = phydev;
389
390 of_node_put(phy_node);
391
392 DBGPHY_REGS(pdata);
393
394 DBGPR("<--xgbe_mdio_register\n");
395
396 return 0;
397
398err_phy_connect:
399 phy_disconnect(phydev);
400
401err_phy_device:
402 phy_device_free(phydev);
403
404err_mdiobus_register:
405 mdiobus_unregister(mii);
406
407err_mdiobus_alloc:
408 mdiobus_free(mii);
409
410err_node_get:
411 of_node_put(phy_node);
412
413 return ret;
414}
415
416void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
417{
418 DBGPR("-->xgbe_mdio_unregister\n");
419
420 phy_disconnect(pdata->phydev);
421 pdata->phydev = NULL;
422
423 module_put(pdata->phy_module);
424 pdata->phy_module = NULL;
425
426 mdiobus_unregister(pdata->mii);
427 pdata->mii->priv = NULL;
428
429 mdiobus_free(pdata->mii);
430 pdata->mii = NULL;
431
432 DBGPR("<--xgbe_mdio_unregister\n");
433}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000000..ab0627162c01
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#ifndef __XGBE_H__
118#define __XGBE_H__
119
120#include <linux/dma-mapping.h>
121#include <linux/netdevice.h>
122#include <linux/workqueue.h>
123#include <linux/phy.h>
124
125
126#define XGBE_DRV_NAME "amd-xgbe"
127#define XGBE_DRV_VERSION "1.0.0-a"
128#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
129
130/* Descriptor related defines */
131#define TX_DESC_CNT 512
132#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
133#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
134#define RX_DESC_CNT 512
135
136#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
137
138#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
139#define RX_BUF_ALIGN 64
140
141#define XGBE_MAX_DMA_CHANNELS 16
142#define DMA_ARDOMAIN_SETTING 0x2
143#define DMA_ARCACHE_SETTING 0xb
144#define DMA_AWDOMAIN_SETTING 0x2
145#define DMA_AWCACHE_SETTING 0x7
146#define DMA_INTERRUPT_MASK 0x31c7
147
148#define XGMAC_MIN_PACKET 60
149#define XGMAC_STD_PACKET_MTU 1500
150#define XGMAC_MAX_STD_PACKET 1518
151#define XGMAC_JUMBO_PACKET_MTU 9000
152#define XGMAC_MAX_JUMBO_PACKET 9018
153
154#define MAX_MULTICAST_LIST 14
155#define TX_FLAGS_IP_PKT 0x00000001
156#define TX_FLAGS_TCP_PKT 0x00000002
157
158/* MDIO bus phy name */
159#define XGBE_PHY_NAME "amd_xgbe_phy"
160#define XGBE_PRTAD 0
161
162/* Driver PMT macros */
163#define XGMAC_DRIVER_CONTEXT 1
164#define XGMAC_IOCTL_CONTEXT 2
165
166#define FIFO_SIZE_B(x) (x)
168#define FIFO_SIZE_KB(x)		((x) * 1024)
168
169#define XGBE_TC_CNT 2
170
171/* Helper macro for descriptor handling
172 * Always use GET_DESC_DATA to access the descriptor data
173 * since the index is free-running and needs to be and-ed
174 * with (descriptor count - 1) of the ring to index to
175 * the proper descriptor data.
176 */
177#define GET_DESC_DATA(_ring, _idx) \
178 ((_ring)->rdata + \
179 ((_idx) & ((_ring)->rdesc_count - 1)))
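
/* Example (illustrative): with rdesc_count = 512, a free-running index of
 * 515 wraps to entry 3:
 *
 *	struct xgbe_ring_data *rdata = GET_DESC_DATA(ring, ring->cur);
 *
 * This relies on rdesc_count being a power of two (enforced in
 * xgbe_probe()), so the and-mask is equivalent to a modulo.
 */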
180
181
182/* Default coalescing parameters */
183#define XGMAC_INIT_DMA_TX_USECS 100
184#define XGMAC_INIT_DMA_TX_FRAMES 16
185
186#define XGMAC_MAX_DMA_RIWT 0xff
187#define XGMAC_INIT_DMA_RX_USECS 100
188#define XGMAC_INIT_DMA_RX_FRAMES 16
189
190/* Flow control queue count */
191#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
192
193
194struct xgbe_prv_data;
195
196struct xgbe_packet_data {
197 unsigned int attributes;
198
199 unsigned int errors;
200
201 unsigned int rdesc_count;
202 unsigned int length;
203
204 unsigned int header_len;
205 unsigned int tcp_header_len;
206 unsigned int tcp_payload_len;
207 unsigned short mss;
208
209 unsigned short vlan_ctag;
210};
211
212/* Common Rx and Tx descriptor mapping */
213struct xgbe_ring_desc {
214 unsigned int desc0;
215 unsigned int desc1;
216 unsigned int desc2;
217 unsigned int desc3;
218};
219
220/* Structure used to hold information related to the descriptor
221 * and the packet associated with the descriptor (always use
222 * the GET_DESC_DATA macro to access this data from the ring)
223 */
224struct xgbe_ring_data {
225 struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
226 dma_addr_t rdesc_dma; /* DMA address of descriptor */
227
228 struct sk_buff *skb; /* Virtual address of SKB */
229 dma_addr_t skb_dma; /* DMA address of SKB data */
230 unsigned int skb_dma_len; /* Length of SKB DMA area */
231 unsigned int tso_header; /* TSO header indicator */
232
233 unsigned short len; /* Length of received Rx packet */
234
235 unsigned int interrupt; /* Interrupt indicator */
236
237 unsigned int mapped_as_page;
238};
239
240struct xgbe_ring {
241 /* Ring lock - used just for TX rings at the moment */
242 spinlock_t lock;
243
244 /* Per packet related information */
245 struct xgbe_packet_data packet_data;
246
247 /* Virtual/DMA addresses and count of allocated descriptor memory */
248 struct xgbe_ring_desc *rdesc;
249 dma_addr_t rdesc_dma;
250 unsigned int rdesc_count;
251
252	/* Array of descriptor data corresponding to the descriptor memory
253 * (always use the GET_DESC_DATA macro to access this data)
254 */
255 struct xgbe_ring_data *rdata;
256
257 /* Ring index values
258 * cur - Tx: index of descriptor to be used for current transfer
259 * Rx: index of descriptor to check for packet availability
260 * dirty - Tx: index of descriptor to check for transfer complete
261 * Rx: count of descriptors in which a packet has been received
262 * (used with skb_realloc_index to refresh the ring)
263 */
264 unsigned int cur;
265 unsigned int dirty;
266
267 /* Coalesce frame count used for interrupt bit setting */
268 unsigned int coalesce_count;
269
270 union {
271 struct {
272 unsigned int queue_stopped;
273 unsigned short cur_mss;
274 unsigned short cur_vlan_ctag;
275 } tx;
276
277 struct {
278 unsigned int realloc_index;
279 unsigned int realloc_threshold;
280 } rx;
281 };
282} ____cacheline_aligned;
283
284/* Structure used to describe the descriptor rings associated with
285 * a DMA channel.
286 */
287struct xgbe_channel {
288 char name[16];
289
290 /* Address of private data area for device */
291 struct xgbe_prv_data *pdata;
292
293 /* Queue index and base address of queue's DMA registers */
294 unsigned int queue_index;
295 void __iomem *dma_regs;
296
297 unsigned int saved_ier;
298
299 unsigned int tx_timer_active;
300 struct hrtimer tx_timer;
301
302 struct xgbe_ring *tx_ring;
303 struct xgbe_ring *rx_ring;
304} ____cacheline_aligned;
305
306enum xgbe_int {
307 XGMAC_INT_DMA_ISR_DC0IS,
308 XGMAC_INT_DMA_CH_SR_TI,
309 XGMAC_INT_DMA_CH_SR_TPS,
310 XGMAC_INT_DMA_CH_SR_TBU,
311 XGMAC_INT_DMA_CH_SR_RI,
312 XGMAC_INT_DMA_CH_SR_RBU,
313 XGMAC_INT_DMA_CH_SR_RPS,
314 XGMAC_INT_DMA_CH_SR_FBE,
315 XGMAC_INT_DMA_ALL,
316};
317
318enum xgbe_int_state {
319 XGMAC_INT_STATE_SAVE,
320 XGMAC_INT_STATE_RESTORE,
321};
322
323enum xgbe_mtl_fifo_size {
324 XGMAC_MTL_FIFO_SIZE_256 = 0x00,
325 XGMAC_MTL_FIFO_SIZE_512 = 0x01,
326 XGMAC_MTL_FIFO_SIZE_1K = 0x03,
327 XGMAC_MTL_FIFO_SIZE_2K = 0x07,
328 XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
329 XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
330 XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
331 XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
332 XGMAC_MTL_FIFO_SIZE_64K = 0xff,
333 XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
334 XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
335};
336
337struct xgbe_mmc_stats {
338 /* Tx Stats */
339 u64 txoctetcount_gb;
340 u64 txframecount_gb;
341 u64 txbroadcastframes_g;
342 u64 txmulticastframes_g;
343 u64 tx64octets_gb;
344 u64 tx65to127octets_gb;
345 u64 tx128to255octets_gb;
346 u64 tx256to511octets_gb;
347 u64 tx512to1023octets_gb;
348 u64 tx1024tomaxoctets_gb;
349 u64 txunicastframes_gb;
350 u64 txmulticastframes_gb;
351 u64 txbroadcastframes_gb;
352 u64 txunderflowerror;
353 u64 txoctetcount_g;
354 u64 txframecount_g;
355 u64 txpauseframes;
356 u64 txvlanframes_g;
357
358 /* Rx Stats */
359 u64 rxframecount_gb;
360 u64 rxoctetcount_gb;
361 u64 rxoctetcount_g;
362 u64 rxbroadcastframes_g;
363 u64 rxmulticastframes_g;
364 u64 rxcrcerror;
365 u64 rxrunterror;
366 u64 rxjabbererror;
367 u64 rxundersize_g;
368 u64 rxoversize_g;
369 u64 rx64octets_gb;
370 u64 rx65to127octets_gb;
371 u64 rx128to255octets_gb;
372 u64 rx256to511octets_gb;
373 u64 rx512to1023octets_gb;
374 u64 rx1024tomaxoctets_gb;
375 u64 rxunicastframes_g;
376 u64 rxlengtherror;
377 u64 rxoutofrangetype;
378 u64 rxpauseframes;
379 u64 rxfifooverflow;
380 u64 rxvlanframes_gb;
381 u64 rxwatchdogerror;
382};
383
384struct xgbe_hw_if {
385 int (*tx_complete)(struct xgbe_ring_desc *);
386
387 int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
388 int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
389 int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
390 int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
391
392 int (*enable_rx_csum)(struct xgbe_prv_data *);
393 int (*disable_rx_csum)(struct xgbe_prv_data *);
394
395 int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
396 int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
397
398 int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
399 void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
400 int (*set_gmii_speed)(struct xgbe_prv_data *);
401 int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
402 int (*set_xgmii_speed)(struct xgbe_prv_data *);
403
404 void (*enable_tx)(struct xgbe_prv_data *);
405 void (*disable_tx)(struct xgbe_prv_data *);
406 void (*enable_rx)(struct xgbe_prv_data *);
407 void (*disable_rx)(struct xgbe_prv_data *);
408
409 void (*powerup_tx)(struct xgbe_prv_data *);
410 void (*powerdown_tx)(struct xgbe_prv_data *);
411 void (*powerup_rx)(struct xgbe_prv_data *);
412 void (*powerdown_rx)(struct xgbe_prv_data *);
413
414 int (*init)(struct xgbe_prv_data *);
415 int (*exit)(struct xgbe_prv_data *);
416
417 int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
418 int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
419 void (*pre_xmit)(struct xgbe_channel *);
420 int (*dev_read)(struct xgbe_channel *);
421 void (*tx_desc_init)(struct xgbe_channel *);
422 void (*rx_desc_init)(struct xgbe_channel *);
423 void (*rx_desc_reset)(struct xgbe_ring_data *);
424 void (*tx_desc_reset)(struct xgbe_ring_data *);
425 int (*is_last_desc)(struct xgbe_ring_desc *);
426 int (*is_context_desc)(struct xgbe_ring_desc *);
427
428 /* For FLOW ctrl */
429 int (*config_tx_flow_control)(struct xgbe_prv_data *);
430 int (*config_rx_flow_control)(struct xgbe_prv_data *);
431
432 /* For RX coalescing */
433 int (*config_rx_coalesce)(struct xgbe_prv_data *);
434 int (*config_tx_coalesce)(struct xgbe_prv_data *);
435 unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
436 unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
437
438 /* For RX and TX threshold config */
439 int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
440 int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
441
442 /* For RX and TX Store and Forward Mode config */
443 int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
444 int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
445
446 /* For TX DMA Operate on Second Frame config */
447 int (*config_osp_mode)(struct xgbe_prv_data *);
448
449 /* For RX and TX PBL config */
450 int (*config_rx_pbl_val)(struct xgbe_prv_data *);
451 int (*get_rx_pbl_val)(struct xgbe_prv_data *);
452 int (*config_tx_pbl_val)(struct xgbe_prv_data *);
453 int (*get_tx_pbl_val)(struct xgbe_prv_data *);
454 int (*config_pblx8)(struct xgbe_prv_data *);
455
456 /* For MMC statistics */
457 void (*rx_mmc_int)(struct xgbe_prv_data *);
458 void (*tx_mmc_int)(struct xgbe_prv_data *);
459 void (*read_mmc_stats)(struct xgbe_prv_data *);
460};
461
462struct xgbe_desc_if {
463 int (*alloc_ring_resources)(struct xgbe_prv_data *);
464 void (*free_ring_resources)(struct xgbe_prv_data *);
465 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
466 void (*realloc_skb)(struct xgbe_channel *);
467 void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
468 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
469 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
470};
471
472/* This structure contains flags that indicate what hardware features
473 * or configurations are present in the device.
474 */
475struct xgbe_hw_features {
476 /* HW Feature Register0 */
477 unsigned int gmii; /* 1000 Mbps support */
478 unsigned int vlhash; /* VLAN Hash Filter */
479 unsigned int sma; /* SMA(MDIO) Interface */
480 unsigned int rwk; /* PMT remote wake-up packet */
481 unsigned int mgk; /* PMT magic packet */
482 unsigned int mmc; /* RMON module */
483 unsigned int aoe; /* ARP Offload */
484	unsigned int ts;		/* IEEE 1588-2008 Advanced Timestamp */
485 unsigned int eee; /* Energy Efficient Ethernet */
486 unsigned int tx_coe; /* Tx Checksum Offload */
487 unsigned int rx_coe; /* Rx Checksum Offload */
488 unsigned int addn_mac; /* Additional MAC Addresses */
489 unsigned int ts_src; /* Timestamp Source */
490 unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
491
492 /* HW Feature Register1 */
493 unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
494 unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
495	unsigned int adv_ts_hi;		/* Advanced Timestamping High Word */
496 unsigned int dcb; /* DCB Feature */
497 unsigned int sph; /* Split Header Feature */
498 unsigned int tso; /* TCP Segmentation Offload */
499 unsigned int dma_debug; /* DMA Debug Registers */
500 unsigned int rss; /* Receive Side Scaling */
501 unsigned int hash_table_size; /* Hash Table Size */
502 unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
503
504 /* HW Feature Register2 */
505 unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
506 unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
507 unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
508 unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
509 unsigned int pps_out_num; /* Number of PPS outputs */
510 unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
511};
512
513struct xgbe_prv_data {
514 struct net_device *netdev;
515 struct platform_device *pdev;
516 struct device *dev;
517
518 /* XGMAC/XPCS related mmio registers */
519 void __iomem *xgmac_regs; /* XGMAC CSRs */
520 void __iomem *xpcs_regs; /* XPCS MMD registers */
521
522 /* Overall device lock */
523 spinlock_t lock;
524
525 /* XPCS indirect addressing mutex */
526 struct mutex xpcs_mutex;
527
528 int irq_number;
529
530 struct xgbe_hw_if hw_if;
531 struct xgbe_desc_if desc_if;
532
533 /* Rings for Tx/Rx on a DMA channel */
534 struct xgbe_channel *channel;
535 unsigned int channel_count;
536 unsigned int tx_ring_count;
537 unsigned int tx_desc_count;
538 unsigned int rx_ring_count;
539 unsigned int rx_desc_count;
540
541 /* Tx/Rx common settings */
542 unsigned int pblx8;
543
544 /* Tx settings */
545 unsigned int tx_sf_mode;
546 unsigned int tx_threshold;
547 unsigned int tx_pbl;
548 unsigned int tx_osp_mode;
549
550 /* Rx settings */
551 unsigned int rx_sf_mode;
552 unsigned int rx_threshold;
553 unsigned int rx_pbl;
554
555 /* Tx coalescing settings */
556 unsigned int tx_usecs;
557 unsigned int tx_frames;
558
559 /* Rx coalescing settings */
560 unsigned int rx_riwt;
561 unsigned int rx_frames;
562
563 /* Current MTU */
564 unsigned int rx_buf_size;
565
566 /* Flow control settings */
567 unsigned int pause_autoneg;
568 unsigned int tx_pause;
569 unsigned int rx_pause;
570
571 /* MDIO settings */
572 struct module *phy_module;
573 char *mii_bus_id;
574 struct mii_bus *mii;
575 int mdio_mmd;
576 struct phy_device *phydev;
577 int default_autoneg;
578 int default_speed;
579
580 /* Current PHY settings */
581 phy_interface_t phy_mode;
582 int phy_link;
583 int phy_speed;
584 unsigned int phy_tx_pause;
585 unsigned int phy_rx_pause;
586
587 /* Netdev related settings */
588 netdev_features_t netdev_features;
589 struct napi_struct napi;
590 struct xgbe_mmc_stats mmc_stats;
591
592 /* System clock value used for Rx watchdog */
593 struct clk *sysclock;
594
595 /* Hardware features of the device */
596 struct xgbe_hw_features hw_feat;
597
598 /* Device restart work structure */
599 struct work_struct restart_work;
600
601 /* Keeps track of power mode */
602 unsigned int power_down;
603
604#ifdef CONFIG_DEBUG_FS
605 struct dentry *xgbe_debugfs;
606
607 unsigned int debugfs_xgmac_reg;
608
609 unsigned int debugfs_xpcs_mmd;
610 unsigned int debugfs_xpcs_reg;
611#endif
612};
613
614/* Function prototypes */
615
616void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
617void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
618struct net_device_ops *xgbe_get_netdev_ops(void);
619struct ethtool_ops *xgbe_get_ethtool_ops(void);
620
621int xgbe_mdio_register(struct xgbe_prv_data *);
622void xgbe_mdio_unregister(struct xgbe_prv_data *);
623void xgbe_dump_phy_registers(struct xgbe_prv_data *);
624void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
625 unsigned int);
626void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
627 unsigned int);
628void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
629void xgbe_get_all_hw_features(struct xgbe_prv_data *);
630int xgbe_powerup(struct net_device *, unsigned int);
631int xgbe_powerdown(struct net_device *, unsigned int);
632void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
633void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
634
635#ifdef CONFIG_DEBUG_FS
636void xgbe_debugfs_init(struct xgbe_prv_data *);
637void xgbe_debugfs_exit(struct xgbe_prv_data *);
638#else
639static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
640static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
641#endif /* CONFIG_DEBUG_FS */
642
643/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
644#if 0
645#define XGMAC_ENABLE_TX_DESC_DUMP
646#define XGMAC_ENABLE_RX_DESC_DUMP
647#endif
648
649/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
650#if 0
651#define XGMAC_ENABLE_TX_PKT_DUMP
652#define XGMAC_ENABLE_RX_PKT_DUMP
653#endif
654
655/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
656#if 0
657#define YDEBUG
658#define YDEBUG_MDIO
659#endif
660
661/* For debug prints */
662#ifdef YDEBUG
663#define DBGPR(x...) pr_alert(x)
664#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
665#else
666#define DBGPR(x...) do { } while (0)
667#define DBGPHY_REGS(x...) do { } while (0)
668#endif
669
670#ifdef YDEBUG_MDIO
671#define DBGPR_MDIO(x...) pr_alert(x)
672#else
673#define DBGPR_MDIO(x...) do { } while (0)
674#endif
675
676#endif
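
Both the BUILD_BUG_ON_NOT_POWER_OF_2() checks in xgbe_probe() and the
and-mask in GET_DESC_DATA() depend on the descriptor counts being powers of
two. A minimal sketch of the runtime form of that invariant (the helper name
is hypothetical):

#include <linux/types.h>

/* true for 1, 2, 4, 8, ...: a power of two has exactly one bit set */
static inline bool example_desc_count_valid(unsigned int count)
{
	return count != 0 && (count & (count - 1)) == 0;
}
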
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d115ac..18e2faccebb0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
13 * Vineet Gupta
14 */
15
16#include <linux/crc32.h>
17#include <linux/etherdevice.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
362 return IRQ_HANDLED; 363 return IRQ_HANDLED;
363} 364}
364 365
366#ifdef CONFIG_NET_POLL_CONTROLLER
367static void arc_emac_poll_controller(struct net_device *dev)
368{
369 disable_irq(dev->irq);
370 arc_emac_intr(dev->irq, dev);
371 enable_irq(dev->irq);
372}
373#endif
374
365/** 375/**
366 * arc_emac_open - Open the network device. 376 * arc_emac_open - Open the network device.
367 * @ndev: Pointer to the network device. 377 * @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
451} 461}
452 462
453/** 463/**
464 * arc_emac_set_rx_mode - Change the receive filtering mode.
465 * @ndev: Pointer to the network device.
466 *
467 * This function enables/disables promiscuous or all-multicast mode
468 * and updates the multicast filtering list of the network device.
469 */
470static void arc_emac_set_rx_mode(struct net_device *ndev)
471{
472 struct arc_emac_priv *priv = netdev_priv(ndev);
473
474 if (ndev->flags & IFF_PROMISC) {
475 arc_reg_or(priv, R_CTRL, PROM_MASK);
476 } else {
477 arc_reg_clr(priv, R_CTRL, PROM_MASK);
478
479 if (ndev->flags & IFF_ALLMULTI) {
480 arc_reg_set(priv, R_LAFL, ~0);
481 arc_reg_set(priv, R_LAFH, ~0);
482 } else {
483 struct netdev_hw_addr *ha;
484 unsigned int filter[2] = { 0, 0 };
485 int bit;
486
487 netdev_for_each_mc_addr(ha, ndev) {
488 bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
489 filter[bit >> 5] |= 1 << (bit & 31);
490 }
491
492 arc_reg_set(priv, R_LAFL, filter[0]);
493 arc_reg_set(priv, R_LAFH, filter[1]);
494 }
495 }
496}
497
498/**
454 * arc_emac_stop - Close the network device. 499 * arc_emac_stop - Close the network device.
455 * @ndev: Pointer to the network device. 500 * @ndev: Pointer to the network device.
456 * 501 *
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
620 .ndo_start_xmit = arc_emac_tx, 665 .ndo_start_xmit = arc_emac_tx,
621 .ndo_set_mac_address = arc_emac_set_address, 666 .ndo_set_mac_address = arc_emac_set_address,
622 .ndo_get_stats = arc_emac_stats, 667 .ndo_get_stats = arc_emac_stats,
668 .ndo_set_rx_mode = arc_emac_set_rx_mode,
669#ifdef CONFIG_NET_POLL_CONTROLLER
670 .ndo_poll_controller = arc_emac_poll_controller,
671#endif
623}; 672};
624 673
625static int arc_emac_probe(struct platform_device *pdev) 674static int arc_emac_probe(struct platform_device *pdev)
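
The arc_emac_set_rx_mode() hunk above hashes each multicast address with ether_crc_le() and uses the top six bits of the CRC to pick one of 64 filter bits, split across the two 32-bit LAFL/LAFH registers. Below is a standalone model of that computation, assuming ether_crc_le() is the standard reflected CRC-32 (polynomial 0xedb88320, initial value 0xffffffff, no final inversion), which matches the lib/crc32.c implementation:

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *data++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t filter[2] = { 0, 0 };
	int bit = crc32_le(6, mcast) >> 26;   /* top six bits: 0..63 */

	filter[bit >> 5] |= 1u << (bit & 31); /* [0] -> R_LAFL, [1] -> R_LAFH */
	printf("bit=%d LAFL=0x%08x LAFH=0x%08x\n", bit, filter[0], filter[1]);
	return 0;
}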
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce96260..49faa97a30c3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1302 } 1302 }
1303 1303
1304 netdev->netdev_ops = &alx_netdev_ops; 1304 netdev->netdev_ops = &alx_netdev_ops;
1305 SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); 1305 netdev->ethtool_ops = &alx_ethtool_ops;
1306 netdev->irq = pdev->irq; 1306 netdev->irq = pdev->irq;
1307 netdev->watchdog_timeo = ALX_WATCHDOG_TIME; 1307 netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
1308 1308
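
This hunk, like the matching ones in the drivers that follow, drops the SET_ETHTOOL_OPS() wrapper in favour of assigning netdev->ethtool_ops directly. A compilable sketch of the equivalence; the demo_* types stand in for the real net_device and ethtool_ops, and the macro body here is a reconstruction of the old <linux/netdevice.h> one-liner rather than a quotation of it:

struct demo_ethtool_ops { int unused; };
struct demo_net_device { const struct demo_ethtool_ops *ethtool_ops; };

#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

static const struct demo_ethtool_ops demo_ops;

int main(void)
{
	struct demo_net_device dev;

	SET_ETHTOOL_OPS(&dev, &demo_ops); /* old form, via the macro */
	dev.ethtool_ops = &demo_ops;      /* new form, direct assignment */
	return dev.ethtool_ops == &demo_ops ? 0 : 1;
}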
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea844ba0f..48694c239d5c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
56 else 56 else
57 ecmd->duplex = DUPLEX_HALF; 57 ecmd->duplex = DUPLEX_HALF;
58 } else { 58 } else {
59 ethtool_cmd_speed_set(ecmd, -1); 59 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
60 ecmd->duplex = -1; 60 ecmd->duplex = DUPLEX_UNKNOWN;
61 } 61 }
62 62
63 ecmd->autoneg = AUTONEG_ENABLE; 63 ecmd->autoneg = AUTONEG_ENABLE;
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
305 305
306void atl1c_set_ethtool_ops(struct net_device *netdev) 306void atl1c_set_ethtool_ops(struct net_device *netdev)
307{ 307{
308 SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops); 308 netdev->ethtool_ops = &atl1c_ethtool_ops;
309} 309}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b23861bf55..1be072f4afc2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
57 else 57 else
58 ecmd->duplex = DUPLEX_HALF; 58 ecmd->duplex = DUPLEX_HALF;
59 } else { 59 } else {
60 ethtool_cmd_speed_set(ecmd, -1); 60 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
61 ecmd->duplex = -1; 61 ecmd->duplex = DUPLEX_UNKNOWN;
62 } 62 }
63 63
64 ecmd->autoneg = AUTONEG_ENABLE; 64 ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
388 388
389void atl1e_set_ethtool_ops(struct net_device *netdev) 389void atl1e_set_ethtool_ops(struct net_device *netdev)
390{ 390{
391 SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops); 391 netdev->ethtool_ops = &atl1e_ethtool_ops;
392} 392}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91fa726..b460db7919a2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
3258 else 3258 else
3259 ecmd->duplex = DUPLEX_HALF; 3259 ecmd->duplex = DUPLEX_HALF;
3260 } else { 3260 } else {
3261 ethtool_cmd_speed_set(ecmd, -1); 3261 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
3262 ecmd->duplex = -1; 3262 ecmd->duplex = DUPLEX_UNKNOWN;
3263 } 3263 }
3264 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || 3264 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3265 hw->media_type == MEDIA_TYPE_1000M_FULL) 3265 hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb522a52..6746bd717146 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1396 atl2_setup_pcicmd(pdev); 1396 atl2_setup_pcicmd(pdev);
1397 1397
1398 netdev->netdev_ops = &atl2_netdev_ops; 1398 netdev->netdev_ops = &atl2_netdev_ops;
1399 SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); 1399 netdev->ethtool_ops = &atl2_ethtool_ops;
1400 netdev->watchdog_timeo = 5 * HZ; 1400 netdev->watchdog_timeo = 5 * HZ;
1401 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1401 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1402 1402
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
1769 else 1769 else
1770 ecmd->duplex = DUPLEX_HALF; 1770 ecmd->duplex = DUPLEX_HALF;
1771 } else { 1771 } else {
1772 ethtool_cmd_speed_set(ecmd, -1); 1772 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
1773 ecmd->duplex = -1; 1773 ecmd->duplex = DUPLEX_UNKNOWN;
1774 } 1774 }
1775 1775
1776 ecmd->autoneg = AUTONEG_ENABLE; 1776 ecmd->autoneg = AUTONEG_ENABLE;
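
The -1 sentinels replaced in these get_settings handlers already truncated to the same bit patterns that the named constants encode, so the conversion is about readability rather than behaviour. A small sketch, assuming the uapi <linux/ethtool.h> values SPEED_UNKNOWN == -1 and DUPLEX_UNKNOWN == 0xff:

#include <stdio.h>
#include <stdint.h>

#define SPEED_UNKNOWN  -1
#define DUPLEX_UNKNOWN 0xff

int main(void)
{
	uint32_t speed  = (uint32_t)SPEED_UNKNOWN; /* what storing -1 yielded */
	uint8_t  duplex = (uint8_t)-1;             /* ecmd->duplex is a __u8 */

	printf("speed  = 0x%08x\n", speed);        /* 0xffffffff */
	printf("duplex == DUPLEX_UNKNOWN: %d\n",
	       duplex == DUPLEX_UNKNOWN);          /* 1 */
	return 0;
}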
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
150 In case of using this driver on BCM4706 it is also required to enable 150 In case of using this driver on BCM4706 it is also required to enable
151 BCMA_DRIVER_GMAC_CMN to make it work. 151 BCMA_DRIVER_GMAC_CMN to make it work.
152 152
153config SYSTEMPORT
154 tristate "Broadcom SYSTEMPORT internal MAC support"
155 depends on OF
156 select MII
157 select PHYLIB
158 select FIXED_PHY if SYSTEMPORT=y
159 help
160 This driver supports the built-in Ethernet MACs found in the
161 Broadcom BCM7xxx Set Top Box family chipset using an internal
162 Ethernet switch.
163
153endif # NET_VENDOR_BROADCOM 164endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
11obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 11obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
12obj-$(CONFIG_TIGON3) += tg3.o 12obj-$(CONFIG_TIGON3) += tg3.o
13obj-$(CONFIG_BGMAC) += bgmac.o 13obj-$(CONFIG_BGMAC) += bgmac.o
14obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba62589017..ca5a20a48b14 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
2380 netif_napi_add(dev, &bp->napi, b44_poll, 64); 2380 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2381 dev->watchdog_timeo = B44_TX_TIMEOUT; 2381 dev->watchdog_timeo = B44_TX_TIMEOUT;
2382 dev->irq = sdev->irq; 2382 dev->irq = sdev->irq;
2383 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); 2383 dev->ethtool_ops = &b44_ethtool_ops;
2384 2384
2385 err = ssb_bus_powerup(sdev->bus, 0); 2385 err = ssb_bus_powerup(sdev->bus, 0);
2386 if (err) { 2386 if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..3e8d1a88ed3d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1315 1315
1316}; 1316};
1317 1317
1318#define BCM_ENET_STATS_LEN \ 1318#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
1319 (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1320 1319
1321static const u32 unused_mib_regs[] = { 1320static const u32 unused_mib_regs[] = {
1322 ETH_MIB_TX_ALL_OCTETS, 1321 ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
1898 dev->netdev_ops = &bcm_enet_ops; 1897 dev->netdev_ops = &bcm_enet_ops;
1899 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1898 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1900 1899
1901 SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); 1900 dev->ethtool_ops = &bcm_enet_ethtool_ops;
1902 SET_NETDEV_DEV(dev, &pdev->dev); 1901 SET_NETDEV_DEV(dev, &pdev->dev);
1903 1902
1904 ret = register_netdev(dev); 1903 ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2784 /* register netdevice */ 2783 /* register netdevice */
2785 dev->netdev_ops = &bcm_enetsw_ops; 2784 dev->netdev_ops = &bcm_enetsw_ops;
2786 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 2785 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2787 SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); 2786 dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2788 SET_NETDEV_DEV(dev, &pdev->dev); 2787 SET_NETDEV_DEV(dev, &pdev->dev);
2789 2788
2790 spin_lock_init(&priv->enetsw_mdio_lock); 2789 spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..141160ef249a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/platform_device.h>
20#include <linux/of.h>
21#include <linux/of_net.h>
22#include <linux/of_mdio.h>
23#include <linux/phy.h>
24#include <linux/phy_fixed.h>
25#include <net/ip.h>
26#include <net/ipv6.h>
27
28#include "bcmsysport.h"
29
30/* I/O accessors register helpers */
31#define BCM_SYSPORT_IO_MACRO(name, offset) \
32static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
33{ \
34 u32 reg = __raw_readl(priv->base + offset + off); \
35 return reg; \
36} \
37static inline void name##_writel(struct bcm_sysport_priv *priv, \
38 u32 val, u32 off) \
39{ \
40 __raw_writel(val, priv->base + offset + off); \
41} \
42
43BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
44BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
45BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
46BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
47BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
48BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
49BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
50BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
51BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
52BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
53
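For reference when reading the rest of the file, each BCM_SYSPORT_IO_MACRO() instantiation above stamps out a readl/writel pair bound to one register block. Expanding the umac case by hand gives roughly the following (the temporary variable in the original read accessor is elided):

static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
{
	return __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
}

static inline void umac_writel(struct bcm_sysport_priv *priv,
			       u32 val, u32 off)
{
	__raw_writel(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
}
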
54/* L2-interrupt masking/unmasking helpers that automatically save the applied
55 * mask in a software copy, avoiding CPU_MASK_STATUS reads in hot paths.
56 */
57#define BCM_SYSPORT_INTR_L2(which) \
58static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
59 u32 mask) \
60{ \
61 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
62 priv->irq##which##_mask &= ~(mask); \
63} \
64static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
65 u32 mask) \
66{ \
67 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
68 priv->irq##which##_mask |= (mask); \
69} \
70
71BCM_SYSPORT_INTR_L2(0)
72BCM_SYSPORT_INTR_L2(1)
73
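The generated *_mask_set/*_mask_clear helpers keep priv->irq0_mask and priv->irq1_mask as shadows of the hardware mask so that hot paths never read CPU_MASK_STATUS back over the slow register bus. A userspace model of the shadow-register idea (the struct and field names are illustrative only):

#include <stdio.h>
#include <stdint.h>

struct model {
	uint32_t hw_mask; /* stands in for the INTRL2 mask register */
	uint32_t sw_mask; /* the driver's software copy */
};

static void mask_set(struct model *m, uint32_t bits)
{
	m->hw_mask |= bits;  /* one MMIO write in the real driver */
	m->sw_mask |= bits;  /* cheap CPU-local mirror */
}

static void mask_clear(struct model *m, uint32_t bits)
{
	m->hw_mask &= ~bits;
	m->sw_mask &= ~bits;
}

int main(void)
{
	struct model m = { 0, 0 };

	mask_set(&m, 1u << 3);
	mask_clear(&m, 1u << 3);
	/* handlers consult sw_mask without a single register read */
	printf("sw copy = 0x%08x\n", m.sw_mask);
	return 0;
}
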
74/* Register accesses to GISB/RBUS registers are expensive (a few hundred
75 * nanoseconds), so keep the 64-bit check explicit here to save
76 * one register write per packet on 32-bit platforms.
77 */
78static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
79 void __iomem *d,
80 dma_addr_t addr)
81{
82#ifdef CONFIG_PHYS_ADDR_T_64BIT
83 __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
84 d + DESC_ADDR_HI_STATUS_LEN);
85#endif
86 __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
87}
88
89static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
90 struct dma_desc *desc,
91 unsigned int port)
92{
93 /* Ports are latched, so write upper address first */
94 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
95 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
96}
97
98/* Ethtool operations */
99static int bcm_sysport_set_settings(struct net_device *dev,
100 struct ethtool_cmd *cmd)
101{
102 struct bcm_sysport_priv *priv = netdev_priv(dev);
103
104 if (!netif_running(dev))
105 return -EINVAL;
106
107 return phy_ethtool_sset(priv->phydev, cmd);
108}
109
110static int bcm_sysport_get_settings(struct net_device *dev,
111 struct ethtool_cmd *cmd)
112{
113 struct bcm_sysport_priv *priv = netdev_priv(dev);
114
115 if (!netif_running(dev))
116 return -EINVAL;
117
118 return phy_ethtool_gset(priv->phydev, cmd);
119}
120
121static int bcm_sysport_set_rx_csum(struct net_device *dev,
122 netdev_features_t wanted)
123{
124 struct bcm_sysport_priv *priv = netdev_priv(dev);
125 u32 reg;
126
127 priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
128 reg = rxchk_readl(priv, RXCHK_CONTROL);
129 if (priv->rx_csum_en)
130 reg |= RXCHK_EN;
131 else
132 reg &= ~RXCHK_EN;
133
134 /* If UniMAC forwards CRC, we need to skip over it to get
135 * a valid CHK bit to be set in the per-packet status word
136 */
137 if (priv->rx_csum_en && priv->crc_fwd)
138 reg |= RXCHK_SKIP_FCS;
139 else
140 reg &= ~RXCHK_SKIP_FCS;
141
142 rxchk_writel(priv, reg, RXCHK_CONTROL);
143
144 return 0;
145}
146
147static int bcm_sysport_set_tx_csum(struct net_device *dev,
148 netdev_features_t wanted)
149{
150 struct bcm_sysport_priv *priv = netdev_priv(dev);
151 u32 reg;
152
153 /* Hardware transmit checksum requires us to enable the Transmit status
154 * block prepended to the packet contents
155 */
156 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
157 reg = tdma_readl(priv, TDMA_CONTROL);
158 if (priv->tsb_en)
159 reg |= TSB_EN;
160 else
161 reg &= ~TSB_EN;
162 tdma_writel(priv, reg, TDMA_CONTROL);
163
164 return 0;
165}
166
167static int bcm_sysport_set_features(struct net_device *dev,
168 netdev_features_t features)
169{
170 netdev_features_t changed = features ^ dev->features;
171 netdev_features_t wanted = dev->wanted_features;
172 int ret = 0;
173
174 if (changed & NETIF_F_RXCSUM)
175 ret = bcm_sysport_set_rx_csum(dev, wanted);
176 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
177 ret = bcm_sysport_set_tx_csum(dev, wanted);
178
179 return ret;
180}
181
182/* Hardware counters must be kept in sync because the order/offset
183 * is important here (order in structure declaration = order in hardware)
184 */
185static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
186 /* general stats */
187 STAT_NETDEV(rx_packets),
188 STAT_NETDEV(tx_packets),
189 STAT_NETDEV(rx_bytes),
190 STAT_NETDEV(tx_bytes),
191 STAT_NETDEV(rx_errors),
192 STAT_NETDEV(tx_errors),
193 STAT_NETDEV(rx_dropped),
194 STAT_NETDEV(tx_dropped),
195 STAT_NETDEV(multicast),
196 /* UniMAC RSV counters */
197 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
198 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
199 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
200 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
201 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
202 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
203 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
204 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
205 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
206 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
207 STAT_MIB_RX("rx_pkts", mib.rx.pkt),
208 STAT_MIB_RX("rx_bytes", mib.rx.bytes),
209 STAT_MIB_RX("rx_multicast", mib.rx.mca),
210 STAT_MIB_RX("rx_broadcast", mib.rx.bca),
211 STAT_MIB_RX("rx_fcs", mib.rx.fcs),
212 STAT_MIB_RX("rx_control", mib.rx.cf),
213 STAT_MIB_RX("rx_pause", mib.rx.pf),
214 STAT_MIB_RX("rx_unknown", mib.rx.uo),
215 STAT_MIB_RX("rx_align", mib.rx.aln),
216 STAT_MIB_RX("rx_outrange", mib.rx.flr),
217 STAT_MIB_RX("rx_code", mib.rx.cde),
218 STAT_MIB_RX("rx_carrier", mib.rx.fcr),
219 STAT_MIB_RX("rx_oversize", mib.rx.ovr),
220 STAT_MIB_RX("rx_jabber", mib.rx.jbr),
221 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
222 STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
223 STAT_MIB_RX("rx_unicast", mib.rx.uc),
224 STAT_MIB_RX("rx_ppp", mib.rx.ppp),
225 STAT_MIB_RX("rx_crc", mib.rx.rcrc),
226 /* UniMAC TSV counters */
227 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
228 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
229 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
230 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
231 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
232 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
233 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
234 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
235 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
236 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
237 STAT_MIB_TX("tx_pkts", mib.tx.pkts),
238 STAT_MIB_TX("tx_multicast", mib.tx.mca),
239 STAT_MIB_TX("tx_broadcast", mib.tx.bca),
240 STAT_MIB_TX("tx_pause", mib.tx.pf),
241 STAT_MIB_TX("tx_control", mib.tx.cf),
242 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
243 STAT_MIB_TX("tx_oversize", mib.tx.ovr),
244 STAT_MIB_TX("tx_defer", mib.tx.drf),
245 STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
246 STAT_MIB_TX("tx_single_col", mib.tx.scl),
247 STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
248 STAT_MIB_TX("tx_late_col", mib.tx.lcl),
249 STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
250 STAT_MIB_TX("tx_frags", mib.tx.frg),
251 STAT_MIB_TX("tx_total_col", mib.tx.ncl),
252 STAT_MIB_TX("tx_jabber", mib.tx.jbr),
253 STAT_MIB_TX("tx_bytes", mib.tx.bytes),
254 STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
255 STAT_MIB_TX("tx_unicast", mib.tx.uc),
256 /* UniMAC RUNT counters */
257 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
258 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
259 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
260 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
261 /* RXCHK misc statistics */
262 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
263 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
264 RXCHK_OTHER_DISC_CNTR),
265 /* RBUF misc statistics */
266 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
267 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
268};
269
270#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
271
272static void bcm_sysport_get_drvinfo(struct net_device *dev,
273 struct ethtool_drvinfo *info)
274{
275 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
276 strlcpy(info->version, "0.1", sizeof(info->version));
277 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
278 info->n_stats = BCM_SYSPORT_STATS_LEN;
279}
280
281static u32 bcm_sysport_get_msglvl(struct net_device *dev)
282{
283 struct bcm_sysport_priv *priv = netdev_priv(dev);
284
285 return priv->msg_enable;
286}
287
288static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
289{
290 struct bcm_sysport_priv *priv = netdev_priv(dev);
291
292 priv->msg_enable = enable;
293}
294
295static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
296{
297 switch (string_set) {
298 case ETH_SS_STATS:
299 return BCM_SYSPORT_STATS_LEN;
300 default:
301 return -EOPNOTSUPP;
302 }
303}
304
305static void bcm_sysport_get_strings(struct net_device *dev,
306 u32 stringset, u8 *data)
307{
308 int i;
309
310 switch (stringset) {
311 case ETH_SS_STATS:
312 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
313 memcpy(data + i * ETH_GSTRING_LEN,
314 bcm_sysport_gstrings_stats[i].stat_string,
315 ETH_GSTRING_LEN);
316 }
317 break;
318 default:
319 break;
320 }
321}
322
323static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
324{
325 int i, j = 0;
326
327 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
328 const struct bcm_sysport_stats *s;
329 u8 offset = 0;
330 u32 val = 0;
331 char *p;
332
333 s = &bcm_sysport_gstrings_stats[i];
334 switch (s->type) {
335 case BCM_SYSPORT_STAT_NETDEV:
336 continue;
337 case BCM_SYSPORT_STAT_MIB_RX:
338 case BCM_SYSPORT_STAT_MIB_TX:
339 case BCM_SYSPORT_STAT_RUNT:
340 if (s->type != BCM_SYSPORT_STAT_MIB_RX)
341 offset = UMAC_MIB_STAT_OFFSET;
342 val = umac_readl(priv, UMAC_MIB_START + j + offset);
343 break;
344 case BCM_SYSPORT_STAT_RXCHK:
345 val = rxchk_readl(priv, s->reg_offset);
346 if (val == ~0)
347 rxchk_writel(priv, 0, s->reg_offset);
348 break;
349 case BCM_SYSPORT_STAT_RBUF:
350 val = rbuf_readl(priv, s->reg_offset);
351 if (val == ~0)
352 rbuf_writel(priv, 0, s->reg_offset);
353 break;
354 }
355
356 j += s->stat_sizeof;
357 p = (char *)priv + s->stat_offset;
358 *(u32 *)p = val;
359 }
360
361 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
362}
363
364static void bcm_sysport_get_stats(struct net_device *dev,
365 struct ethtool_stats *stats, u64 *data)
366{
367 struct bcm_sysport_priv *priv = netdev_priv(dev);
368 int i;
369
370 if (netif_running(dev))
371 bcm_sysport_update_mib_counters(priv);
372
373 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
374 const struct bcm_sysport_stats *s;
375 char *p;
376
377 s = &bcm_sysport_gstrings_stats[i];
378 if (s->type == BCM_SYSPORT_STAT_NETDEV)
379 p = (char *)&dev->stats;
380 else
381 p = (char *)priv;
382 p += s->stat_offset;
383 data[i] = *(u32 *)p;
384 }
385}
386
387static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
388{
389 dev_kfree_skb_any(cb->skb);
390 cb->skb = NULL;
391 dma_unmap_addr_set(cb, dma_addr, 0);
392}
393
394static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
395 struct bcm_sysport_cb *cb)
396{
397 struct device *kdev = &priv->pdev->dev;
398 struct net_device *ndev = priv->netdev;
399 dma_addr_t mapping;
400 int ret;
401
402 cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
403 if (!cb->skb) {
404 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
405 return -ENOMEM;
406 }
407
408 mapping = dma_map_single(kdev, cb->skb->data,
409 RX_BUF_LENGTH, DMA_FROM_DEVICE);
410 ret = dma_mapping_error(kdev, mapping);
411 if (ret) {
412 bcm_sysport_free_cb(cb);
413 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
414 return ret;
415 }
416
417 dma_unmap_addr_set(cb, dma_addr, mapping);
418 dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
419
420 priv->rx_bd_assign_index++;
421 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
422 priv->rx_bd_assign_ptr = priv->rx_bds +
423 (priv->rx_bd_assign_index * DESC_SIZE);
424
425 netif_dbg(priv, rx_status, ndev, "RX refill\n");
426
427 return 0;
428}
429
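bcm_sysport_rx_refill() wraps rx_bd_assign_index with "& (num_rx_bds - 1)" instead of a modulo, which is only valid because the ring size is a power of two. A minimal demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int num_rx_bds = 8; /* must be a power of two */
	unsigned int index = 0;

	for (int i = 0; i < 12; i++) {
		printf("%u ", index);
		index++;
		index &= (num_rx_bds - 1); /* wraps 7 -> 0 */
	}
	printf("\n"); /* 0 1 2 3 4 5 6 7 0 1 2 3 */
	return 0;
}
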
430static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
431{
432 struct bcm_sysport_cb *cb;
433 int ret = 0;
434 unsigned int i;
435
436 for (i = 0; i < priv->num_rx_bds; i++) {
437 cb = &priv->rx_cbs[priv->rx_bd_assign_index];
438 if (cb->skb)
439 continue;
440
441 ret = bcm_sysport_rx_refill(priv, cb);
442 if (ret)
443 break;
444 }
445
446 return ret;
447}
448
449/* Poll the hardware for up to budget packets to process */
450static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
451 unsigned int budget)
452{
453 struct device *kdev = &priv->pdev->dev;
454 struct net_device *ndev = priv->netdev;
455 unsigned int processed = 0, to_process;
456 struct bcm_sysport_cb *cb;
457 struct sk_buff *skb;
458 unsigned int p_index;
459 u16 len, status;
460 struct bcm_rsb *rsb;
461
462 /* Determine how much we should process since last call */
463 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
464 p_index &= RDMA_PROD_INDEX_MASK;
465
466 if (p_index < priv->rx_c_index)
467 to_process = (RDMA_CONS_INDEX_MASK + 1) -
468 priv->rx_c_index + p_index;
469 else
470 to_process = p_index - priv->rx_c_index;
471
472 netif_dbg(priv, rx_status, ndev,
473 "p_index=%d rx_c_index=%d to_process=%d\n",
474 p_index, priv->rx_c_index, to_process);
475
476 while ((processed < to_process) &&
477 (processed < budget)) {
478
479 cb = &priv->rx_cbs[priv->rx_read_ptr];
480 skb = cb->skb;
481 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
482 RX_BUF_LENGTH, DMA_FROM_DEVICE);
483
484 /* Extract the Receive Status Block prepended */
485 rsb = (struct bcm_rsb *)skb->data;
486 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
487 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
488 DESC_STATUS_MASK;
489
490 processed++;
491 priv->rx_read_ptr++;
492 if (priv->rx_read_ptr == priv->num_rx_bds)
493 priv->rx_read_ptr = 0;
494
495 netif_dbg(priv, rx_status, ndev,
496 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
497 p_index, priv->rx_c_index, priv->rx_read_ptr,
498 len, status);
499
500 if (unlikely(!skb)) {
501 netif_err(priv, rx_err, ndev, "out of memory!\n");
502 ndev->stats.rx_dropped++;
503 ndev->stats.rx_errors++;
504 goto refill;
505 }
506
507 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
508 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
509 ndev->stats.rx_dropped++;
510 ndev->stats.rx_errors++;
511 bcm_sysport_free_cb(cb);
512 goto refill;
513 }
514
515 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
516 netif_err(priv, rx_err, ndev, "error packet\n");
517 if (status & RX_STATUS_OVFLOW)
518 ndev->stats.rx_over_errors++;
519 ndev->stats.rx_dropped++;
520 ndev->stats.rx_errors++;
521 bcm_sysport_free_cb(cb);
522 goto refill;
523 }
524
525 skb_put(skb, len);
526
527 /* Hardware validated our checksum */
528 if (likely(status & DESC_L4_CSUM))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
530
531 /* Hardware prepends packets with 2 bytes before the Ethernet
532 * header, plus we have the Receive Status Block; strip all of
533 * this off the SKB.
534 */
535 skb_pull(skb, sizeof(*rsb) + 2);
536 len -= (sizeof(*rsb) + 2);
537
538 /* UniMAC may forward CRC */
539 if (priv->crc_fwd) {
540 skb_trim(skb, len - ETH_FCS_LEN);
541 len -= ETH_FCS_LEN;
542 }
543
544 skb->protocol = eth_type_trans(skb, ndev);
545 ndev->stats.rx_packets++;
546 ndev->stats.rx_bytes += len;
547
548 napi_gro_receive(&priv->napi, skb);
549refill:
550 bcm_sysport_rx_refill(priv, cb);
551 }
552
553 return processed;
554}
555
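The to_process arithmetic at the top of bcm_sysport_desc_rx() has to cope with the hardware producer index wrapping past the index mask. A worked standalone version (a 16-bit mask is assumed purely for illustration; the real width comes from RDMA_PROD_INDEX_MASK and RDMA_CONS_INDEX_MASK):

#include <stdio.h>

#define CONS_INDEX_MASK 0xffff

static unsigned int to_process(unsigned int p_index, unsigned int c_index)
{
	if (p_index < c_index) /* producer wrapped past the mask */
		return (CONS_INDEX_MASK + 1) - c_index + p_index;
	return p_index - c_index;
}

int main(void)
{
	printf("%u\n", to_process(105, 100));  /* no wrap: 5 */
	printf("%u\n", to_process(3, 0xfffe)); /* wrapped:  5 */
	return 0;
}
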
556static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
557 struct bcm_sysport_cb *cb,
558 unsigned int *bytes_compl,
559 unsigned int *pkts_compl)
560{
561 struct device *kdev = &priv->pdev->dev;
562 struct net_device *ndev = priv->netdev;
563
564 if (cb->skb) {
565 ndev->stats.tx_bytes += cb->skb->len;
566 *bytes_compl += cb->skb->len;
567 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
568 dma_unmap_len(cb, dma_len),
569 DMA_TO_DEVICE);
570 ndev->stats.tx_packets++;
571 (*pkts_compl)++;
572 bcm_sysport_free_cb(cb);
573 /* SKB fragment */
574 } else if (dma_unmap_addr(cb, dma_addr)) {
575 ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
576 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
577 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
578 dma_unmap_addr_set(cb, dma_addr, 0);
579 }
580}
581
582/* Reclaim queued SKBs for transmission completion, lockless version */
583static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
584 struct bcm_sysport_tx_ring *ring)
585{
586 struct net_device *ndev = priv->netdev;
587 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
588 unsigned int pkts_compl = 0, bytes_compl = 0;
589 struct bcm_sysport_cb *cb;
590 struct netdev_queue *txq;
591 u32 hw_ind;
592
593 txq = netdev_get_tx_queue(ndev, ring->index);
594
595 /* Compute how many descriptors have been processed since last call */
596 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
597 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
598 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
599
600 last_c_index = ring->c_index;
601 num_tx_cbs = ring->size;
602
603 c_index &= (num_tx_cbs - 1);
604
605 if (c_index >= last_c_index)
606 last_tx_cn = c_index - last_c_index;
607 else
608 last_tx_cn = num_tx_cbs - last_c_index + c_index;
609
610 netif_dbg(priv, tx_done, ndev,
611 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
612 ring->index, c_index, last_tx_cn, last_c_index);
613
614 while (last_tx_cn-- > 0) {
615 cb = ring->cbs + last_c_index;
616 bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
617
618 ring->desc_count++;
619 last_c_index++;
620 last_c_index &= (num_tx_cbs - 1);
621 }
622
623 ring->c_index = c_index;
624
625 if (netif_tx_queue_stopped(txq) && pkts_compl)
626 netif_tx_wake_queue(txq);
627
628 netif_dbg(priv, tx_done, ndev,
629 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
630 ring->index, ring->c_index, pkts_compl, bytes_compl);
631
632 return pkts_compl;
633}
634
635/* Locked version of the per-ring TX reclaim routine */
636static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
637 struct bcm_sysport_tx_ring *ring)
638{
639 unsigned int released;
640 unsigned long flags;
641
642 spin_lock_irqsave(&ring->lock, flags);
643 released = __bcm_sysport_tx_reclaim(priv, ring);
644 spin_unlock_irqrestore(&ring->lock, flags);
645
646 return released;
647}
648
649static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
650{
651 struct bcm_sysport_tx_ring *ring =
652 container_of(napi, struct bcm_sysport_tx_ring, napi);
653 unsigned int work_done = 0;
654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656
657 if (work_done < budget) {
658 napi_complete(napi);
659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 }
662
663 return work_done;
664}
665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
667{
668 unsigned int q;
669
670 for (q = 0; q < priv->netdev->num_tx_queues; q++)
671 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
672}
673
674static int bcm_sysport_poll(struct napi_struct *napi, int budget)
675{
676 struct bcm_sysport_priv *priv =
677 container_of(napi, struct bcm_sysport_priv, napi);
678 unsigned int work_done = 0;
679
680 work_done = bcm_sysport_desc_rx(priv, budget);
681
682 priv->rx_c_index += work_done;
683 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
684 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
685
686 if (work_done < budget) {
687 napi_complete(napi);
688 /* re-enable RX interrupts */
689 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
690 }
691
692 return work_done;
693}
694
695
696/* RX and misc interrupt routine */
697static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
698{
699 struct net_device *dev = dev_id;
700 struct bcm_sysport_priv *priv = netdev_priv(dev);
701
702 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
703 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
704 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
705
706 if (unlikely(priv->irq0_stat == 0)) {
707 netdev_warn(priv->netdev, "spurious RX interrupt\n");
708 return IRQ_NONE;
709 }
710
711 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
712 if (likely(napi_schedule_prep(&priv->napi))) {
713 /* disable RX interrupts */
714 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
715 __napi_schedule(&priv->napi);
716 }
717 }
718
719 /* TX ring is full, perform a full reclaim since we do not know
720 * which one would trigger this interrupt
721 */
722 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
723 bcm_sysport_tx_reclaim_all(priv);
724
725 return IRQ_HANDLED;
726}
727
728/* TX interrupt service routine */
729static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
730{
731 struct net_device *dev = dev_id;
732 struct bcm_sysport_priv *priv = netdev_priv(dev);
733 struct bcm_sysport_tx_ring *txr;
734 unsigned int ring;
735
736 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
737 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
738 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
739
740 if (unlikely(priv->irq1_stat == 0)) {
741 netdev_warn(priv->netdev, "spurious TX interrupt\n");
742 return IRQ_NONE;
743 }
744
745 for (ring = 0; ring < dev->num_tx_queues; ring++) {
746 if (!(priv->irq1_stat & BIT(ring)))
747 continue;
748
749 txr = &priv->tx_rings[ring];
750
751 if (likely(napi_schedule_prep(&txr->napi))) {
752 intrl2_1_mask_set(priv, BIT(ring));
753 __napi_schedule(&txr->napi);
754 }
755 }
756
757 return IRQ_HANDLED;
758}
759
760static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
761{
762 struct sk_buff *nskb;
763 struct bcm_tsb *tsb;
764 u32 csum_info;
765 u8 ip_proto;
766 u16 csum_start;
767 u16 ip_ver;
768
769 /* Re-allocate SKB if needed */
770 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
771 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
772 dev_kfree_skb(skb);
773 if (!nskb) {
774 dev->stats.tx_errors++;
775 dev->stats.tx_dropped++;
776 return -ENOMEM;
777 }
778 skb = nskb;
779 }
780
781 tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
782 /* Zero-out TSB by default */
783 memset(tsb, 0, sizeof(*tsb));
784
785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
786 ip_ver = htons(skb->protocol);
787 switch (ip_ver) {
788 case ETH_P_IP:
789 ip_proto = ip_hdr(skb)->protocol;
790 break;
791 case ETH_P_IPV6:
792 ip_proto = ipv6_hdr(skb)->nexthdr;
793 break;
794 default:
795 return 0;
796 }
797
798 /* Get the checksum offset and the L4 (transport) offset */
799 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
800 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
801 csum_info |= (csum_start << L4_PTR_SHIFT);
802
803 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
804 csum_info |= L4_LENGTH_VALID;
805 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
806 csum_info |= L4_UDP;
807 } else
808 csum_info = 0;
809
810 tsb->l4_ptr_dest_map = csum_info;
811 }
812
813 return 0;
814}
815
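bcm_sysport_insert_tsb() packs two cursors into l4_ptr_dest_map: the low field tells the hardware where to store the computed checksum, the high field where the L4 header begins. A sketch of the packing with made-up field widths (the real L4_CSUM_PTR_MASK and L4_PTR_SHIFT live in bcmsysport.h, which is not part of this hunk):

#include <stdio.h>
#include <stdint.h>

#define L4_CSUM_PTR_MASK 0x1ff /* illustrative width */
#define L4_PTR_SHIFT     16    /* illustrative shift */

int main(void)
{
	uint16_t csum_start  = 34; /* L4 header offset past the TSB */
	uint16_t csum_offset = 16; /* offsetof(struct tcphdr, check) */
	uint32_t csum_info;

	csum_info  = (csum_start + csum_offset) & L4_CSUM_PTR_MASK;
	csum_info |= (uint32_t)csum_start << L4_PTR_SHIFT;

	printf("csum_info = 0x%08x\n", csum_info); /* 0x00220032 */
	return 0;
}
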
816static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
817 struct net_device *dev)
818{
819 struct bcm_sysport_priv *priv = netdev_priv(dev);
820 struct device *kdev = &priv->pdev->dev;
821 struct bcm_sysport_tx_ring *ring;
822 struct bcm_sysport_cb *cb;
823 struct netdev_queue *txq;
824 struct dma_desc *desc;
825 unsigned int skb_len;
826 unsigned long flags;
827 dma_addr_t mapping;
828 u32 len_status;
829 u16 queue;
830 int ret;
831
832 queue = skb_get_queue_mapping(skb);
833 txq = netdev_get_tx_queue(dev, queue);
834 ring = &priv->tx_rings[queue];
835
836 /* lock against tx reclaim in BH context and TX ring full interrupt */
837 spin_lock_irqsave(&ring->lock, flags);
838 if (unlikely(ring->desc_count == 0)) {
839 netif_tx_stop_queue(txq);
840 netdev_err(dev, "queue %d awake and ring full!\n", queue);
841 ret = NETDEV_TX_BUSY;
842 goto out;
843 }
844
845 /* Insert TSB and checksum infos */
846 if (priv->tsb_en) {
847 ret = bcm_sysport_insert_tsb(skb, dev);
848 if (ret) {
849 ret = NETDEV_TX_OK;
850 goto out;
851 }
852 }
853
854 /* The Ethernet switch we are interfaced with needs packets to be at
855 * least 64 bytes (including FCS) otherwise they will be discarded when
856 * they enter the switch port logic. When Broadcom tags are enabled, we
857 * need to make sure that packets are at least 68 bytes
858 * (including FCS and tag) because the length verification is done after
859 * the Broadcom tag is stripped off the ingress packet.
860 */
861 if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
862 ret = NETDEV_TX_OK;
863 goto out;
864 }
865
866 skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
867 ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
868
869 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
870 if (dma_mapping_error(kdev, mapping)) {
871 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
872 skb->data, skb_len);
873 ret = NETDEV_TX_OK;
874 goto out;
875 }
876
877 /* Remember the SKB for future freeing */
878 cb = &ring->cbs[ring->curr_desc];
879 cb->skb = skb;
880 dma_unmap_addr_set(cb, dma_addr, mapping);
881 dma_unmap_len_set(cb, dma_len, skb_len);
882
883 /* Fetch a descriptor entry from our pool */
884 desc = ring->desc_cpu;
885
886 desc->addr_lo = lower_32_bits(mapping);
887 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
888 len_status |= (skb_len << DESC_LEN_SHIFT);
889 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
890 DESC_STATUS_SHIFT;
891 if (skb->ip_summed == CHECKSUM_PARTIAL)
892 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
893
894 ring->curr_desc++;
895 if (ring->curr_desc == ring->size)
896 ring->curr_desc = 0;
897 ring->desc_count--;
898
899 /* Ensure write completion of the descriptor status/length
900 * in DRAM before the System Port WRITE_PORT register latches
901 * the value
902 */
903 wmb();
904 desc->addr_status_len = len_status;
905 wmb();
906
907 /* Write this descriptor address to the RING write port */
908 tdma_port_write_desc_addr(priv, desc, ring->index);
909
910 /* Check ring space and update SW control flow */
911 if (ring->desc_count == 0)
912 netif_tx_stop_queue(txq);
913
914 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
915 ring->index, ring->desc_count, ring->curr_desc);
916
917 ret = NETDEV_TX_OK;
918out:
919 spin_unlock_irqrestore(&ring->lock, flags);
920 return ret;
921}
922
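The wmb() pair in bcm_sysport_xmit() enforces fill-then-publish ordering: the descriptor words must be visible in DRAM before the WRITE_PORT register write lets the hardware fetch them. A rough userspace analogue of the same publish pattern using C11 release semantics (the kernel barrier is a stronger, device-visible one, so this only sketches the intent):

#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t addr_lo;
	_Atomic uint32_t addr_status_len;
};

static void publish(struct desc *d, uint32_t addr_lo, uint32_t len_status)
{
	d->addr_lo = addr_lo; /* plain store: payload first */
	atomic_store_explicit(&d->addr_status_len, len_status,
			      memory_order_release); /* then publish */
}

int main(void)
{
	struct desc d = { 0 };

	publish(&d, 0x1000, 64u << 8); /* illustrative length/status encoding */
	return 0;
}
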
923static void bcm_sysport_tx_timeout(struct net_device *dev)
924{
925 netdev_warn(dev, "transmit timeout!\n");
926
927 dev->trans_start = jiffies;
928 dev->stats.tx_errors++;
929
930 netif_tx_wake_all_queues(dev);
931}
932
933/* phylib adjust link callback */
934static void bcm_sysport_adj_link(struct net_device *dev)
935{
936 struct bcm_sysport_priv *priv = netdev_priv(dev);
937 struct phy_device *phydev = priv->phydev;
938 unsigned int changed = 0;
939 u32 cmd_bits = 0, reg;
940
941 if (priv->old_link != phydev->link) {
942 changed = 1;
943 priv->old_link = phydev->link;
944 }
945
946 if (priv->old_duplex != phydev->duplex) {
947 changed = 1;
948 priv->old_duplex = phydev->duplex;
949 }
950
951 switch (phydev->speed) {
952 case SPEED_2500:
953 cmd_bits = CMD_SPEED_2500;
954 break;
955 case SPEED_1000:
956 cmd_bits = CMD_SPEED_1000;
957 break;
958 case SPEED_100:
959 cmd_bits = CMD_SPEED_100;
960 break;
961 case SPEED_10:
962 cmd_bits = CMD_SPEED_10;
963 break;
964 default:
965 break;
966 }
967 cmd_bits <<= CMD_SPEED_SHIFT;
968
969 if (phydev->duplex == DUPLEX_HALF)
970 cmd_bits |= CMD_HD_EN;
971
972 if (priv->old_pause != phydev->pause) {
973 changed = 1;
974 priv->old_pause = phydev->pause;
975 }
976
977 if (!phydev->pause)
978 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
979
980 if (changed) {
981 reg = umac_readl(priv, UMAC_CMD);
982 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
983 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
984 CMD_TX_PAUSE_IGNORE);
985 reg |= cmd_bits;
986 umac_writel(priv, reg, UMAC_CMD);
987
988 phy_print_status(priv->phydev);
989 }
990}
991
992static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
993 unsigned int index)
994{
995 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
996 struct device *kdev = &priv->pdev->dev;
997 size_t size;
998 void *p;
999 u32 reg;
1000
1001 /* Simple descriptor partitioning for now */
1002 size = 256;
1003
1004 /* We just need one DMA descriptor which is DMA-able, since writing to
1005 * the port will allocate a new descriptor in its internal linked-list
1006 */
1007 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, GFP_KERNEL);
1008 if (!p) {
1009 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1010 return -ENOMEM;
1011 }
1012
1013 ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
1014 if (!ring->cbs) {
1015 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1016 return -ENOMEM;
1017 }
1018
1019 /* Initialize SW view of the ring */
1020 spin_lock_init(&ring->lock);
1021 ring->priv = priv;
1022 netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1023 ring->index = index;
1024 ring->size = size;
1025 ring->alloc_size = ring->size;
1026 ring->desc_cpu = p;
1027 ring->desc_count = ring->size;
1028 ring->curr_desc = 0;
1029
1030 /* Initialize HW ring */
1031 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1032 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1033 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1034 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1035 tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
1036 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1037
1038 /* Program the number of descriptors as MAX_THRESHOLD and half of
1039 * its size for the hysteresis trigger
1040 */
1041 tdma_writel(priv, ring->size |
1042 1 << RING_HYST_THRESH_SHIFT,
1043 TDMA_DESC_RING_MAX_HYST(index));
1044
1045 /* Enable the ring queue in the arbiter */
1046 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1047 reg |= (1 << index);
1048 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1049
1050 napi_enable(&ring->napi);
1051
1052 netif_dbg(priv, hw, priv->netdev,
1053 "TDMA cfg, size=%d, desc_cpu=%p\n",
1054 ring->size, ring->desc_cpu);
1055
1056 return 0;
1057}
1058
1059static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1060 unsigned int index)
1061{
1062 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1063 struct device *kdev = &priv->pdev->dev;
1064 u32 reg;
1065
1066 /* Caller should stop the TDMA engine */
1067 reg = tdma_readl(priv, TDMA_STATUS);
1068 if (!(reg & TDMA_DISABLED))
1069 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1070
1071 napi_disable(&ring->napi);
1072 netif_napi_del(&ring->napi);
1073
1074 bcm_sysport_tx_reclaim(priv, ring);
1075
1076 kfree(ring->cbs);
1077 ring->cbs = NULL;
1078
1079 if (ring->desc_dma) {
1080 dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu, ring->desc_dma);
1081 ring->desc_dma = 0;
1082 }
1083 ring->size = 0;
1084 ring->alloc_size = 0;
1085
1086 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1087}
1088
1089/* RDMA helper */
1090static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1091 unsigned int enable)
1092{
1093 unsigned int timeout = 1000;
1094 u32 reg;
1095
1096 reg = rdma_readl(priv, RDMA_CONTROL);
1097 if (enable)
1098 reg |= RDMA_EN;
1099 else
1100 reg &= ~RDMA_EN;
1101 rdma_writel(priv, reg, RDMA_CONTROL);
1102
1103 /* Poll for RDMA enable/disable completion */
1104 do {
1105 reg = rdma_readl(priv, RDMA_STATUS);
1106 if (!!(reg & RDMA_DISABLED) == !enable)
1107 return 0;
1108 usleep_range(1000, 2000);
1109 } while (timeout-- > 0);
1110
1111 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1112
1113 return -ETIMEDOUT;
1114}
1115
1116/* TDMA helper */
1117static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1118 unsigned int enable)
1119{
1120 unsigned int timeout = 1000;
1121 u32 reg;
1122
1123 reg = tdma_readl(priv, TDMA_CONTROL);
1124 if (enable)
1125 reg |= TDMA_EN;
1126 else
1127 reg &= ~TDMA_EN;
1128 tdma_writel(priv, reg, TDMA_CONTROL);
1129
1130 /* Poll for TDMA enable/disable completion */
1131 do {
1132 reg = tdma_readl(priv, TDMA_STATUS);
1133 if (!!(reg & TDMA_DISABLED) == !enable)
1134 return 0;
1135
1136 usleep_range(1000, 2000);
1137 } while (timeout-- > 0);
1138
1139 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1140
1141 return -ETIMEDOUT;
1142}
1143
1144static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1145{
1146 u32 reg;
1147 int ret;
1148
1149 /* Initialize SW view of the RX ring */
1150 priv->num_rx_bds = NUM_RX_DESC;
1151 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1152 priv->rx_bd_assign_ptr = priv->rx_bds;
1153 priv->rx_bd_assign_index = 0;
1154 priv->rx_c_index = 0;
1155 priv->rx_read_ptr = 0;
1156 priv->rx_cbs = kzalloc(priv->num_rx_bds *
1157 sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1158 if (!priv->rx_cbs) {
1159 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1160 return -ENOMEM;
1161 }
1162
1163 ret = bcm_sysport_alloc_rx_bufs(priv);
1164 if (ret) {
1165 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1166 return ret;
1167 }
1168
1169 /* Initialize HW, ensure RDMA is disabled */
1170 reg = rdma_readl(priv, RDMA_STATUS);
1171 if (!(reg & RDMA_DISABLED))
1172 rdma_enable_set(priv, 0);
1173
1174 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1175 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1176 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1177 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1178 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1179 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1180 /* Operate the queue in ring mode */
1181 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1182 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1183 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1184 rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
1185
1186 rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1187
1188 netif_dbg(priv, hw, priv->netdev,
1189 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1190 priv->num_rx_bds, priv->rx_bds);
1191
1192 return 0;
1193}
1194
1195static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1196{
1197 struct bcm_sysport_cb *cb;
1198 unsigned int i;
1199 u32 reg;
1200
1201 /* Caller should ensure RDMA is disabled */
1202 reg = rdma_readl(priv, RDMA_STATUS);
1203 if (!(reg & RDMA_DISABLED))
1204 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1205
1206 for (i = 0; i < priv->num_rx_bds; i++) {
1207 cb = &priv->rx_cbs[i];
1208 if (dma_unmap_addr(cb, dma_addr))
1209 dma_unmap_single(&priv->pdev->dev,
1210 dma_unmap_addr(cb, dma_addr),
1211 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1212 bcm_sysport_free_cb(cb);
1213 }
1214
1215 kfree(priv->rx_cbs);
1216 priv->rx_cbs = NULL;
1217
1218 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1219}
1220
1221static void bcm_sysport_set_rx_mode(struct net_device *dev)
1222{
1223 struct bcm_sysport_priv *priv = netdev_priv(dev);
1224 u32 reg;
1225
1226 reg = umac_readl(priv, UMAC_CMD);
1227 if (dev->flags & IFF_PROMISC)
1228 reg |= CMD_PROMISC;
1229 else
1230 reg &= ~CMD_PROMISC;
1231 umac_writel(priv, reg, UMAC_CMD);
1232
1233 /* No support for ALLMULTI */
1234 if (dev->flags & IFF_ALLMULTI)
1235 return;
1236}
1237
1238static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1239 unsigned int enable)
1240{
1241 u32 reg;
1242
1243 reg = umac_readl(priv, UMAC_CMD);
1244 if (enable)
1245 reg |= CMD_RX_EN | CMD_TX_EN;
1246 else
1247 reg &= ~(CMD_RX_EN | CMD_TX_EN);
1248 umac_writel(priv, reg, UMAC_CMD);
1249
1250 /* UniMAC stops on a packet boundary, wait for a full-sized packet
1251 * to be processed (1 msec).
1252 */
1253 if (enable == 0)
1254 usleep_range(1000, 2000);
1255}
1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv)
1258{
1259 unsigned int timeout = 0;
1260 u32 reg;
1261 int ret = 0;
1262
1263 umac_writel(priv, 0, UMAC_CMD);
1264 while (timeout++ < 1000) {
1265 reg = umac_readl(priv, UMAC_CMD);
1266 if (!(reg & CMD_SW_RESET))
1267 break;
1268
1269 udelay(1);
1270 }
1271
1272 if (timeout == 1000) {
1273 dev_err(&priv->pdev->dev,
1274 "timeout waiting for MAC to come out of reset\n");
1275 ret = -ETIMEDOUT;
1276 }
1277
1278 return ret;
1279}
1280
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1282 unsigned char *addr)
1283{
1284 umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
1285 (addr[2] << 8) | addr[3], UMAC_MAC0);
1286 umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
1287}
1288
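umac_set_hw_addr() splits the six-byte station address across two registers, four bytes into UMAC_MAC0 and two into UMAC_MAC1. A worked example of the packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	uint32_t mac0 = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3];
	uint32_t mac1 = (addr[4] << 8) | addr[5];

	printf("UMAC_MAC0 = 0x%08x\n", mac0); /* 0x001018ab */
	printf("UMAC_MAC1 = 0x%08x\n", mac1); /* 0x0000cdef */
	return 0;
}
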
1289static void topctrl_flush(struct bcm_sysport_priv *priv)
1290{
1291 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1292 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1293 mdelay(1);
1294 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1295 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1296}
1297
1298static int bcm_sysport_open(struct net_device *dev)
1299{
1300 struct bcm_sysport_priv *priv = netdev_priv(dev);
1301 unsigned int i;
1302 u32 reg;
1303 int ret;
1304
1305 /* Reset UniMAC */
1306 ret = umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311
1312 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv);
1314
1315 /* Disable the UniMAC RX/TX */
1316 umac_enable_set(priv, 0);
1317
1318 /* Enable RBUF 2-byte alignment and the Receive Status Block */
1319 reg = rbuf_readl(priv, RBUF_CONTROL);
1320 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1321 rbuf_writel(priv, reg, RBUF_CONTROL);
1322
1323 /* Set maximum frame length */
1324 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1325
1326 /* Set MAC address */
1327 umac_set_hw_addr(priv, dev->dev_addr);
1328
1329 /* Read CRC forward */
1330 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1331
1332 priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1333 0, priv->phy_interface);
1334 if (!priv->phydev) {
1335 netdev_err(dev, "could not attach to PHY\n");
1336 return -ENODEV;
1337 }
1338
1339 /* Reset housekeeping link status */
1340 priv->old_duplex = -1;
1341 priv->old_link = -1;
1342 priv->old_pause = -1;
1343
1344 /* mask all interrupts and request them */
1345 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1346 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1347 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1348 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1349 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1350 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1351
1352 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1353 if (ret) {
1354 netdev_err(dev, "failed to request RX interrupt\n");
1355 goto out_phy_disconnect;
1356 }
1357
1358 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
1359 if (ret) {
1360 netdev_err(dev, "failed to request TX interrupt\n");
1361 goto out_free_irq0;
1362 }
1363
1364 /* Initialize both hardware and software ring */
1365 for (i = 0; i < dev->num_tx_queues; i++) {
1366 ret = bcm_sysport_init_tx_ring(priv, i);
1367 if (ret) {
1368 netdev_err(dev, "failed to initialize TX ring %d\n",
1369 i);
1370 goto out_free_tx_ring;
1371 }
1372 }
1373
1374 /* Initialize linked-list */
1375 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1376
1377 /* Initialize RX ring */
1378 ret = bcm_sysport_init_rx_ring(priv);
1379 if (ret) {
1380 netdev_err(dev, "failed to initialize RX ring\n");
1381 goto out_free_rx_ring;
1382 }
1383
1384 /* Turn on RDMA */
1385 ret = rdma_enable_set(priv, 1);
1386 if (ret)
1387 goto out_free_rx_ring;
1388
1389 /* Enable RX interrupt and TX ring full interrupt */
1390 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1391
1392 /* Turn on TDMA */
1393 ret = tdma_enable_set(priv, 1);
1394 if (ret)
1395 goto out_clear_rx_int;
1396
1397 /* Enable NAPI */
1398 napi_enable(&priv->napi);
1399
1400 /* Turn on UniMAC TX/RX */
1401 umac_enable_set(priv, 1);
1402
1403 phy_start(priv->phydev);
1404
1405 /* Enable TX interrupts for the 32 TXQs */
1406 intrl2_1_mask_clear(priv, 0xffffffff);
1407
1408 /* Last call before we start the real business */
1409 netif_tx_start_all_queues(dev);
1410
1411 return 0;
1412
1413out_clear_rx_int:
1414 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1415out_free_rx_ring:
1416 bcm_sysport_fini_rx_ring(priv);
1417out_free_tx_ring:
1418 for (i = 0; i < dev->num_tx_queues; i++)
1419 bcm_sysport_fini_tx_ring(priv, i);
1420 free_irq(priv->irq1, dev);
1421out_free_irq0:
1422 free_irq(priv->irq0, dev);
1423out_phy_disconnect:
1424 phy_disconnect(priv->phydev);
1425 return ret;
1426}
1427
1428static int bcm_sysport_stop(struct net_device *dev)
1429{
1430 struct bcm_sysport_priv *priv = netdev_priv(dev);
1431 unsigned int i;
1432 u32 reg;
1433 int ret;
1434
1435 /* stop all software from updating hardware */
1436 netif_tx_stop_all_queues(dev);
1437 napi_disable(&priv->napi);
1438 phy_stop(priv->phydev);
1439
1440 /* mask all interrupts */
1441 intrl2_0_mask_set(priv, 0xffffffff);
1442 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1443 intrl2_1_mask_set(priv, 0xffffffff);
1444 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1445
1446 /* Disable UniMAC RX */
1447 reg = umac_readl(priv, UMAC_CMD);
1448 reg &= ~CMD_RX_EN;
1449 umac_writel(priv, reg, UMAC_CMD);
1450
1451 ret = tdma_enable_set(priv, 0);
1452 if (ret) {
1453 netdev_err(dev, "timeout disabling TDMA\n");
1454 return ret;
1455 }
1456
1457 /* Wait for a maximum packet size to be drained */
1458 usleep_range(2000, 3000);
1459
1460 ret = rdma_enable_set(priv, 0);
1461 if (ret) {
1462 netdev_err(dev, "timeout disabling RDMA\n");
1463 return ret;
1464 }
1465
1466 /* Disable UniMAC TX */
1467 reg = umac_readl(priv, UMAC_CMD);
1468 reg &= ~CMD_TX_EN;
1469 umac_writel(priv, reg, UMAC_CMD);
1470
1471 /* Free RX/TX rings SW structures */
1472 for (i = 0; i < dev->num_tx_queues; i++)
1473 bcm_sysport_fini_tx_ring(priv, i);
1474 bcm_sysport_fini_rx_ring(priv);
1475
1476 free_irq(priv->irq0, dev);
1477 free_irq(priv->irq1, dev);
1478
1479 /* Disconnect from PHY */
1480 phy_disconnect(priv->phydev);
1481
1482 return 0;
1483}
1484
1485static struct ethtool_ops bcm_sysport_ethtool_ops = {
1486 .get_settings = bcm_sysport_get_settings,
1487 .set_settings = bcm_sysport_set_settings,
1488 .get_drvinfo = bcm_sysport_get_drvinfo,
1489 .get_msglevel = bcm_sysport_get_msglvl,
1490 .set_msglevel = bcm_sysport_set_msglvl,
1491 .get_link = ethtool_op_get_link,
1492 .get_strings = bcm_sysport_get_strings,
1493 .get_ethtool_stats = bcm_sysport_get_stats,
1494 .get_sset_count = bcm_sysport_get_sset_count,
1495};
1496
1497static const struct net_device_ops bcm_sysport_netdev_ops = {
1498 .ndo_start_xmit = bcm_sysport_xmit,
1499 .ndo_tx_timeout = bcm_sysport_tx_timeout,
1500 .ndo_open = bcm_sysport_open,
1501 .ndo_stop = bcm_sysport_stop,
1502 .ndo_set_features = bcm_sysport_set_features,
1503 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
1504};
1505
1506#define REV_FMT "v%2x.%02x"
1507
1508static int bcm_sysport_probe(struct platform_device *pdev)
1509{
1510 struct bcm_sysport_priv *priv;
1511 struct device_node *dn;
1512 struct net_device *dev;
1513 const void *macaddr;
1514 struct resource *r;
1515 u32 txq, rxq;
1516 int ret;
1517
1518 dn = pdev->dev.of_node;
1519 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520
1521 /* Read the Transmit/Receive Queue properties */
1522 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
1523 txq = TDMA_NUM_RINGS;
1524 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
1525 rxq = 1;
1526
1527 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
1528 if (!dev)
1529 return -ENOMEM;
1530
1531 /* Initialize private members */
1532 priv = netdev_priv(dev);
1533
1534 priv->irq0 = platform_get_irq(pdev, 0);
1535 priv->irq1 = platform_get_irq(pdev, 1);
1536 if (priv->irq0 <= 0 || priv->irq1 <= 0) {
1537 dev_err(&pdev->dev, "invalid interrupts\n");
1538 ret = -EINVAL;
1539 goto err;
1540 }
1541
1542 priv->base = devm_ioremap_resource(&pdev->dev, r);
1543 if (IS_ERR(priv->base)) {
1544 ret = PTR_ERR(priv->base);
1545 goto err;
1546 }
1547
1548 priv->netdev = dev;
1549 priv->pdev = pdev;
1550
1551 priv->phy_interface = of_get_phy_mode(dn);
1552 /* Default to GMII interface mode */
1553 if (priv->phy_interface < 0)
1554 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
1555
1556 /* In the case of a fixed PHY, the DT node associated
1557 * with the PHY is the Ethernet MAC DT node.
1558 */
1559 if (of_phy_is_fixed_link(dn)) {
1560 ret = of_phy_register_fixed_link(dn);
1561 if (ret) {
1562 dev_err(&pdev->dev, "failed to register fixed PHY\n");
1563 goto err;
1564 }
1565
1566 priv->phy_dn = dn;
1567 }
1568
1569 /* Initialize netdevice members */
1570 macaddr = of_get_mac_address(dn);
1571 if (!macaddr || !is_valid_ether_addr(macaddr)) {
1572 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1573 random_ether_addr(dev->dev_addr);
1574 } else {
1575 ether_addr_copy(dev->dev_addr, macaddr);
1576 }
1577
1578 SET_NETDEV_DEV(dev, &pdev->dev);
1579 dev_set_drvdata(&pdev->dev, dev);
1580 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
1581 dev->netdev_ops = &bcm_sysport_netdev_ops;
1582 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
1583
1584 /* HW supported features, none enabled by default */
1585 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
1586 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1587
1588 /* Set the needed headroom once and for all */
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb);
1591
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev);
1600
1601 ret = register_netdev(dev);
1602 if (ret) {
1603 dev_err(&pdev->dev, "failed to register net_device\n");
1604 goto err;
1605 }
1606
1607 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
1608 dev_info(&pdev->dev,
1609 "Broadcom SYSTEMPORT" REV_FMT
1610 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
1611 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
1612 priv->base, priv->irq0, priv->irq1, txq, rxq);
1613
1614 return 0;
1615err:
1616 free_netdev(dev);
1617 return ret;
1618}
1619
1620static int bcm_sysport_remove(struct platform_device *pdev)
1621{
1622 struct net_device *dev = dev_get_drvdata(&pdev->dev);
1623
1624 /* Not much to do here: ndo_stop() has already been called
1625 * and we use managed allocations
1626 */
1627 unregister_netdev(dev);
1628 free_netdev(dev);
1629 dev_set_drvdata(&pdev->dev, NULL);
1630
1631 return 0;
1632}
1633
1634static const struct of_device_id bcm_sysport_of_match[] = {
1635 { .compatible = "brcm,systemport-v1.00" },
1636 { .compatible = "brcm,systemport" },
1637 { /* sentinel */ }
1638};
1639
1640static struct platform_driver bcm_sysport_driver = {
1641 .probe = bcm_sysport_probe,
1642 .remove = bcm_sysport_remove,
1643 .driver = {
1644 .name = "brcm-systemport",
1645 .owner = THIS_MODULE,
1646 .of_match_table = bcm_sysport_of_match,
1647 },
1648};
1649module_platform_driver(bcm_sysport_driver);
1650
1651MODULE_AUTHOR("Broadcom Corporation");
1652MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
1653MODULE_ALIAS("platform:brcm-systemport");
1654MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..281c08246037
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __BCM_SYSPORT_H
12#define __BCM_SYSPORT_H
13
14#include <linux/if_vlan.h>
15
16/* Receive/transmit descriptor format */
17#define DESC_ADDR_HI_STATUS_LEN 0x00
18#define DESC_ADDR_HI_SHIFT 0
19#define DESC_ADDR_HI_MASK 0xff
20#define DESC_STATUS_SHIFT 8
21#define DESC_STATUS_MASK 0x3ff
22#define DESC_LEN_SHIFT 18
23#define DESC_LEN_MASK 0x7fff
24#define DESC_ADDR_LO 0x04
25
26/* HW supports 40-bit addressing, hence the two words per descriptor */
27#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
28
29/* Default RX buffer allocation size */
30#define RX_BUF_LENGTH 2048
31
32/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
33 * Adding ENET_PAD(10) brings the total to 1536, a multiple of 256 bytes
34 */
35#define ENET_BRCM_TAG_LEN 4
36#define ENET_PAD 10
37#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
38 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
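/* Worked out: ETH_DATA_LEN(1500) + ETH_HLEN(14) + VLAN_HLEN(4) +
 * ENET_BRCM_TAG_LEN(4) + ETH_FCS_LEN(4) + ENET_PAD(10) = 1536 bytes.
 */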
39
40/* Transmit status block */
41struct bcm_tsb {
42 u32 pcp_dei_vid;
43#define PCP_DEI_MASK 0xf
44#define VID_SHIFT 4
45#define VID_MASK 0xfff
46 u32 l4_ptr_dest_map;
47#define L4_CSUM_PTR_MASK 0x1ff
48#define L4_PTR_SHIFT 9
49#define L4_PTR_MASK 0x1ff
50#define L4_UDP (1 << 18)
51#define L4_LENGTH_VALID (1 << 19)
52#define DEST_MAP_SHIFT 20
53#define DEST_MAP_MASK 0x1ff
54};
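/* Illustrative sketch (not from this file) of filling the TSB for L4
 * checksum offload with the fields above; 'tsb', 'csum_start' and
 * 'ip_proto' are hypothetical locals derived from the skb.
 *
 *	u32 csum_info;
 *
 *	csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
 *	csum_info |= (csum_start << L4_PTR_SHIFT);
 *	if (ip_proto == IPPROTO_UDP)
 *		csum_info |= L4_UDP | L4_LENGTH_VALID;
 *	tsb->l4_ptr_dest_map = csum_info;
 */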
55
56/* Receive status block uses the same
57 * definitions as the DMA descriptor
58 */
59struct bcm_rsb {
60 u32 rx_status_len;
61 u32 brcm_egress_tag;
62};
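/* Illustrative decode of a receive status block using the shared
 * descriptor definitions above ('rsb' is a hypothetical pointer):
 *
 *	u32 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
 *	u32 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
 *		     DESC_STATUS_MASK;
 *
 *	if (status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))
 *		...count and drop the packet...
 */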
63
64/* Common Receive/Transmit status bits */
65#define DESC_L4_CSUM (1 << 7)
66#define DESC_SOP (1 << 8)
67#define DESC_EOP (1 << 9)
68
69/* Receive Status bits */
70#define RX_STATUS_UCAST 0
71#define RX_STATUS_BCAST 0x04
72#define RX_STATUS_MCAST 0x08
73#define RX_STATUS_L2_MCAST 0x0c
74#define RX_STATUS_ERR (1 << 4)
75#define RX_STATUS_OVFLOW (1 << 5)
76#define RX_STATUS_PARSE_FAIL (1 << 6)
77
78/* Transmit Status bits */
79#define TX_STATUS_VLAN_NO_ACT 0x00
80#define TX_STATUS_VLAN_PCP_TSB 0x01
81#define TX_STATUS_VLAN_QUEUE 0x02
82#define TX_STATUS_VLAN_VID_TSB 0x03
83#define TX_STATUS_OWR_CRC (1 << 2)
84#define TX_STATUS_APP_CRC (1 << 3)
85#define TX_STATUS_BRCM_TAG_NO_ACT 0
86#define TX_STATUS_BRCM_TAG_ZERO 0x10
87#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
88#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
89#define TX_STATUS_SKIP_BYTES (1 << 6)
90
91/* Specific register definitions */
92#define SYS_PORT_TOPCTRL_OFFSET 0
93#define REV_CNTL 0x00
94#define REV_MASK 0xffff
95
96#define RX_FLUSH_CNTL 0x04
97#define RX_FLUSH (1 << 0)
98
99#define TX_FLUSH_CNTL 0x08
100#define TX_FLUSH (1 << 0)
101
102#define MISC_CNTL 0x0c
103#define SYS_CLK_SEL (1 << 0)
104#define TDMA_EOP_SEL (1 << 1)
105
106/* Level-2 Interrupt controller offsets and defines */
107#define SYS_PORT_INTRL2_0_OFFSET 0x200
108#define SYS_PORT_INTRL2_1_OFFSET 0x240
109#define INTRL2_CPU_STATUS 0x00
110#define INTRL2_CPU_SET 0x04
111#define INTRL2_CPU_CLEAR 0x08
112#define INTRL2_CPU_MASK_STATUS 0x0c
113#define INTRL2_CPU_MASK_SET 0x10
114#define INTRL2_CPU_MASK_CLEAR 0x14
115
116/* Level-2 instance 0 interrupt bits */
117#define INTRL2_0_GISB_ERR (1 << 0)
118#define INTRL2_0_RBUF_OVFLOW (1 << 1)
119#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
120#define INTRL2_0_MPD (1 << 3)
121#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
122#define INTRL2_0_RDMA_MBDONE (1 << 5)
123#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
124#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
125#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
126#define INTRL2_0_TX_RING_FULL (1 << 9)
127#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
128#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
129
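/* Sketch of the interrupt service sequence this controller expects
 * (assumes the intrl2_0_* accessors from the .c file): read the
 * unmasked status, acknowledge it, then service each cause.
 *
 *	status = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 *		 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
 *	intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 *	if (status & INTRL2_0_RDMA_MBDONE)
 *		napi_schedule(&priv->napi);
 */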
130/* RXCHK offset and defines */
131#define SYS_PORT_RXCHK_OFFSET 0x300
132
133#define RXCHK_CONTROL 0x00
134#define RXCHK_EN (1 << 0)
135#define RXCHK_SKIP_FCS (1 << 1)
136#define RXCHK_BAD_CSUM_DIS (1 << 2)
137#define RXCHK_BRCM_TAG_EN (1 << 3)
138#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
139#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
140#define RXCHK_PARSE_TNL (1 << 12)
141#define RXCHK_VIOL_EN (1 << 13)
142#define RXCHK_VIOL_DIS (1 << 14)
143#define RXCHK_INCOM_PKT (1 << 15)
144#define RXCHK_V6_DUPEXT_EN (1 << 16)
145#define RXCHK_V6_DUPEXT_DIS (1 << 17)
146#define RXCHK_ETHERTYPE_DIS (1 << 18)
147#define RXCHK_L2_HDR_DIS (1 << 19)
148#define RXCHK_L3_HDR_DIS (1 << 20)
149#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
150#define RXCHK_PARSE_AUTH (1 << 22)
151
152#define RXCHK_BRCM_TAG0 0x04
153#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
154#define RXCHK_BRCM_TAG0_MASK 0x24
155#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
156#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
157#define RXCHK_ETHERTYPE 0x48
158#define RXCHK_BAD_CSUM_CNTR 0x4C
159#define RXCHK_OTHER_DISC_CNTR 0x50
160
161/* TXCHCK offsets and defines */
162#define SYS_PORT_TXCHK_OFFSET 0x380
163#define TXCHK_PKT_RDY_THRESH 0x00
164
165/* Receive buffer offset and defines */
166#define SYS_PORT_RBUF_OFFSET 0x400
167
168#define RBUF_CONTROL 0x00
169#define RBUF_RSB_EN (1 << 0)
170#define RBUF_4B_ALGN (1 << 1)
171#define RBUF_BRCM_TAG_STRIP (1 << 2)
172#define RBUF_BAD_PKT_DISC (1 << 3)
173#define RBUF_RESUME_THRESH_SHIFT 4
174#define RBUF_RESUME_THRESH_MASK 0xff
175#define RBUF_OK_TO_SEND_SHIFT 12
176#define RBUF_OK_TO_SEND_MASK 0xff
177#define RBUF_CRC_REPLACE (1 << 20)
178#define RBUF_OK_TO_SEND_MODE (1 << 21)
179#define RBUF_RSB_SWAP (1 << 22)
180#define RBUF_ACPI_EN (1 << 23)
181
182#define RBUF_PKT_RDY_THRESH 0x04
183
184#define RBUF_STATUS 0x08
185#define RBUF_WOL_MODE (1 << 0)
186#define RBUF_MPD (1 << 1)
187#define RBUF_ACPI (1 << 2)
188
189#define RBUF_OVFL_DISC_CNTR 0x0c
190#define RBUF_ERR_PKT_CNTR 0x10
191
192/* Transmit buffer offset and defines */
193#define SYS_PORT_TBUF_OFFSET 0x600
194
195#define TBUF_CONTROL 0x00
196#define TBUF_BP_EN (1 << 0)
197#define TBUF_MAX_PKT_THRESH_SHIFT 1
198#define TBUF_MAX_PKT_THRESH_MASK 0x1f
199#define TBUF_FULL_THRESH_SHIFT 8
200#define TBUF_FULL_THRESH_MASK 0x1f
201
202/* UniMAC offset and defines */
203#define SYS_PORT_UMAC_OFFSET 0x800
204
205#define UMAC_CMD 0x008
206#define CMD_TX_EN (1 << 0)
207#define CMD_RX_EN (1 << 1)
208#define CMD_SPEED_SHIFT 2
209#define CMD_SPEED_10 0
210#define CMD_SPEED_100 1
211#define CMD_SPEED_1000 2
212#define CMD_SPEED_2500 3
213#define CMD_SPEED_MASK 3
214#define CMD_PROMISC (1 << 4)
215#define CMD_PAD_EN (1 << 5)
216#define CMD_CRC_FWD (1 << 6)
217#define CMD_PAUSE_FWD (1 << 7)
218#define CMD_RX_PAUSE_IGNORE (1 << 8)
219#define CMD_TX_ADDR_INS (1 << 9)
220#define CMD_HD_EN (1 << 10)
221#define CMD_SW_RESET (1 << 13)
222#define CMD_LCL_LOOP_EN (1 << 15)
223#define CMD_AUTO_CONFIG (1 << 22)
224#define CMD_CNTL_FRM_EN (1 << 23)
225#define CMD_NO_LEN_CHK (1 << 24)
226#define CMD_RMT_LOOP_EN (1 << 25)
227#define CMD_PRBL_EN (1 << 27)
228#define CMD_TX_PAUSE_IGNORE (1 << 28)
229#define CMD_TX_RX_EN (1 << 29)
230#define CMD_RUNT_FILTER_DIS (1 << 30)
231
232#define UMAC_MAC0 0x00c
233#define UMAC_MAC1 0x010
234#define UMAC_MAX_FRAME_LEN 0x014
235
236#define UMAC_TX_FLUSH 0x334
237
238#define UMAC_MIB_START 0x400
239
240/* There is a 0xC gap between the end of the RX stats and the beginning of the
241 * TX stats, and another between the end of the TX stats and the RX RUNT counters
242 */
243#define UMAC_MIB_STAT_OFFSET 0xc
244
245#define UMAC_MIB_CTRL 0x580
246#define MIB_RX_CNT_RST (1 << 0)
247#define MIB_RUNT_CNT_RST (1 << 1)
248#define MIB_TX_CNT_RST (1 << 2)
249#define UMAC_MDF_CTRL 0x650
250#define UMAC_MDF_ADDR 0x654
251
252/* Receive DMA offset and defines */
253#define SYS_PORT_RDMA_OFFSET 0x2000
254
255#define RDMA_CONTROL 0x1000
256#define RDMA_EN (1 << 0)
257#define RDMA_RING_CFG (1 << 1)
258#define RDMA_DISC_EN (1 << 2)
259#define RDMA_BUF_DATA_OFFSET_SHIFT 4
260#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
261
262#define RDMA_STATUS 0x1004
263#define RDMA_DISABLED (1 << 0)
264#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
265#define RDMA_BP_STATUS (1 << 2)
266
267#define RDMA_SCB_BURST_SIZE 0x1008
268
269#define RDMA_RING_BUF_SIZE 0x100c
270#define RDMA_RING_SIZE_SHIFT 16
271
272#define RDMA_WRITE_PTR_HI 0x1010
273#define RDMA_WRITE_PTR_LO 0x1014
274#define RDMA_PROD_INDEX 0x1018
275#define RDMA_PROD_INDEX_MASK 0xffff
276
277#define RDMA_CONS_INDEX 0x101c
278#define RDMA_CONS_INDEX_MASK 0xffff
279
280#define RDMA_START_ADDR_HI 0x1020
281#define RDMA_START_ADDR_LO 0x1024
282#define RDMA_END_ADDR_HI 0x1028
283#define RDMA_END_ADDR_LO 0x102c
284
285#define RDMA_MBDONE_INTR 0x1030
286#define RDMA_INTR_THRESH_MASK 0xff
287#define RDMA_TIMEOUT_SHIFT 16
288#define RDMA_TIMEOUT_MASK 0xffff
289
290#define RDMA_XON_XOFF_THRESH 0x1034
291#define RDMA_XON_XOFF_THRESH_MASK 0xffff
292#define RDMA_XOFF_THRESH_SHIFT 16
293
294#define RDMA_READ_PTR_HI 0x1038
295#define RDMA_READ_PTR_LO 0x103c
296
297#define RDMA_OVERRIDE 0x1040
298#define RDMA_LE_MODE (1 << 0)
299#define RDMA_REG_MODE (1 << 1)
300
301#define RDMA_TEST 0x1044
302#define RDMA_TP_OUT_SEL (1 << 0)
303#define RDMA_MEM_SEL (1 << 1)
304
305#define RDMA_DEBUG 0x1048
306
307/* Transmit DMA offset and defines */
308#define TDMA_NUM_RINGS 32 /* rings = queues */
309#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
310
311#define SYS_PORT_TDMA_OFFSET 0x4000
312#define TDMA_WRITE_PORT_OFFSET 0x0000
313#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
314 (i) * TDMA_PORT_SIZE)
315#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
316 sizeof(u32) + (i) * TDMA_PORT_SIZE)
317
318#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
319 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
320#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
321 (i) * TDMA_PORT_SIZE)
322#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
323 sizeof(u32) + (i) * TDMA_PORT_SIZE)
324
325#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
326 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
327#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
328 (i) * sizeof(u32))
329
330#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
331 (TDMA_NUM_RINGS * sizeof(u32)))
332
333/* Register offsets and defines relatives to a specific ring number */
334#define RING_HEAD_TAIL_PTR 0x00
335#define RING_HEAD_MASK 0x7ff
336#define RING_TAIL_SHIFT 11
337#define RING_TAIL_MASK 0x7ff
338#define RING_FLUSH (1 << 24)
339#define RING_EN (1 << 25)
340
341#define RING_COUNT 0x04
342#define RING_COUNT_MASK 0x7ff
343#define RING_BUFF_DONE_SHIFT 11
344#define RING_BUFF_DONE_MASK 0x7ff
345
346#define RING_MAX_HYST 0x08
347#define RING_MAX_THRESH_MASK 0x7ff
348#define RING_HYST_THRESH_SHIFT 11
349#define RING_HYST_THRESH_MASK 0x7ff
350
351#define RING_INTR_CONTROL 0x0c
352#define RING_INTR_THRESH_MASK 0x7ff
353#define RING_EMPTY_INTR_EN (1 << 15)
354#define RING_TIMEOUT_SHIFT 16
355#define RING_TIMEOUT_MASK 0xffff
356
357#define RING_PROD_CONS_INDEX 0x10
358#define RING_PROD_INDEX_MASK 0xffff
359#define RING_CONS_INDEX_SHIFT 16
360#define RING_CONS_INDEX_MASK 0xffff
361
362#define RING_MAPPING 0x14
363#define RING_QID_MASK 0x3
364#define RING_PORT_ID_SHIFT 3
365#define RING_PORT_ID_MASK 0x7
366#define RING_IGNORE_STATUS (1 << 6)
367#define RING_FAILOVER_EN (1 << 7)
368#define RING_CREDIT_SHIFT 8
369#define RING_CREDIT_MASK 0xffff
370
371#define RING_PCP_DEI_VID 0x18
372#define RING_VID_MASK 0x7ff
373#define RING_DEI (1 << 12)
374#define RING_PCP_SHIFT 13
375#define RING_PCP_MASK 0x7
376#define RING_PKT_SIZE_ADJ_SHIFT 16
377#define RING_PKT_SIZE_ADJ_MASK 0xf
378
379#define TDMA_DESC_RING_SIZE 28
380
381/* Definition for a given TX ring base address */
382#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
383 ((i) * TDMA_DESC_RING_SIZE))
384
385/* Ring-indexed register addresses */
386#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
387 RING_HEAD_TAIL_PTR)
388#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
389 RING_COUNT)
390#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
391 RING_MAX_HYST)
392#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
393 RING_INTR_CONTROL)
394#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
395 (TDMA_DESC_RING_BASE(i) + \
396 RING_PROD_CONS_INDEX)
397#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
398 RING_MAPPING)
399#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
400 RING_PCP_DEI_VID)
401
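/* Illustrative use of the ring-indexed accessors above, assuming the
 * tdma_writel() helper from the .c file ('index' is the TX ring number):
 *
 *	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
 *	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
 */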
402#define TDMA_CONTROL 0x600
403#define TDMA_EN (1 << 0)
404#define TSB_EN (1 << 1)
405#define TSB_SWAP (1 << 2)
406#define ACB_ALGO (1 << 3)
407#define BUF_DATA_OFFSET_SHIFT 4
408#define BUF_DATA_OFFSET_MASK 0x3ff
409#define VLAN_EN (1 << 14)
410#define SW_BRCM_TAG (1 << 15)
411#define WNC_KPT_SIZE_UPDATE (1 << 16)
412#define SYNC_PKT_SIZE (1 << 17)
413#define ACH_TXDONE_DELAY_SHIFT 18
414#define ACH_TXDONE_DELAY_MASK 0xff
415
416#define TDMA_STATUS 0x604
417#define TDMA_DISABLED (1 << 0)
418#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
419
420#define TDMA_SCB_BURST_SIZE 0x608
421#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
422#define TDMA_OVER_HYST_THRESH_STATUS 0x610
423#define TDMA_TPID 0x614
424
425#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
426#define TDMA_FREE_HEAD_MASK 0x7ff
427#define TDMA_FREE_TAIL_SHIFT 11
428#define TDMA_FREE_TAIL_MASK 0x7ff
429
430#define TDMA_FREE_LIST_COUNT 0x61c
431#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
432
433#define TDMA_TIER2_ARB_CTRL 0x620
434#define TDMA_ARB_MODE_RR 0
435#define TDMA_ARB_MODE_WEIGHT_RR 0x1
436#define TDMA_ARB_MODE_STRICT 0x2
437#define TDMA_ARB_MODE_DEFICIT_RR 0x3
438#define TDMA_CREDIT_SHIFT 4
439#define TDMA_CREDIT_MASK 0xffff
440
441#define TDMA_TIER1_ARB_0_CTRL 0x624
442#define TDMA_ARB_EN (1 << 0)
443
444#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
445#define TDMA_TIER1_ARB_1_CTRL 0x62c
446#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
447#define TDMA_TIER1_ARB_2_CTRL 0x634
448#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
449#define TDMA_TIER1_ARB_3_CTRL 0x63c
450#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
451
452#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
453#define TDMA_LE_MODE (1 << 0)
454#define TDMA_REG_MODE (1 << 1)
455
456#define TDMA_TEST 0x648
457#define TDMA_TP_OUT_SEL (1 << 0)
458#define TDMA_MEM_TM (1 << 1)
459
460#define TDMA_DEBUG 0x64c
461
462/* Transmit/Receive descriptor */
463struct dma_desc {
464 u32 addr_status_len;
465 u32 addr_lo;
466};
467
468/* Number of Receive hardware descriptor words */
469#define NUM_HW_RX_DESC_WORDS 1024
470/* Real number of usable descriptors */
471#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
472
473/* Internal linked-list RAM has up to 1536 entries */
474#define NUM_TX_DESC 1536
475
476#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
477
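/* Worked out: sizeof(struct dma_desc) is 8 bytes, so WORDS_PER_DESC is
 * 8 / 4 = 2, DESC_SIZE is 8 bytes, and NUM_RX_DESC is 1024 / 2 = 512
 * usable receive descriptors.
 */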
478/* Rx/Tx common counter group.*/
479struct bcm_sysport_pkt_counters {
480 u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
481 u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
482 u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
483 u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
484 u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
485 u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
486 u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
487 u32 cnt_2047; /* RO Rx/Tx 1523-2047 bytes packet*/
488 u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
489 u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
490};
491
492/* RSV, Receive Status Vector */
493struct bcm_sysport_rx_counters {
494 struct bcm_sysport_pkt_counters pkt_cnt;
495 u32 pkt; /* RO (0x428) Received pkt count*/
496 u32 bytes; /* RO Received byte count */
497 u32 mca; /* RO # of Received multicast pkt */
498 u32 bca; /* RO # of Received broadcast pkt */
499 u32 fcs; /* RO # of Received FCS error */
500 u32 cf; /* RO # of Received control frame pkt*/
501 u32 pf; /* RO # of Received pause frame pkt */
502 u32 uo; /* RO # of unknown op code pkt */
503 u32 aln; /* RO # of alignment error count */
504 u32 flr; /* RO # of frame length out of range count */
505 u32 cde; /* RO # of code error pkt */
506 u32 fcr; /* RO # of carrier sense error pkt */
507 u32 ovr; /* RO # of oversize pkt*/
508 u32 jbr; /* RO # of jabber count */
509 u32 mtue; /* RO # of MTU error pkt*/
510 u32 pok; /* RO # of Received good pkt */
511 u32 uc; /* RO # of unicast pkt */
512 u32 ppp; /* RO # of PPP pkt */
513 u32 rcrc; /* RO (0x470),# of CRC match pkt */
514};
515
516/* TSV, Transmit Status Vector */
517struct bcm_sysport_tx_counters {
518 struct bcm_sysport_pkt_counters pkt_cnt;
519 u32 pkts; /* RO (0x4a8) Transmitted pkt */
520 u32 mca; /* RO # of xmited multicast pkt */
521 u32 bca; /* RO # of xmited broadcast pkt */
522 u32 pf; /* RO # of xmited pause frame count */
523 u32 cf; /* RO # of xmited control frame count */
524 u32 fcs; /* RO # of xmited FCS error count */
525 u32 ovr; /* RO # of xmited oversize pkt */
526 u32 drf; /* RO # of xmited deferral pkt */
527 u32 edf; /* RO # of xmited Excessive deferral pkt*/
528 u32 scl; /* RO # of xmited single collision pkt */
529 u32 mcl; /* RO # of xmited multiple collision pkt*/
530 u32 lcl; /* RO # of xmited late collision pkt */
531 u32 ecl; /* RO # of xmited excessive collision pkt*/
532 u32 frg; /* RO # of xmited fragments pkt*/
533 u32 ncl; /* RO # of xmited total collision count */
534 u32 jbr; /* RO # of xmited jabber count*/
535 u32 bytes; /* RO # of xmited byte count */
536 u32 pok; /* RO # of xmited good pkt */
537 u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
538};
539
540struct bcm_sysport_mib {
541 struct bcm_sysport_rx_counters rx;
542 struct bcm_sysport_tx_counters tx;
543 u32 rx_runt_cnt;
544 u32 rx_runt_fcs;
545 u32 rx_runt_fcs_align;
546 u32 rx_runt_bytes;
547 u32 rxchk_bad_csum;
548 u32 rxchk_other_pkt_disc;
549 u32 rbuf_ovflow_cnt;
550 u32 rbuf_err_cnt;
551};
552
553/* HW maintains a large list of counters */
554enum bcm_sysport_stat_type {
555 BCM_SYSPORT_STAT_NETDEV = -1,
556 BCM_SYSPORT_STAT_MIB_RX,
557 BCM_SYSPORT_STAT_MIB_TX,
558 BCM_SYSPORT_STAT_RUNT,
559 BCM_SYSPORT_STAT_RXCHK,
560 BCM_SYSPORT_STAT_RBUF,
561};
562
563/* Macros to help define ethtool statistics */
564#define STAT_NETDEV(m) { \
565 .stat_string = __stringify(m), \
566 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
567 .stat_offset = offsetof(struct net_device_stats, m), \
568 .type = BCM_SYSPORT_STAT_NETDEV, \
569}
570
571#define STAT_MIB(str, m, _type) { \
572 .stat_string = str, \
573 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
574 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
575 .type = _type, \
576}
577
578#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
579#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
580#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
581
582#define STAT_RXCHK(str, m, ofs) { \
583 .stat_string = str, \
584 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
585 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
586 .type = BCM_SYSPORT_STAT_RXCHK, \
587 .reg_offset = ofs, \
588}
589
590#define STAT_RBUF(str, m, ofs) { \
591 .stat_string = str, \
592 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
593 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
594 .type = BCM_SYSPORT_STAT_RBUF, \
595 .reg_offset = ofs, \
596}
597
598struct bcm_sysport_stats {
599 char stat_string[ETH_GSTRING_LEN];
600 int stat_sizeof;
601 int stat_offset;
602 enum bcm_sysport_stat_type type;
603 /* reg offset from UMAC base for misc counters */
604 u16 reg_offset;
605};
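/* Illustrative use of the macros above; the actual statistics table
 * lives in the .c file, and these entries are examples rather than the
 * full list:
 *
 *	static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 *		STAT_NETDEV(rx_packets),
 *		STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
 *		STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum,
 *			   RXCHK_BAD_CSUM_CNTR),
 *	};
 */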
606
607/* Software housekeeping helper structure */
608struct bcm_sysport_cb {
609 struct sk_buff *skb; /* SKB for RX packets */
610 void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
611
612 DEFINE_DMA_UNMAP_ADDR(dma_addr);
613 DEFINE_DMA_UNMAP_LEN(dma_len);
614};
615
616/* Software view of the TX ring */
617struct bcm_sysport_tx_ring {
618 spinlock_t lock; /* Ring lock for tx reclaim/xmit */
619 struct napi_struct napi; /* NAPI per tx queue */
620 dma_addr_t desc_dma; /* DMA cookie */
621 unsigned int index; /* Ring index */
622 unsigned int size; /* Ring current size */
623 unsigned int alloc_size; /* Ring one-time allocated size */
624 unsigned int desc_count; /* Number of descriptors */
625 unsigned int curr_desc; /* Current descriptor */
626 unsigned int c_index; /* Last consumer index */
627 unsigned int p_index; /* Current producer index */
628 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
629 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
630 struct bcm_sysport_priv *priv; /* private context backpointer */
631};
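/* Sketch: with the 16-bit hardware producer/consumer counters defined
 * above, ring occupancy can be derived as
 *
 *	used = (ring->p_index - ring->c_index) & RING_PROD_INDEX_MASK;
 *
 * the driver's TX reclaim path remains the authoritative logic.
 */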
632
633/* Driver private structure */
634struct bcm_sysport_priv {
635 void __iomem *base;
636 u32 irq0_stat;
637 u32 irq0_mask;
638 u32 irq1_stat;
639 u32 irq1_mask;
640 struct napi_struct napi ____cacheline_aligned;
641 struct net_device *netdev;
642 struct platform_device *pdev;
643 int irq0;
644 int irq1;
645
646 /* Transmit rings */
647 struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
648
649 /* Receive queue */
650 void __iomem *rx_bds;
651 void __iomem *rx_bd_assign_ptr;
652 unsigned int rx_bd_assign_index;
653 struct bcm_sysport_cb *rx_cbs;
654 unsigned int num_rx_bds;
655 unsigned int rx_read_ptr;
656 unsigned int rx_c_index;
657
658 /* PHY device */
659 struct device_node *phy_dn;
660 struct phy_device *phydev;
661 phy_interface_t phy_interface;
662 int old_pause;
663 int old_link;
664 int old_duplex;
665
666 /* Misc fields */
667 unsigned int rx_csum_en:1;
668 unsigned int tsb_en:1;
669 unsigned int crc_fwd:1;
670 u16 rev;
671
672 /* MIB related fields */
673 struct bcm_sysport_mib mib;
674
675 /* Ethtool */
676 u32 msg_enable;
677};
678#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79a38e1..05c6af6c418f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
 		return -ENOMEM;
 	net_dev->netdev_ops = &bgmac_netdev_ops;
 	net_dev->irq = core->irq;
-	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+	net_dev->ethtool_ops = &bgmac_ethtool_ops;
 	bgmac = netdev_priv(net_dev);
 	bgmac->net_dev = net_dev;
 	bgmac->core = core;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab83708b6a1..67d2b0047371 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		}
 	}
 	else {
-		ethtool_cmd_speed_set(cmd, -1);
-		cmd->duplex = -1;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	spin_unlock_bh(&bp->phy_lock);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8aba0ea5..4cab09d3f807 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c5a3da..47c5814114e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
+		/* A rmb() is required to ensure that the CQE is not read
+		 * before it is written by the adapter DMA. PCI ordering
+		 * rules will make sure the other fields are written before
+		 * the marker at the end of struct eth_fast_path_rx_cqe
+		 * but without rmb() a weakly ordered processor can process
+		 * stale data. Without the barrier TPA state-machine might
+		 * enter inconsistent state and kernel stack might be
+		 * provided with incorrect packet description - these lead
+		 * to various kernel crashes.
+		 */
+		rmb();
+
 		cqe_fp_flags = cqe_fp->type_error_flags;
 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc033ca5..571427c7226b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea5421dd96..51a952c51cb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f64463e..c6939ecb02c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e3149b..bd0600cf7266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 	return T_ETH_INDIRECTION_TABLE_SIZE;
 }
 
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
 	return 0;
 }
 
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
 
 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
 		/*
-		 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+		 * The same as in bnx2x_get_rxfh: we can't use a memcpy()
 		 * as an internal storage of an indirection table is a u8 array
 		 * while indir->ring_index points to an array of u32.
 		 *
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_rxnfc = bnx2x_get_rxnfc,
 	.set_rxnfc = bnx2x_set_rxnfc,
 	.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
-	.get_rxfh_indir = bnx2x_get_rxfh_indir,
-	.set_rxfh_indir = bnx2x_set_rxfh_indir,
+	.get_rxfh = bnx2x_get_rxfh,
+	.set_rxfh = bnx2x_set_rxfh,
 	.get_channels = bnx2x_get_channels,
 	.set_channels = bnx2x_set_channels,
 	.get_module_info = bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
 	.get_rxnfc = bnx2x_get_rxnfc,
 	.set_rxnfc = bnx2x_set_rxnfc,
 	.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
-	.get_rxfh_indir = bnx2x_get_rxfh_indir,
-	.set_rxfh_indir = bnx2x_set_rxfh_indir,
+	.get_rxfh = bnx2x_get_rxfh,
+	.set_rxfh = bnx2x_set_rxfh,
 	.get_channels = bnx2x_get_channels,
 	.set_channels = bnx2x_set_channels,
 };
 
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
 {
-	if (IS_PF(bp))
-		SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
-	else /* vf */
-		SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+	netdev->ethtool_ops = (IS_PF(bp)) ?
+		&bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fce..8aafd9b5d6a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
  * Based on the original idea of John Wright <john.wright@hp.com>.
  */
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7968f4..bd90e50bd8e6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
  */
 
 #ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd900960..5669ed2e87d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 * 10 *
11 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Vladislav Zolotarov <vladz@broadcom.com> 12 * Written by: Vladislav Zolotarov
13 */ 13 */
14 14
15#ifndef BNX2X_INIT_OPS_H 15#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7304b6..53fb4fa61b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
 	 */
 	u32 val;
 	struct bnx2x *bp = params->bp;
-	int bnx2x_status = 0;
 	u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
 
 	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
 	bnx2x_update_pfc_nig(params, vars, pfc_params);
 
 	if (!vars->link_up)
-		return bnx2x_status;
+		return 0;
 
 	DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
 
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
 	    == 0) {
 		DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
 		bnx2x_emac_enable(params, vars, 0);
-		return bnx2x_status;
+		return 0;
 	}
 	if (CHIP_IS_E2(bp))
 		bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
 		val = 1;
 		REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
 	}
-	return bnx2x_status;
+	return 0;
 }
 
 static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 					struct link_params *params,
 					struct link_vars *vars) {
-	u16 lane, i, cl72_ctrl, an_adv = 0;
+	u16 lane, i, cl72_ctrl, an_adv = 0, val;
+	u32 wc_lane_config;
 	struct bnx2x *bp = params->bp;
 	static struct bnx2x_reg_set reg_set[] = {
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Enable Auto-Detect to support 1G over CL37 as well */
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
-
+	wc_lane_config = REG_RD(bp, params->shmem_base +
+				offsetof(struct shmem_region, dev_info.
+					 shared_hw_config.wc_lane_config));
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
 	/* Force cl48 sync_status LOW to avoid getting stuck in CL73
 	 * parallel-detect loop when CL73 and CL37 are enabled.
 	 */
-	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
-			  MDIO_AER_BLOCK_AER_REG, 0);
+	val |= 1 << 11;
+
+	/* Restore Polarity settings in case it was run over by
+	 * previous link owner
+	 */
+	if (wc_lane_config &
+	    (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+		val |= 3 << 2;
+	else
+		val &= ~(3 << 2);
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
-	bnx2x_set_aer_mmd(params, phy);
+			 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+			 val);
 
 	bnx2x_disable_kr2(params, vars, phy);
 }
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
 static int bnx2x_link_initialize(struct link_params *params,
 				 struct link_vars *vars)
 {
-	int rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
 	/* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
 			 NIG_STATUS_XGXS0_LINK_STATUS |
 			 NIG_STATUS_SERDES0_LINK_STATUS |
 			 NIG_MASK_MI_INT));
-	return rc;
+	return 0;
 }
 
 static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
 	u32 dont_clear_stat, lfa_sts;
 	struct bnx2x *bp = params->bp;
 
+	bnx2x_set_mdio_emac_per_phy(bp, params);
 	/* Sync the link parameters */
 	bnx2x_link_status_update(params, vars);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51ed5bec..2887034523e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_VERS	(0x05)
 #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
 #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+	/* UNDI marks its presence in DORQ -
+	 * it initializes CID offset for normal bell to 0x7
+	 */
+	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+	      MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+		return false;
+
+	if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+		BNX2X_DEV_INFO("UNDI previously loaded\n");
+		return true;
+	}
+
+	return false;
+}
+
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
 	u8 major, minor, version;
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
 
 	BNX2X_DEV_INFO("Path is unmarked\n");
 
+	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+	if (bnx2x_prev_is_after_undi(bp))
+		goto out;
+
 	/* If function has FLR capabilities, and existing FW version matches
 	 * the one required, then FLR will be sufficient to clean any residue
 	 * left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
 
 	BNX2X_DEV_INFO("Could not FLR\n");
 
+out:
 	/* Close the MCP request, return failure*/
 	rc = bnx2x_prev_mcp_done(bp);
 	if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* close LLH filters towards the BRB */
 	bnx2x_set_rx_filter(&bp->link_params, 0);
 
-	/* Check if the UNDI driver was previously loaded
-	 * UNDI driver initializes CID offset for normal bell to 0x7
-	 */
-	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
-		tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-		if (tmp_reg == 0x7) {
-			BNX2X_DEV_INFO("UNDI previously loaded\n");
-			prev_undi = true;
-			/* clear the UNDI indication */
-			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
-			/* clear possible idle check errors */
-			REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
-		}
+	/* Check if the UNDI driver was previously loaded */
+	if (bnx2x_prev_is_after_undi(bp)) {
+		prev_undi = true;
+		/* clear the UNDI indication */
+		REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		/* clear possible idle check errors */
+		REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
 	}
 	if (!CHIP_IS_E1x(bp))
 		/* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	netdev_reset_tc(bp->dev);
 
 	del_timer_sync(&bp->timer);
-	cancel_delayed_work(&bp->sp_task);
-	cancel_delayed_work(&bp->period_task);
+	cancel_delayed_work_sync(&bp->sp_task);
+	cancel_delayed_work_sync(&bp->period_task);
 
 	spin_lock_bh(&bp->stats_lock);
 	bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c4277..b1936044767a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Vladislav Zolotarov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c790ed88..718ecd294661 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Vladislav Zolotarov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf01488d26e..eda8583f6fc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *	       Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
  *
  */
 #include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
 
-	/* set the VF doorbell threshold */
-	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+	/* set the VF doorbell threshold. This threshold represents the amount
+	 * of doorbells allowed in the main DORQ fifo for a specific VF.
+	 */
+	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
 }
1077 1079
1078void bnx2x_iov_init_dmae(struct bnx2x *bp) 1080void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2576 2578
2577 ivi->vf = vfidx; 2579 ivi->vf = vfidx;
2578 ivi->qos = 0; 2580 ivi->qos = 0;
2579 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 2581 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2582 ivi->min_tx_rate = 0;
2580 ivi->spoofchk = 1; /*always enabled */ 2583 ivi->spoofchk = 1; /*always enabled */
2581 if (vf->state == VF_ENABLED) { 2584 if (vf->state == VF_ENABLED) {
2582 /* mac and vlan are in vlan_mac objects */ 2585 /* mac and vlan are in vlan_mac objects */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adba52f9..96c575e147a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *	       Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
  */
 #ifndef BNX2X_SRIOV_H
 #define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 	return NULL;
 }
 
-static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; }
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070411aa..ca47665f94bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f35845006cdd..2beceaefdeea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c7155b98a..d712d0ddd719 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *	       Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
  */
 
 #include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81170e5..e21e706762c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Ariel Elior <ariele@broadcom.com> 16 * Written by: Ariel Elior <ariel.elior@qlogic.com>
17 */ 17 */
18#ifndef VF_PF_IF_H 18#ifndef VF_PF_IF_H
19#define VF_PF_IF_H 19#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2fa804..8244e2b14bb4 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
608 pr_err("%s: Bad type %d\n", __func__, ulp_type); 608 pr_err("%s: Bad type %d\n", __func__, ulp_type);
609 return -EINVAL; 609 return -EINVAL;
610 } 610 }
611
612 if (ulp_type == CNIC_ULP_ISCSI)
613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
614
611 mutex_lock(&cnic_lock); 615 mutex_lock(&cnic_lock);
612 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 616 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
613 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 617 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
620 } 624 }
621 mutex_unlock(&cnic_lock); 625 mutex_unlock(&cnic_lock);
622 626
623 if (ulp_type == CNIC_ULP_ISCSI) 627 if (ulp_type == CNIC_ULP_FCOE)
624 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
625 else if (ulp_type == CNIC_ULP_FCOE)
626 dev->fcoe_cap = NULL; 628 dev->fcoe_cap = NULL;
627 629
628 synchronize_rcu(); 630 synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1039 struct cnic_local *cp = dev->cnic_priv; 1041 struct cnic_local *cp = dev->cnic_priv;
1040 struct cnic_uio_dev *udev; 1042 struct cnic_uio_dev *udev;
1041 1043
1042 read_lock(&cnic_dev_lock);
1043 list_for_each_entry(udev, &cnic_udev_list, list) { 1044 list_for_each_entry(udev, &cnic_udev_list, list) {
1044 if (udev->pdev == dev->pcidev) { 1045 if (udev->pdev == dev->pcidev) {
1045 udev->dev = dev; 1046 udev->dev = dev;
1046 if (__cnic_alloc_uio_rings(udev, pages)) { 1047 if (__cnic_alloc_uio_rings(udev, pages)) {
1047 udev->dev = NULL; 1048 udev->dev = NULL;
1048 read_unlock(&cnic_dev_lock);
1049 return -ENOMEM; 1049 return -ENOMEM;
1050 } 1050 }
1051 cp->udev = udev; 1051 cp->udev = udev;
1052 read_unlock(&cnic_dev_lock);
1053 return 0; 1052 return 0;
1054 } 1053 }
1055 } 1054 }
1056 read_unlock(&cnic_dev_lock);
1057 1055
1058 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1056 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059 if (!udev) 1057 if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1067 if (__cnic_alloc_uio_rings(udev, pages)) 1065 if (__cnic_alloc_uio_rings(udev, pages))
1068 goto err_udev; 1066 goto err_udev;
1069 1067
1070 write_lock(&cnic_dev_lock);
1071 list_add(&udev->list, &cnic_udev_list); 1068 list_add(&udev->list, &cnic_udev_list);
1072 write_unlock(&cnic_dev_lock);
1073 1069
1074 pci_dev_get(udev->pdev); 1070 pci_dev_get(udev->pdev);
1075 1071
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5624{ 5620{
5625 int if_type; 5621 int if_type;
5626 5622
5627 rcu_read_lock();
5628 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5629 struct cnic_ulp_ops *ulp_ops; 5624 struct cnic_ulp_ops *ulp_ops;
5630 void *ctx; 5625 void *ctx;
5631 5626
5632 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5627 mutex_lock(&cnic_lock);
5633 if (!ulp_ops || !ulp_ops->indicate_netevent) 5628 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5629 lockdep_is_held(&cnic_lock));
5630 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5631 mutex_unlock(&cnic_lock);
5634 continue; 5632 continue;
5633 }
5635 5634
5636 ctx = cp->ulp_handle[if_type]; 5635 ctx = cp->ulp_handle[if_type];
5637 5636
5637 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5638 mutex_unlock(&cnic_lock);
5639
5638 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5641
5642 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5639 } 5643 }
5640 rcu_read_unlock();
5641} 5644}
5642 5645
5643/* netdev event handler */ 5646/* netdev event handler */
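Editor's note: the hunk above swaps rcu_read_lock() for the cnic_lock mutex plus a ULP_F_CALL_PENDING bit, so indicate_netevent() is now allowed to sleep while the unregister path can still wait for in-flight calls. A minimal sketch of that pattern, with illustrative names (my_lock, my_ops, my_notify) that are not cnic's:

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/bitops.h>

	struct my_ops {
		void (*handler)(void *ctx, unsigned long event); /* may sleep */
	};

	static DEFINE_MUTEX(my_lock);
	static struct my_ops __rcu *my_slot;
	static unsigned long my_flags;
	#define MY_CALL_PENDING 0

	static void my_notify(void *ctx, unsigned long event)
	{
		struct my_ops *ops;

		mutex_lock(&my_lock);
		ops = rcu_dereference_protected(my_slot,
						lockdep_is_held(&my_lock));
		if (!ops) {
			mutex_unlock(&my_lock);
			return;
		}
		set_bit(MY_CALL_PENDING, &my_flags);
		mutex_unlock(&my_lock);	/* drop the lock before a sleeping call */

		ops->handler(ctx, event);

		clear_bit(MY_CALL_PENDING, &my_flags);
	}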
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..5ba1cfbd60da 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
2481 dev_set_drvdata(&pdev->dev, dev); 2481 dev_set_drvdata(&pdev->dev, dev);
2482 ether_addr_copy(dev->dev_addr, macaddr); 2482 ether_addr_copy(dev->dev_addr, macaddr);
2483 dev->watchdog_timeo = 2 * HZ; 2483 dev->watchdog_timeo = 2 * HZ;
2484 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); 2484 dev->ethtool_ops = &bcmgenet_ethtool_ops;
2485 dev->netdev_ops = &bcmgenet_netdev_ops; 2485 dev->netdev_ops = &bcmgenet_netdev_ops;
2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); 2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2487 2487
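Editor's note: the SET_ETHTOOL_OPS() change here (and in several hunks below) is a mechanical tree-wide conversion; the macro expanded to a plain pointer assignment and was being removed. As a two-line fragment (example_ethtool_ops is illustrative):

	SET_ETHTOOL_OPS(dev, &example_ethtool_ops);	/* old macro form */
	dev->ethtool_ops = &example_ethtool_ops;	/* direct assignment */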
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673beaff..add8d8596084 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
298static int bcmgenet_mii_probe(struct net_device *dev) 298static int bcmgenet_mii_probe(struct net_device *dev)
299{ 299{
300 struct bcmgenet_priv *priv = netdev_priv(dev); 300 struct bcmgenet_priv *priv = netdev_priv(dev);
301 struct device_node *dn = priv->pdev->dev.of_node;
301 struct phy_device *phydev; 302 struct phy_device *phydev;
302 unsigned int phy_flags; 303 unsigned int phy_flags;
303 int ret; 304 int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
307 return 0; 308 return 0;
308 } 309 }
309 310
310 if (priv->phy_dn) 311 /* In the case of a fixed PHY, the DT node associated
311 phydev = of_phy_connect(dev, priv->phy_dn, 312 * to the PHY is the Ethernet MAC DT node.
312 bcmgenet_mii_setup, 0, 313 */
313 priv->phy_interface); 314 if (of_phy_is_fixed_link(dn)) {
314 else 315 ret = of_phy_register_fixed_link(dn);
315 phydev = of_phy_connect_fixed_link(dev, 316 if (ret)
316 bcmgenet_mii_setup, 317 return ret;
317 priv->phy_interface); 318
319 priv->phy_dn = dn;
320 }
318 321
322 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
323 priv->phy_interface);
319 if (!phydev) { 324 if (!phydev) {
320 pr_err("could not attach to PHY\n"); 325 pr_err("could not attach to PHY\n");
321 return -ENODEV; 326 return -ENODEV;
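Editor's note: a minimal probe-side sketch of the fixed-link handling added above. of_phy_is_fixed_link(), of_phy_register_fixed_link() and of_phy_connect() are the real OF helpers; the surrounding function and names are illustrative only. For a fixed PHY the MAC's own DT node doubles as the PHY node passed to of_phy_connect():

	#include <linux/of_mdio.h>
	#include <linux/phy.h>
	#include <linux/netdevice.h>

	static void example_adjust_link(struct net_device *dev)
	{
		/* react to link changes; intentionally empty in this sketch */
	}

	static int example_mii_probe(struct net_device *dev,
				     struct device_node *dn)
	{
		struct phy_device *phydev;

		if (of_phy_is_fixed_link(dn)) {
			int ret = of_phy_register_fixed_link(dn);

			if (ret)
				return ret;
		}

		phydev = of_phy_connect(dev, dn, example_adjust_link, 0,
					PHY_INTERFACE_MODE_MII);
		return phydev ? 0 : -ENODEV;
	}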
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5ce1ad..df2792d8383d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation. 7 * Copyright (C) 2005-2014 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 136 97#define TG3_MIN_NUM 137
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "Jan 03, 2014" 100#define DRV_MODULE_RELDATE "May 11, 2014"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3224 return 0; 3224 return 0;
3225} 3225}
3226 3226
3227#define NVRAM_CMD_TIMEOUT 10000 3227#define NVRAM_CMD_TIMEOUT 100
3228 3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{ 3230{
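Editor's note: NVRAM_CMD_TIMEOUT bounds a status-polling loop, so the change above shrinks that bound from 10000 to 100 samples. The shape of such a loop, illustrative only and not tg3's exact code:

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	#define CMD_TIMEOUT 100		/* poll attempts before giving up */
	#define CMD_DONE    0x1		/* illustrative "done" bit */

	static int wait_cmd_done(void __iomem *reg)
	{
		int i;

		for (i = 0; i < CMD_TIMEOUT; i++) {
			if (readl(reg) & CMD_DONE)
				return 0;
			usleep_range(10, 40);	/* sleep between samples */
		}
		return -EBUSY;
	}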
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
7871 return NETDEV_TX_OK; 7871 return NETDEV_TX_OK;
7872} 7872}
7873 7873
7874/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 7874/* hard_start_xmit for all devices */
7875 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7876 */
7877static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7875static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7878{ 7876{
7879 struct tg3 *tp = netdev_priv(dev); 7877 struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7884 struct tg3_napi *tnapi; 7882 struct tg3_napi *tnapi;
7885 struct netdev_queue *txq; 7883 struct netdev_queue *txq;
7886 unsigned int last; 7884 unsigned int last;
7885 struct iphdr *iph = NULL;
7886 struct tcphdr *tcph = NULL;
7887 __sum16 tcp_csum = 0, ip_csum = 0;
7888 __be16 ip_tot_len = 0;
7887 7889
7888 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7890 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7889 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7891 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7915 7917
7916 mss = skb_shinfo(skb)->gso_size; 7918 mss = skb_shinfo(skb)->gso_size;
7917 if (mss) { 7919 if (mss) {
7918 struct iphdr *iph;
7919 u32 tcp_opt_len, hdr_len; 7920 u32 tcp_opt_len, hdr_len;
7920 7921
7921 if (skb_cow_head(skb, 0)) 7922 if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7927 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7928 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7928 7929
7929 if (!skb_is_gso_v6(skb)) { 7930 if (!skb_is_gso_v6(skb)) {
7931 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7932 tg3_flag(tp, TSO_BUG))
7933 return tg3_tso_bug(tp, skb);
7934
7935 ip_csum = iph->check;
7936 ip_tot_len = iph->tot_len;
7930 iph->check = 0; 7937 iph->check = 0;
7931 iph->tot_len = htons(mss + hdr_len); 7938 iph->tot_len = htons(mss + hdr_len);
7932 } 7939 }
7933 7940
7934 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7935 tg3_flag(tp, TSO_BUG))
7936 return tg3_tso_bug(tp, skb);
7937
7938 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7941 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7939 TXD_FLAG_CPU_POST_DMA); 7942 TXD_FLAG_CPU_POST_DMA);
7940 7943
7944 tcph = tcp_hdr(skb);
7945 tcp_csum = tcph->check;
7946
7941 if (tg3_flag(tp, HW_TSO_1) || 7947 if (tg3_flag(tp, HW_TSO_1) ||
7942 tg3_flag(tp, HW_TSO_2) || 7948 tg3_flag(tp, HW_TSO_2) ||
7943 tg3_flag(tp, HW_TSO_3)) { 7949 tg3_flag(tp, HW_TSO_3)) {
7944 tcp_hdr(skb)->check = 0; 7950 tcph->check = 0;
7945 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7951 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7946 } else 7952 } else {
7947 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 7953 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7948 iph->daddr, 0, 7954 0, IPPROTO_TCP, 0);
7949 IPPROTO_TCP, 7955 }
7950 0);
7951 7956
7952 if (tg3_flag(tp, HW_TSO_3)) { 7957 if (tg3_flag(tp, HW_TSO_3)) {
7953 mss |= (hdr_len & 0xc) << 12; 7958 mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8047 if (would_hit_hwbug) { 8052 if (would_hit_hwbug) {
8048 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8053 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8049 8054
8055 if (mss) {
8056 /* If it's a TSO packet, do GSO instead of
8057 * allocating and copying to a large linear SKB
8058 */
8059 if (ip_tot_len) {
8060 iph->check = ip_csum;
8061 iph->tot_len = ip_tot_len;
8062 }
8063 tcph->check = tcp_csum;
8064 return tg3_tso_bug(tp, skb);
8065 }
8066
8050 /* If the workaround fails due to memory/mapping 8067 /* If the workaround fails due to memory/mapping
8051 * failure, silently drop this packet. 8068 * failure, silently drop this packet.
8052 */ 8069 */
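Editor's note: a sketch of the save/restore idea threaded through this patch. tg3_start_xmit() edits the IP/TCP headers for hardware TSO, so before falling back to tg3_tso_bug() (software GSO) it must put the original fields back; the hunk above does exactly that with ip_csum, ip_tot_len and tcp_csum. Helper and struct names below are illustrative:

	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <linux/skbuff.h>

	struct tso_hdr_state {
		__sum16 ip_csum;
		__be16  ip_tot_len;
		__sum16 tcp_csum;
	};

	static void tso_hdr_save(const struct sk_buff *skb,
				 struct tso_hdr_state *s)
	{
		s->ip_csum = ip_hdr(skb)->check;
		s->ip_tot_len = ip_hdr(skb)->tot_len;
		s->tcp_csum = tcp_hdr(skb)->check;
	}

	static void tso_hdr_restore(struct sk_buff *skb,
				    const struct tso_hdr_state *s)
	{
		/* undo the TSO edits so the skb is a valid GSO packet again */
		ip_hdr(skb)->check = s->ip_csum;
		ip_hdr(skb)->tot_len = s->ip_tot_len;
		tcp_hdr(skb)->check = s->tcp_csum;
	}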
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
11876static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11893static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11877{ 11894{
11878 struct tg3 *tp = netdev_priv(dev); 11895 struct tg3 *tp = netdev_priv(dev);
11879 int ret; 11896 int ret, cpmu_restore = 0;
11880 u8 *pd; 11897 u8 *pd;
11881 u32 i, offset, len, b_offset, b_count; 11898 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11882 __be32 val; 11899 __be32 val;
11883 11900
11884 if (tg3_flag(tp, NO_NVRAM)) 11901 if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11890 11907
11891 eeprom->magic = TG3_EEPROM_MAGIC; 11908 eeprom->magic = TG3_EEPROM_MAGIC;
11892 11909
11910 /* Override clock, link aware and link idle modes */
11911 if (tg3_flag(tp, CPMU_PRESENT)) {
11912 cpmu_val = tr32(TG3_CPMU_CTRL);
11913 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11914 CPMU_CTRL_LINK_IDLE_MODE)) {
11915 tw32(TG3_CPMU_CTRL, cpmu_val &
11916 ~(CPMU_CTRL_LINK_AWARE_MODE |
11917 CPMU_CTRL_LINK_IDLE_MODE));
11918 cpmu_restore = 1;
11919 }
11920 }
11921 tg3_override_clk(tp);
11922
11893 if (offset & 3) { 11923 if (offset & 3) {
11894 /* adjustments to start on required 4 byte boundary */ 11924 /* adjustments to start on required 4 byte boundary */
11895 b_offset = offset & 3; 11925 b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11900 } 11930 }
11901 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 11931 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11902 if (ret) 11932 if (ret)
11903 return ret; 11933 goto eeprom_done;
11904 memcpy(data, ((char *)&val) + b_offset, b_count); 11934 memcpy(data, ((char *)&val) + b_offset, b_count);
11905 len -= b_count; 11935 len -= b_count;
11906 offset += b_count; 11936 offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11912 for (i = 0; i < (len - (len & 3)); i += 4) { 11942 for (i = 0; i < (len - (len & 3)); i += 4) {
11913 ret = tg3_nvram_read_be32(tp, offset + i, &val); 11943 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11914 if (ret) { 11944 if (ret) {
11945 if (i)
11946 i -= 4;
11915 eeprom->len += i; 11947 eeprom->len += i;
11916 return ret; 11948 goto eeprom_done;
11917 } 11949 }
11918 memcpy(pd + i, &val, 4); 11950 memcpy(pd + i, &val, 4);
11951 if (need_resched()) {
11952 if (signal_pending(current)) {
11953 eeprom->len += i;
11954 ret = -EINTR;
11955 goto eeprom_done;
11956 }
11957 cond_resched();
11958 }
11919 } 11959 }
11920 eeprom->len += i; 11960 eeprom->len += i;
11921 11961
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11926 b_offset = offset + len - b_count; 11966 b_offset = offset + len - b_count;
11927 ret = tg3_nvram_read_be32(tp, b_offset, &val); 11967 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11928 if (ret) 11968 if (ret)
11929 return ret; 11969 goto eeprom_done;
11930 memcpy(pd, &val, b_count); 11970 memcpy(pd, &val, b_count);
11931 eeprom->len += b_count; 11971 eeprom->len += b_count;
11932 } 11972 }
11933 return 0; 11973 ret = 0;
11974
11975eeprom_done:
11976 /* Restore clock, link aware and link idle modes */
11977 tg3_restore_clk(tp);
11978 if (cpmu_restore)
11979 tw32(TG3_CPMU_CTRL, cpmu_val);
11980
11981 return ret;
11934} 11982}
11935 11983
11936static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11984static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
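Editor's note: the need_resched()/signal_pending() additions in tg3_get_eeprom() are the standard pattern for a long, word-at-a-time device read: yield the CPU when the scheduler asks and abort with -EINTR on a signal, reporting a short read. A minimal sketch with a hypothetical read_word() helper:

	#include <linux/types.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	static u32 read_word(unsigned int idx)
	{
		return idx;	/* stand-in for a slow NVRAM/EEPROM read */
	}

	static int copy_words(u32 *dst, unsigned int nwords)
	{
		unsigned int i;

		for (i = 0; i < nwords; i++) {
			dst[i] = read_word(i);
			if (need_resched()) {
				if (signal_pending(current))
					return -EINTR;	/* short read */
				cond_resched();
			}
		}
		return 0;
	}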
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12484 return size; 12532 return size;
12485} 12533}
12486 12534
12487static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) 12535static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
12488{ 12536{
12489 struct tg3 *tp = netdev_priv(dev); 12537 struct tg3 *tp = netdev_priv(dev);
12490 int i; 12538 int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12495 return 0; 12543 return 0;
12496} 12544}
12497 12545
12498static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) 12546static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
12499{ 12547{
12500 struct tg3 *tp = netdev_priv(dev); 12548 struct tg3 *tp = netdev_priv(dev);
12501 size_t i; 12549 size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
14027 .get_sset_count = tg3_get_sset_count, 14075 .get_sset_count = tg3_get_sset_count,
14028 .get_rxnfc = tg3_get_rxnfc, 14076 .get_rxnfc = tg3_get_rxnfc,
14029 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14077 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14030 .get_rxfh_indir = tg3_get_rxfh_indir, 14078 .get_rxfh = tg3_get_rxfh,
14031 .set_rxfh_indir = tg3_set_rxfh_indir, 14079 .set_rxfh = tg3_set_rxfh,
14032 .get_channels = tg3_get_channels, 14080 .get_channels = tg3_get_channels,
14033 .set_channels = tg3_set_channels, 14081 .set_channels = tg3_set_channels,
14034 .get_ts_info = tg3_get_ts_info, 14082 .get_ts_info = tg3_get_ts_info,
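Editor's note: the .get_rxfh_indir/.set_rxfh_indir to .get_rxfh/.set_rxfh rename reflects an ethtool core change: the new hooks also carry the RSS hash key. A driver with no exposed key, as tg3 does here, simply ignores that argument. A minimal sketch (table and size are illustrative):

	#include <linux/netdevice.h>

	#define EX_RSS_SIZE 128
	static u32 ex_rss_table[EX_RSS_SIZE];

	static int ex_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
	{
		unsigned int i;

		/* no hash key to report: leave "key" untouched */
		for (i = 0; i < EX_RSS_SIZE; i++)
			indir[i] = ex_rss_table[i];
		return 0;
	}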
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5a356e..461accaf0aa4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2013 Broadcom Corporation. 7 * Copyright (C) 2007-2014 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e150825bb5..882cad71ad62 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
266 ethtool_cmd_speed_set(cmd, SPEED_10000); 266 ethtool_cmd_speed_set(cmd, SPEED_10000);
267 cmd->duplex = DUPLEX_FULL; 267 cmd->duplex = DUPLEX_FULL;
268 } else { 268 } else {
269 ethtool_cmd_speed_set(cmd, -1); 269 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
270 cmd->duplex = -1; 270 cmd->duplex = DUPLEX_UNKNOWN;
271 } 271 }
272 cmd->transceiver = XCVR_EXTERNAL; 272 cmd->transceiver = XCVR_EXTERNAL;
273 cmd->maxtxpkt = 0; 273 cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
1137void 1137void
1138bnad_set_ethtool_ops(struct net_device *netdev) 1138bnad_set_ethtool_ops(struct net_device *netdev)
1139{ 1139{
1140 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); 1140 netdev->ethtool_ops = &bnad_ethtool_ops;
1141} 1141}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea44b83..25d6b2a10e4e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
1737 platform_set_drvdata(pdev, ndev); 1737 platform_set_drvdata(pdev, ndev);
1738 ether_setup(ndev); 1738 ether_setup(ndev);
1739 ndev->netdev_ops = &xgmac_netdev_ops; 1739 ndev->netdev_ops = &xgmac_netdev_ops;
1740 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1740 ndev->ethtool_ops = &xgmac_ethtool_ops;
1741 spin_lock_init(&priv->stats_lock); 1741 spin_lock_init(&priv->stats_lock);
1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); 1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1743 1743
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a85ce61..186566bfdbc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
580 ethtool_cmd_speed_set(cmd, p->link_config.speed); 580 ethtool_cmd_speed_set(cmd, p->link_config.speed);
581 cmd->duplex = p->link_config.duplex; 581 cmd->duplex = p->link_config.duplex;
582 } else { 582 } else {
583 ethtool_cmd_speed_set(cmd, -1); 583 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
584 cmd->duplex = -1; 584 cmd->duplex = DUPLEX_UNKNOWN;
585 } 585 }
586 586
587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1100 1100
1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1102 1102
1103 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1103 netdev->ethtool_ops = &t1_ethtool_ops;
1104 } 1104 }
1105 1105
1106 if (t1_init_sw_modules(adapter, bi) < 0) { 1106 if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb711b7e5..5d9cce053cc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1809 ethtool_cmd_speed_set(cmd, p->link_config.speed); 1809 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1810 cmd->duplex = p->link_config.duplex; 1810 cmd->duplex = p->link_config.duplex;
1811 } else { 1811 } else {
1812 ethtool_cmd_speed_set(cmd, -1); 1812 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1813 cmd->duplex = -1; 1813 cmd->duplex = DUPLEX_UNKNOWN;
1814 } 1814 }
1815 1815
1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3291 netdev->features |= NETIF_F_HIGHDMA; 3291 netdev->features |= NETIF_F_HIGHDMA;
3292 3292
3293 netdev->netdev_ops = &cxgb_netdev_ops; 3293 netdev->netdev_ops = &cxgb_netdev_ops;
3294 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 3294 netdev->ethtool_ops = &cxgb_ethtool_ops;
3295 } 3295 }
3296 3296
3297 pci_set_drvdata(pdev, adapter); 3297 pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd55f4e5..b0cbb2b7fd48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
185 if (ether_addr_equal(dev->dev_addr, mac)) { 185 if (ether_addr_equal(dev->dev_addr, mac)) {
186 rcu_read_lock(); 186 rcu_read_lock();
187 if (vlan && vlan != VLAN_VID_MASK) { 187 if (vlan && vlan != VLAN_VID_MASK) {
188 dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan); 188 dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
189 } else if (netif_is_bond_slave(dev)) { 189 } else if (netif_is_bond_slave(dev)) {
190 struct net_device *upper_dev; 190 struct net_device *upper_dev;
191 191
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ 357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ 358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ 359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
360 MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
361 MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
360}; 362};
361 363
362enum { 364enum {
363 MAX_EGRQ = 128, /* max # of egress queues, including FLs */ 365 INGQ_EXTRAS = 2, /* firmware event queue and */
364 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ 366 /* forwarded interrupts */
367 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
368 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
369 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
370 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
365}; 371};
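Editor's note: for scale, assuming MAX_ETH_QSETS = 32 and NCHAN = 4 (their values elsewhere in cxgb4.h/t4_hw.h at the time, stated here as an assumption), the derived bounds work out to MAX_EGRQ = 32*2 + 16*2 + 4 + 4 + 4 = 108 and MAX_INGQ = 32 + 16 + 4 + 4 + 4 + 2 = 62, replacing the old hard-coded 128 and 64.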
366 372
367struct adapter; 373struct adapter;
@@ -538,6 +544,7 @@ struct sge {
538 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; 544 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
539 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; 545 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
540 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; 546 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
547 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
541 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; 548 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
542 549
543 struct sge_rspq intrq ____cacheline_aligned_in_smp; 550 struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
548 u16 ethtxq_rover; /* Tx queue to clean up next */ 555 u16 ethtxq_rover; /* Tx queue to clean up next */
549 u16 ofldqsets; /* # of active offload queue sets */ 556 u16 ofldqsets; /* # of active offload queue sets */
550 u16 rdmaqs; /* # of available RDMA Rx queues */ 557 u16 rdmaqs; /* # of available RDMA Rx queues */
558 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
551 u16 ofld_rxq[MAX_OFLD_QSETS]; 559 u16 ofld_rxq[MAX_OFLD_QSETS];
552 u16 rdma_rxq[NCHAN]; 560 u16 rdma_rxq[NCHAN];
561 u16 rdma_ciq[NCHAN];
553 u16 timer_val[SGE_NTIMERS]; 562 u16 timer_val[SGE_NTIMERS];
554 u8 counter_val[SGE_NCOUNTERS]; 563 u8 counter_val[SGE_NCOUNTERS];
555 u32 fl_pg_order; /* large page allocation size */ 564 u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
577#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) 586#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
578#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) 587#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
579#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) 588#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
589#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
580 590
581struct l2t_data; 591struct l2t_data;
582 592
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..2f8d6b910383 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
818 for_each_rdmarxq(&adap->sge, i) 818 for_each_rdmarxq(&adap->sge, i)
819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
820 adap->port[0]->name, i); 820 adap->port[0]->name, i);
821
822 for_each_rdmaciq(&adap->sge, i)
823 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
824 adap->port[0]->name, i);
821} 825}
822 826
823static int request_msix_queue_irqs(struct adapter *adap) 827static int request_msix_queue_irqs(struct adapter *adap)
824{ 828{
825 struct sge *s = &adap->sge; 829 struct sge *s = &adap->sge;
826 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; 830 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
831 int msi_index = 2;
827 832
828 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 833 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
829 adap->msix_info[1].desc, &s->fw_evtq); 834 adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
857 goto unwind; 862 goto unwind;
858 msi_index++; 863 msi_index++;
859 } 864 }
865 for_each_rdmaciq(s, rdmaciqqidx) {
866 err = request_irq(adap->msix_info[msi_index].vec,
867 t4_sge_intr_msix, 0,
868 adap->msix_info[msi_index].desc,
869 &s->rdmaciq[rdmaciqqidx].rspq);
870 if (err)
871 goto unwind;
872 msi_index++;
873 }
860 return 0; 874 return 0;
861 875
862unwind: 876unwind:
877 while (--rdmaciqqidx >= 0)
878 free_irq(adap->msix_info[--msi_index].vec,
879 &s->rdmaciq[rdmaciqqidx].rspq);
863 while (--rdmaqidx >= 0) 880 while (--rdmaqidx >= 0)
864 free_irq(adap->msix_info[--msi_index].vec, 881 free_irq(adap->msix_info[--msi_index].vec,
865 &s->rdmarxq[rdmaqidx].rspq); 882 &s->rdmarxq[rdmaqidx].rspq);
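Editor's note: the unwind label above follows the usual reverse-order cleanup idiom: each newly added IRQ class is released before the ones requested earlier, keeping msi_index in step with the allocations. The generic shape, with hypothetical grab_one()/release_one():

	#include <linux/errno.h>

	static int grab_one(int i)     { return 0; }	/* hypothetical acquire */
	static void release_one(int i) { }		/* hypothetical release */

	static int grab_all(int nvec)
	{
		int i, err = 0;

		for (i = 0; i < nvec; i++) {
			err = grab_one(i);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)	/* free in reverse order of acquisition */
			release_one(i);
		return err;
	}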
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
885 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 902 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
886 for_each_rdmarxq(s, i) 903 for_each_rdmarxq(s, i)
887 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 904 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
905 for_each_rdmaciq(s, i)
906 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
888} 907}
889 908
890/** 909/**
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
1047 if (msi_idx > 0) 1066 if (msi_idx > 0)
1048 msi_idx++; 1067 msi_idx++;
1049 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, 1068 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1050 &q->fl, uldrx_handler); 1069 q->fl.size ? &q->fl : NULL,
1070 uldrx_handler);
1051 if (err) 1071 if (err)
1052 goto freeout; 1072 goto freeout;
1053 memset(&q->stats, 0, sizeof(q->stats)); 1073 memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
1064 if (msi_idx > 0) 1084 if (msi_idx > 0)
1065 msi_idx++; 1085 msi_idx++;
1066 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1086 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1067 msi_idx, &q->fl, uldrx_handler); 1087 msi_idx, q->fl.size ? &q->fl : NULL,
1088 uldrx_handler);
1068 if (err) 1089 if (err)
1069 goto freeout; 1090 goto freeout;
1070 memset(&q->stats, 0, sizeof(q->stats)); 1091 memset(&q->stats, 0, sizeof(q->stats));
1071 s->rdma_rxq[i] = q->rspq.abs_id; 1092 s->rdma_rxq[i] = q->rspq.abs_id;
1072 } 1093 }
1073 1094
1095 for_each_rdmaciq(s, i) {
1096 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1097
1098 if (msi_idx > 0)
1099 msi_idx++;
1100 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1101 msi_idx, q->fl.size ? &q->fl : NULL,
1102 uldrx_handler);
1103 if (err)
1104 goto freeout;
1105 memset(&q->stats, 0, sizeof(q->stats));
1106 s->rdma_ciq[i] = q->rspq.abs_id;
1107 }
1108
1074 for_each_port(adap, i) { 1109 for_each_port(adap, i) {
1075 /* 1110 /*
1076 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't 1111 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2252 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || 2287 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2253 p->port_type == FW_PORT_TYPE_FIBER_XAUI) 2288 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2254 cmd->port = PORT_FIBRE; 2289 cmd->port = PORT_FIBRE;
2255 else if (p->port_type == FW_PORT_TYPE_SFP) { 2290 else if (p->port_type == FW_PORT_TYPE_SFP ||
2256 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || 2291 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2257 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) 2292 p->port_type == FW_PORT_TYPE_QSFP) {
2293 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2294 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2295 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2296 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2297 cmd->port = PORT_FIBRE;
2298 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2299 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2258 cmd->port = PORT_DA; 2300 cmd->port = PORT_DA;
2259 else 2301 else
2260 cmd->port = PORT_FIBRE; 2302 cmd->port = PORT_OTHER;
2261 } else 2303 } else
2262 cmd->port = PORT_OTHER; 2304 cmd->port = PORT_OTHER;
2263 2305
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
2461} 2503}
2462 2504
2463/** 2505/**
2464 * set_rxq_intr_params - set a queue's interrupt holdoff parameters 2506 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2465 * @adap: the adapter
2466 * @q: the Rx queue 2507 * @q: the Rx queue
2467 * @us: the hold-off time in us, or 0 to disable timer 2508 * @us: the hold-off time in us, or 0 to disable timer
2468 * @cnt: the hold-off packet count, or 0 to disable counter 2509 * @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
2470 * Sets an Rx queue's interrupt hold-off time and packet count. At least 2511 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2471 * one of the two needs to be enabled for the queue to generate interrupts. 2512 * one of the two needs to be enabled for the queue to generate interrupts.
2472 */ 2513 */
2473static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, 2514static int set_rspq_intr_params(struct sge_rspq *q,
2474 unsigned int us, unsigned int cnt) 2515 unsigned int us, unsigned int cnt)
2475{ 2516{
2517 struct adapter *adap = q->adap;
2518
2476 if ((us | cnt) == 0) 2519 if ((us | cnt) == 0)
2477 cnt = 1; 2520 cnt = 1;
2478 2521
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2499 return 0; 2542 return 0;
2500} 2543}
2501 2544
2502static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2545/**
 2546 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters

2547 * @dev: the network device
2548 * @us: the hold-off time in us, or 0 to disable timer
2549 * @cnt: the hold-off packet count, or 0 to disable counter
2550 *
2551 * Set the RX interrupt hold-off parameters for a network device.
2552 */
2553static int set_rx_intr_params(struct net_device *dev,
2554 unsigned int us, unsigned int cnt)
2503{ 2555{
2504 const struct port_info *pi = netdev_priv(dev); 2556 int i, err;
2557 struct port_info *pi = netdev_priv(dev);
2505 struct adapter *adap = pi->adapter; 2558 struct adapter *adap = pi->adapter;
2506 struct sge_rspq *q; 2559 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2507 int i; 2560
2508 int r = 0; 2561 for (i = 0; i < pi->nqsets; i++, q++) {
2509 2562 err = set_rspq_intr_params(&q->rspq, us, cnt);
2510 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) { 2563 if (err)
2511 q = &adap->sge.ethrxq[i].rspq; 2564 return err;
2512 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2513 c->rx_max_coalesced_frames);
2514 if (r) {
2515 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2516 break;
2517 }
2518 } 2565 }
2519 return r; 2566 return 0;
2567}
2568
2569static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2570{
2571 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2572 c->rx_max_coalesced_frames);
2520} 2573}
2521 2574
2522static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2575static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
2732 return pi->rss_size; 2785 return pi->rss_size;
2733} 2786}
2734 2787
2735static int get_rss_table(struct net_device *dev, u32 *p) 2788static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2736{ 2789{
2737 const struct port_info *pi = netdev_priv(dev); 2790 const struct port_info *pi = netdev_priv(dev);
2738 unsigned int n = pi->rss_size; 2791 unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
2742 return 0; 2795 return 0;
2743} 2796}
2744 2797
2745static int set_rss_table(struct net_device *dev, const u32 *p) 2798static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2746{ 2799{
2747 unsigned int i; 2800 unsigned int i;
2748 struct port_info *pi = netdev_priv(dev); 2801 struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2844 .set_wol = set_wol, 2897 .set_wol = set_wol,
2845 .get_rxnfc = get_rxnfc, 2898 .get_rxnfc = get_rxnfc,
2846 .get_rxfh_indir_size = get_rss_table_size, 2899 .get_rxfh_indir_size = get_rss_table_size,
2847 .get_rxfh_indir = get_rss_table, 2900 .get_rxfh = get_rss_table,
2848 .set_rxfh_indir = set_rss_table, 2901 .set_rxfh = set_rss_table,
2849 .flash_device = set_flash, 2902 .flash_device = set_flash,
2850}; 2903};
2851 2904
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3386EXPORT_SYMBOL(cxgb4_best_mtu); 3439EXPORT_SYMBOL(cxgb4_best_mtu);
3387 3440
3388/** 3441/**
3442 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3443 * @mtus: the HW MTU table
3444 * @header_size: Header Size
3445 * @data_size_max: maximum Data Segment Size
3446 * @data_size_align: desired Data Segment Size Alignment (2^N)
3447 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3448 *
3449 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3450 * MTU Table based solely on a Maximum MTU parameter, we break that
3451 * parameter up into a Header Size and Maximum Data Segment Size, and
3452 * provide a desired Data Segment Size Alignment. If we find an MTU in
3453 * the Hardware MTU Table which will result in a Data Segment Size with
3454 * the requested alignment _and_ that MTU isn't "too far" from the
3455 * closest MTU, then we'll return that rather than the closest MTU.
3456 */
3457unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3458 unsigned short header_size,
3459 unsigned short data_size_max,
3460 unsigned short data_size_align,
3461 unsigned int *mtu_idxp)
3462{
3463 unsigned short max_mtu = header_size + data_size_max;
3464 unsigned short data_size_align_mask = data_size_align - 1;
3465 int mtu_idx, aligned_mtu_idx;
3466
3467 /* Scan the MTU Table till we find an MTU which is larger than our
3468 * Maximum MTU or we reach the end of the table. Along the way,
3469 * record the last MTU found, if any, which will result in a Data
3470 * Segment Length matching the requested alignment.
3471 */
3472 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3473 unsigned short data_size = mtus[mtu_idx] - header_size;
3474
3475 /* If this MTU minus the Header Size would result in a
3476 * Data Segment Size of the desired alignment, remember it.
3477 */
3478 if ((data_size & data_size_align_mask) == 0)
3479 aligned_mtu_idx = mtu_idx;
3480
3481 /* If we're not at the end of the Hardware MTU Table and the
3482 * next element is larger than our Maximum MTU, drop out of
3483 * the loop.
3484 */
3485 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3486 break;
3487 }
3488
3489 /* If we fell out of the loop because we ran to the end of the table,
3490 * then we just have to use the last [largest] entry.
3491 */
3492 if (mtu_idx == NMTUS)
3493 mtu_idx--;
3494
3495 /* If we found an MTU which resulted in the requested Data Segment
3496 * Length alignment and that's "not far" from the largest MTU which is
3497 * less than or equal to the maximum MTU, then use that.
3498 */
3499 if (aligned_mtu_idx >= 0 &&
3500 mtu_idx - aligned_mtu_idx <= 1)
3501 mtu_idx = aligned_mtu_idx;
3502
3503 /* If the caller has passed in an MTU Index pointer, pass the
3504 * MTU Index back. Return the MTU value.
3505 */
3506 if (mtu_idxp)
3507 *mtu_idxp = mtu_idx;
3508 return mtus[mtu_idx];
3509}
3510EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
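Editor's note: a hypothetical call site for the new export, e.g. a ULD sizing an MSS so the data segment after a 40-byte TCP/IP header stays 8-byte aligned (the values are illustrative, not from a real caller; lldi is a struct cxgb4_lld_info as above):

	unsigned int mtu_idx;
	unsigned int mtu = cxgb4_best_aligned_mtu(lldi->mtus,
						  40,	/* header size */
						  9600,	/* max data segment */
						  8,	/* alignment, 2^N */
						  &mtu_idx);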
3511
3512/**
3389 * cxgb4_port_chan - get the HW channel of a port 3513 * cxgb4_port_chan - get the HW channel of a port
3390 * @dev: the net device for the port 3514 * @dev: the net device for the port
3391 * 3515 *
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3782 lli.mtus = adap->params.mtus; 3906 lli.mtus = adap->params.mtus;
3783 if (uld == CXGB4_ULD_RDMA) { 3907 if (uld == CXGB4_ULD_RDMA) {
3784 lli.rxq_ids = adap->sge.rdma_rxq; 3908 lli.rxq_ids = adap->sge.rdma_rxq;
3909 lli.ciq_ids = adap->sge.rdma_ciq;
3785 lli.nrxq = adap->sge.rdmaqs; 3910 lli.nrxq = adap->sge.rdmaqs;
3911 lli.nciq = adap->sge.rdmaciqs;
3786 } else if (uld == CXGB4_ULD_ISCSI) { 3912 } else if (uld == CXGB4_ULD_ISCSI) {
3787 lli.rxq_ids = adap->sge.ofld_rxq; 3913 lli.rxq_ids = adap->sge.ofld_rxq;
3788 lli.nrxq = adap->sge.ofldqsets; 3914 lli.nrxq = adap->sge.ofldqsets;
@@ -4061,7 +4187,7 @@ static int update_root_dev_clip(struct net_device *dev)
4061 4187
4062 /* Parse all bond and vlan devices layered on top of the physical dev */ 4188 /* Parse all bond and vlan devices layered on top of the physical dev */
4063 for (i = 0; i < VLAN_N_VID; i++) { 4189 for (i = 0; i < VLAN_N_VID; i++) {
4064 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i); 4190 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4065 if (!root_dev) 4191 if (!root_dev)
4066 continue; 4192 continue;
4067 4193
@@ -5528,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
5528#undef FW_PARAM_PFVF 5654#undef FW_PARAM_PFVF
5529#undef FW_PARAM_DEV 5655#undef FW_PARAM_DEV
5530 5656
5531 /* 5657 /* The MTU/MSS Table is initialized by now, so load their values. If
5532 * These are finalized by FW initialization, load their values now. 5658 * we're initializing the adapter, then we'll make any modifications
5659 * we want to the MTU/MSS Table and also initialize the congestion
5660 * parameters.
5533 */ 5661 */
5534 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5662 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5535 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5663 if (state != DEV_STATE_INIT) {
5536 adap->params.b_wnd); 5664 int i;
5665
5666 /* The default MTU Table contains values 1492 and 1500.
5667 * However, for TCP, it's better to have two values which are
5668 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5669 * This allows us to have a TCP Data Payload which is a
5670 * multiple of 8 regardless of what combination of TCP Options
5671 * are in use (always a multiple of 4 bytes) which is
5672 * important for performance reasons. For instance, if no
5673 * options are in use, then we have a 20-byte IP header and a
5674 * 20-byte TCP header. In this case, a 1500-byte MSS would
5675 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5676 * which is not a multiple of 8. So using an MSS of 1488 in
5677 * this case results in a TCP Data Payload of 1448 bytes which
5678 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5679 * Stamps have been negotiated, then an MTU of 1500 bytes
5680 * results in a TCP Data Payload of 1448 bytes which, as
5681 * above, is a multiple of 8 bytes ...
5682 */
5683 for (i = 0; i < NMTUS; i++)
5684 if (adap->params.mtus[i] == 1492) {
5685 adap->params.mtus[i] = 1488;
5686 break;
5687 }
5537 5688
5689 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5690 adap->params.b_wnd);
5691 }
5538 t4_init_tp_params(adap); 5692 t4_init_tp_params(adap);
5539 adap->flags |= FW_OK; 5693 adap->flags |= FW_OK;
5540 return 0; 5694 return 0;
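Editor's note: the long comment's arithmetic checks out and is easy to verify in userspace (a quick standalone check, not kernel code):

	#include <assert.h>

	int main(void)
	{
		assert((1500 - 40) % 8 != 0);	/* 1460: 20B IP + 20B TCP, no options */
		assert((1488 - 40) % 8 == 0);	/* 1448: the substituted 1488 MTU */
		assert((1500 - 52) % 8 == 0);	/* 1448: 1500 with 12B TCP timestamps */
		return 0;
	}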
@@ -5669,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
5669 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 5823 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5670} 5824}
5671 5825
5672static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, 5826static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5827 unsigned int us, unsigned int cnt,
5673 unsigned int size, unsigned int iqe_size) 5828 unsigned int size, unsigned int iqe_size)
5674{ 5829{
5675 q->intr_params = QINTR_TIMER_IDX(timer_idx) | 5830 q->adap = adap;
5676 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); 5831 set_rspq_intr_params(q, us, cnt);
5677 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5678 q->iqe_len = iqe_size; 5832 q->iqe_len = iqe_size;
5679 q->size = size; 5833 q->size = size;
5680} 5834}
@@ -5688,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
5688{ 5842{
5689 struct sge *s = &adap->sge; 5843 struct sge *s = &adap->sge;
5690 int i, q10g = 0, n10g = 0, qidx = 0; 5844 int i, q10g = 0, n10g = 0, qidx = 0;
5845 int ciq_size;
5691 5846
5692 for_each_port(adap, i) 5847 for_each_port(adap, i)
5693 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 5848 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
5726 s->ofldqsets = adap->params.nports; 5881 s->ofldqsets = adap->params.nports;
5727 /* For RDMA one Rx queue per channel suffices */ 5882 /* For RDMA one Rx queue per channel suffices */
5728 s->rdmaqs = adap->params.nports; 5883 s->rdmaqs = adap->params.nports;
5884 s->rdmaciqs = adap->params.nports;
5729 } 5885 }
5730 5886
5731 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 5887 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5732 struct sge_eth_rxq *r = &s->ethrxq[i]; 5888 struct sge_eth_rxq *r = &s->ethrxq[i];
5733 5889
5734 init_rspq(&r->rspq, 0, 0, 1024, 64); 5890 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5735 r->fl.size = 72; 5891 r->fl.size = 72;
5736 } 5892 }
5737 5893
@@ -5747,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
5747 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { 5903 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5748 struct sge_ofld_rxq *r = &s->ofldrxq[i]; 5904 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5749 5905
5750 init_rspq(&r->rspq, 0, 0, 1024, 64); 5906 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
5751 r->rspq.uld = CXGB4_ULD_ISCSI; 5907 r->rspq.uld = CXGB4_ULD_ISCSI;
5752 r->fl.size = 72; 5908 r->fl.size = 72;
5753 } 5909 }
@@ -5755,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
5755 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { 5911 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5756 struct sge_ofld_rxq *r = &s->rdmarxq[i]; 5912 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5757 5913
5758 init_rspq(&r->rspq, 0, 0, 511, 64); 5914 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
5759 r->rspq.uld = CXGB4_ULD_RDMA; 5915 r->rspq.uld = CXGB4_ULD_RDMA;
5760 r->fl.size = 72; 5916 r->fl.size = 72;
5761 } 5917 }
5762 5918
5763 init_rspq(&s->fw_evtq, 6, 0, 512, 64); 5919 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5764 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); 5920 if (ciq_size > SGE_MAX_IQ_SIZE) {
5921 CH_WARN(adap, "CIQ size too small for available IQs\n");
5922 ciq_size = SGE_MAX_IQ_SIZE;
5923 }
5924
5925 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5926 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5927
5928 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
5929 r->rspq.uld = CXGB4_ULD_RDMA;
5930 }
5931
5932 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5933 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
5765} 5934}
5766 5935
5767/* 5936/*
@@ -5808,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
5808 5977
5809 want = s->max_ethqsets + EXTRA_VECS; 5978 want = s->max_ethqsets + EXTRA_VECS;
5810 if (is_offload(adap)) { 5979 if (is_offload(adap)) {
5811 want += s->rdmaqs + s->ofldqsets; 5980 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
5812 /* need nchan for each possible ULD */ 5981 /* need nchan for each possible ULD */
5813 ofld_need = 2 * nchan; 5982 ofld_need = 3 * nchan;
5814 } 5983 }
5815 need = adap->params.nports + EXTRA_VECS + ofld_need; 5984 need = adap->params.nports + EXTRA_VECS + ofld_need;
5816 5985
@@ -6076,7 +6245,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6076 netdev->priv_flags |= IFF_UNICAST_FLT; 6245 netdev->priv_flags |= IFF_UNICAST_FLT;
6077 6246
6078 netdev->netdev_ops = &cxgb4_netdev_ops; 6247 netdev->netdev_ops = &cxgb4_netdev_ops;
6079 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 6248 netdev->ethtool_ops = &cxgb_ethtool_ops;
6080 } 6249 }
6081 6250
6082 pci_set_drvdata(pdev, adapter); 6251 pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
232 const struct cxgb4_virt_res *vr; /* assorted HW resources */ 232 const struct cxgb4_virt_res *vr; /* assorted HW resources */
233 const unsigned short *mtus; /* MTU table */ 233 const unsigned short *mtus; /* MTU table */
234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ 234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
235 const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
235 unsigned short nrxq; /* # of Rx queues */ 236 unsigned short nrxq; /* # of Rx queues */
236 unsigned short ntxq; /* # of Tx queues */ 237 unsigned short ntxq; /* # of Tx queues */
238 unsigned short nciq; /* # of concentrator IQ */
237 unsigned char nchan:4; /* # of channels */ 239 unsigned char nchan:4; /* # of channels */
238 unsigned char nports:4; /* # of ports */ 240 unsigned char nports:4; /* # of ports */
239 unsigned char wr_cred; /* WR 16-byte credits */ 241 unsigned char wr_cred; /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
274unsigned int cxgb4_port_idx(const struct net_device *dev); 276unsigned int cxgb4_port_idx(const struct net_device *dev);
275unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, 277unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
276 unsigned int *idx); 278 unsigned int *idx);
279unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
280 unsigned short header_size,
281 unsigned short data_size_max,
282 unsigned short data_size_align,
283 unsigned int *mtu_idxp);
277void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, 284void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
278 struct tp_tcp_stats *v6); 285 struct tp_tcp_stats *v6);
279void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, 286void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528c8e60..dd4355d248e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1697 return handle_trace_pkt(q->adap, si); 1697 return handle_trace_pkt(q->adap, si);
1698 1698
1699 pkt = (const struct cpl_rx_pkt *)rsp; 1699 pkt = (const struct cpl_rx_pkt *)rsp;
1700 csum_ok = pkt->csum_calc && !pkt->err_vec; 1700 csum_ok = pkt->csum_calc && !pkt->err_vec &&
1701 (q->netdev->features & NETIF_F_RXCSUM);
1701 if ((pkt->l2info & htonl(RXF_TCP)) && 1702 if ((pkt->l2info & htonl(RXF_TCP)) &&
1702 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1703 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1703 do_gro(rxq, si, pkt); 1704 do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1720 1721
1721 rxq->stats.pkts++; 1722 rxq->stats.pkts++;
1722 1723
1723 if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) && 1724 if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1724 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1725 if (!pkt->ip_frag) { 1725 if (!pkt->ip_frag) {
1726 skb->ip_summed = CHECKSUM_UNNECESSARY; 1726 skb->ip_summed = CHECKSUM_UNNECESSARY;
1727 rxq->stats.rx_cso++; 1727 rxq->stats.rx_cso++;
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2215 iq->cntxt_id = ntohs(c.iqid); 2215 iq->cntxt_id = ntohs(c.iqid);
2216 iq->abs_id = ntohs(c.physiqid); 2216 iq->abs_id = ntohs(c.physiqid);
2217 iq->size--; /* subtract status entry */ 2217 iq->size--; /* subtract status entry */
2218 iq->adap = adap;
2219 iq->netdev = dev; 2218 iq->netdev = dev;
2220 iq->handler = hnd; 2219 iq->handler = hnd;
2221 2220
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
2515 if (oq->rspq.desc) 2514 if (oq->rspq.desc)
2516 free_rspq_fl(adap, &oq->rspq, &oq->fl); 2515 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2517 } 2516 }
2517 for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
2518 if (oq->rspq.desc)
2519 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2520 }
2518 2521
2519 /* clean up offload Tx queues */ 2522 /* clean up offload Tx queues */
2520 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { 2523 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ 68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ 69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ 70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
71 SGE_MAX_IQ_SIZE = 65520,
71 72
72 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ 73 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
73 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ 74 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
227#define DELACK(x) ((x) << 5) 227#define DELACK(x) ((x) << 5)
228#define ULP_MODE(x) ((x) << 8) 228#define ULP_MODE(x) ((x) << 8)
229#define RCV_BUFSIZ(x) ((x) << 12) 229#define RCV_BUFSIZ(x) ((x) << 12)
230#define RCV_BUFSIZ_MASK 0x3FFU
230#define DSCP(x) ((x) << 22) 231#define DSCP(x) ((x) << 22)
231#define SMAC_SEL(x) ((u64)(x) << 28) 232#define SMAC_SEL(x) ((u64)(x) << 28)
232#define L2T_IDX(x) ((u64)(x) << 36) 233#define L2T_IDX(x) ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
278 __be64 opt0; 279 __be64 opt0;
279}; 280};
280 281
282struct cpl_t5_pass_accept_rpl {
283 WR_HDR;
284 union opcode_tid ot;
285 __be32 opt2;
286 __be64 opt0;
287 __be32 iss;
288 __be32 rsvd;
289};
290
281struct cpl_act_open_req { 291struct cpl_act_open_req {
282 WR_HDR; 292 WR_HDR;
283 union opcode_tid ot; 293 union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 52859288de7b..ff1cdd1788b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2664 netdev->priv_flags |= IFF_UNICAST_FLT; 2664 netdev->priv_flags |= IFF_UNICAST_FLT;
2665 2665
2666 netdev->netdev_ops = &cxgb4vf_netdev_ops; 2666 netdev->netdev_ops = &cxgb4vf_netdev_ops;
2667 SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops); 2667 netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
2668 2668
2669 /* 2669 /*
2670 * Initialize the hardware/software state for the port. 2670 * Initialize the hardware/software state for the port.
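
The SET_ETHTOOL_OPS() conversion seen here recurs throughout this series (enic, tulip, uli526x, dl2k and sundance below). The macro was nothing more than a pointer assignment, so removing it loses nothing; a minimal sketch of the before/after, with a hypothetical example_ethtool_ops table:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* The old macro simply expanded to the assignment below. */
#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

static const struct ethtool_ops example_ethtool_ops = {
	/* .get_settings, .get_drvinfo, ... as the driver requires */
};

static void example_setup(struct net_device *netdev)
{
	/* After the conversion, drivers just assign the table directly. */
	netdev->ethtool_ops = &example_ethtool_ops;
}
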
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d50b49..bdfa80ca5e31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1510{ 1510{
1511 struct sk_buff *skb; 1511 struct sk_buff *skb;
1512 const struct cpl_rx_pkt *pkt = (void *)rsp; 1512 const struct cpl_rx_pkt *pkt = (void *)rsp;
1513 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1513 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1514 (rspq->netdev->features & NETIF_F_RXCSUM);
1514 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1515 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1515 1516
1516 /* 1517 /*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1538 skb_record_rx_queue(skb, rspq->idx); 1539 skb_record_rx_queue(skb, rspq->idx);
1539 rxq->stats.pkts++; 1540 rxq->stats.pkts++;
1540 1541
1541 if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && 1542 if (csum_ok && !pkt->err_vec &&
1542 !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1543 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
1543 if (!pkt->ip_frag) 1544 if (!pkt->ip_frag)
1544 skb->ip_summed = CHECKSUM_UNNECESSARY; 1545 skb->ip_summed = CHECKSUM_UNNECESSARY;
1545 else { 1546 else {
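
The t4vf_ethrx_handler() change folds the NETIF_F_RXCSUM feature bit into csum_ok, so turning RX checksum offload off via ethtool now disables the CHECKSUM_UNNECESSARY path as a whole instead of being re-tested per packet. A condensed sketch of the resulting decision, with field names taken from the hunk above and the helper name hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "t4_msg.h"		/* cpl_rx_pkt, RXF_UDP, RXF_TCP */

static void example_rx_csum(struct net_device *netdev, struct sk_buff *skb,
			    const struct cpl_rx_pkt *pkt)
{
	/* Hardware verified the checksum *and* the user left RXCSUM on. */
	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
		       (netdev->features & NETIF_F_RXCSUM);

	if (csum_ok && !pkt->ip_frag &&
	    (be32_to_cpu(pkt->l2info) & (RXF_UDP | RXF_TCP)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
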
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..14f465f239d6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
45 45
46#define ENIC_AIC_LARGE_PKT_DIFF 3
47
46struct enic_msix_entry { 48struct enic_msix_entry {
47 int requested; 49 int requested;
48 char devname[IFNAMSIZ]; 50 char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
50 void *devid; 52 void *devid;
51}; 53};
52 54
55/* Store only the lower range. Higher range is given by fw. */
56struct enic_intr_mod_range {
57 u32 small_pkt_range_start;
58 u32 large_pkt_range_start;
59};
60
61struct enic_intr_mod_table {
62 u32 rx_rate;
63 u32 range_percent;
64};
65
66#define ENIC_MAX_LINK_SPEEDS 3
67#define ENIC_LINK_SPEED_10G 10000
68#define ENIC_LINK_SPEED_4G 4000
69#define ENIC_LINK_40G_INDEX 2
70#define ENIC_LINK_10G_INDEX 1
71#define ENIC_LINK_4G_INDEX 0
72#define ENIC_RX_COALESCE_RANGE_END 125
73#define ENIC_AIC_TS_BREAK 100
74
75struct enic_rx_coal {
76 u32 small_pkt_range_start;
77 u32 large_pkt_range_start;
78 u32 range_end;
79 u32 use_adaptive_rx_coalesce;
80};
81
53/* priv_flags */ 82/* priv_flags */
54#define ENIC_SRIOV_ENABLED (1 << 0) 83#define ENIC_SRIOV_ENABLED (1 << 0)
55 84
@@ -85,13 +114,12 @@ struct enic {
85 u32 msg_enable; 114 u32 msg_enable;
86 spinlock_t devcmd_lock; 115 spinlock_t devcmd_lock;
87 u8 mac_addr[ETH_ALEN]; 116 u8 mac_addr[ETH_ALEN];
88 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
89 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
90 unsigned int flags; 117 unsigned int flags;
91 unsigned int priv_flags; 118 unsigned int priv_flags;
92 unsigned int mc_count; 119 unsigned int mc_count;
93 unsigned int uc_count; 120 unsigned int uc_count;
94 u32 port_mtu; 121 u32 port_mtu;
122 struct enic_rx_coal rx_coalesce_setting;
95 u32 rx_coalesce_usecs; 123 u32 rx_coalesce_usecs;
96 u32 tx_coalesce_usecs; 124 u32 tx_coalesce_usecs;
97#ifdef CONFIG_PCI_IOV 125#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e5695b263..3e27df522847 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
88 return err; 88 return err;
89} 89}
90 90
91int enic_dev_add_addr(struct enic *enic, u8 *addr) 91int enic_dev_add_addr(struct enic *enic, const u8 *addr)
92{ 92{
93 int err; 93 int err;
94 94
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
99 return err; 99 return err;
100} 100}
101 101
102int enic_dev_del_addr(struct enic *enic, u8 *addr) 102int enic_dev_del_addr(struct enic *enic, const u8 *addr)
103{ 103{
104 int err; 104 int err;
105 105
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a4efb0..36ea1ab25f6a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
45int enic_dev_del_station_addr(struct enic *enic); 45int enic_dev_del_station_addr(struct enic *enic);
46int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, 46int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
47 int broadcast, int promisc, int allmulti); 47 int broadcast, int promisc, int allmulti);
48int enic_dev_add_addr(struct enic *enic, u8 *addr); 48int enic_dev_add_addr(struct enic *enic, const u8 *addr);
49int enic_dev_del_addr(struct enic *enic, u8 *addr); 49int enic_dev_del_addr(struct enic *enic, const u8 *addr);
50int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); 50int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
51int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); 51int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
52int enic_dev_notify_unset(struct enic *enic); 52int enic_dev_notify_unset(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562f4866..2e50b5489d20 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
81 81
82void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
83{
84 int i;
85 int intr;
86
87 for (i = 0; i < enic->rq_count; i++) {
88 intr = enic_msix_rq_intr(enic, i);
89 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
90 }
91}
92
82static int enic_get_settings(struct net_device *netdev, 93static int enic_get_settings(struct net_device *netdev,
83 struct ethtool_cmd *ecmd) 94 struct ethtool_cmd *ecmd)
84{ 95{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
93 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); 104 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
94 ecmd->duplex = DUPLEX_FULL; 105 ecmd->duplex = DUPLEX_FULL;
95 } else { 106 } else {
96 ethtool_cmd_speed_set(ecmd, -1); 107 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
97 ecmd->duplex = -1; 108 ecmd->duplex = DUPLEX_UNKNOWN;
98 } 109 }
99 110
100 ecmd->autoneg = AUTONEG_DISABLE; 111 ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
178 struct ethtool_coalesce *ecmd) 189 struct ethtool_coalesce *ecmd)
179{ 190{
180 struct enic *enic = netdev_priv(netdev); 191 struct enic *enic = netdev_priv(netdev);
192 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
181 193
182 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; 194 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
183 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; 195 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
196 if (rxcoal->use_adaptive_rx_coalesce)
197 ecmd->use_adaptive_rx_coalesce = 1;
198 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
199 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
184 200
185 return 0; 201 return 0;
186} 202}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
191 struct enic *enic = netdev_priv(netdev); 207 struct enic *enic = netdev_priv(netdev);
192 u32 tx_coalesce_usecs; 208 u32 tx_coalesce_usecs;
193 u32 rx_coalesce_usecs; 209 u32 rx_coalesce_usecs;
210 u32 rx_coalesce_usecs_low;
211 u32 rx_coalesce_usecs_high;
212 u32 coalesce_usecs_max;
194 unsigned int i, intr; 213 unsigned int i, intr;
214 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
195 215
216 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
196 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, 217 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
197 vnic_dev_get_intr_coal_timer_max(enic->vdev)); 218 coalesce_usecs_max);
198 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, 219 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
199 vnic_dev_get_intr_coal_timer_max(enic->vdev)); 220 coalesce_usecs_max);
221
222 rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
223 coalesce_usecs_max);
224 rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
225 coalesce_usecs_max);
200 226
201 switch (vnic_dev_get_intr_mode(enic->vdev)) { 227 switch (vnic_dev_get_intr_mode(enic->vdev)) {
202 case VNIC_DEV_INTR_MODE_INTX: 228 case VNIC_DEV_INTR_MODE_INTX:
203 if (tx_coalesce_usecs != rx_coalesce_usecs) 229 if (tx_coalesce_usecs != rx_coalesce_usecs)
204 return -EINVAL; 230 return -EINVAL;
231 if (ecmd->use_adaptive_rx_coalesce ||
232 ecmd->rx_coalesce_usecs_low ||
233 ecmd->rx_coalesce_usecs_high)
234 return -EOPNOTSUPP;
205 235
206 intr = enic_legacy_io_intr(); 236 intr = enic_legacy_io_intr();
207 vnic_intr_coalescing_timer_set(&enic->intr[intr], 237 vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
210 case VNIC_DEV_INTR_MODE_MSI: 240 case VNIC_DEV_INTR_MODE_MSI:
211 if (tx_coalesce_usecs != rx_coalesce_usecs) 241 if (tx_coalesce_usecs != rx_coalesce_usecs)
212 return -EINVAL; 242 return -EINVAL;
243 if (ecmd->use_adaptive_rx_coalesce ||
244 ecmd->rx_coalesce_usecs_low ||
245 ecmd->rx_coalesce_usecs_high)
246 return -EOPNOTSUPP;
213 247
214 vnic_intr_coalescing_timer_set(&enic->intr[0], 248 vnic_intr_coalescing_timer_set(&enic->intr[0],
215 tx_coalesce_usecs); 249 tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
221 tx_coalesce_usecs); 255 tx_coalesce_usecs);
222 } 256 }
223 257
224 for (i = 0; i < enic->rq_count; i++) { 258 if (rxcoal->use_adaptive_rx_coalesce) {
225 intr = enic_msix_rq_intr(enic, i); 259 if (!ecmd->use_adaptive_rx_coalesce) {
226 vnic_intr_coalescing_timer_set(&enic->intr[intr], 260 rxcoal->use_adaptive_rx_coalesce = 0;
227 rx_coalesce_usecs); 261 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
262 }
263 } else {
264 if (ecmd->use_adaptive_rx_coalesce)
265 rxcoal->use_adaptive_rx_coalesce = 1;
266 else
267 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
228 } 268 }
229 269
270 if (ecmd->rx_coalesce_usecs_high) {
271 if (rx_coalesce_usecs_high <
272 (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
273 return -EINVAL;
274 rxcoal->range_end = rx_coalesce_usecs_high;
275 rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
276 rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
277 ENIC_AIC_LARGE_PKT_DIFF;
278 }
230 break; 279 break;
231 default: 280 default:
232 break; 281 break;
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
253 302
254void enic_set_ethtool_ops(struct net_device *netdev) 303void enic_set_ethtool_ops(struct net_device *netdev)
255{ 304{
256 SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops); 305 netdev->ethtool_ops = &enic_ethtool_ops;
257} 306}
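
enic_set_coalesce() now accepts the adaptive-RX knobs only in MSI-X mode and requires the high watermark to leave room for the large-packet offset: with rx_coalesce_usecs_low = 15 and rx_coalesce_usecs_high = 100 the driver stores small_pkt_range_start = 15, large_pkt_range_start = 15 + ENIC_AIC_LARGE_PKT_DIFF = 18 and range_end = 100, while rx_coalesce_usecs_high = 16 is rejected since 16 < 15 + 3. A self-contained restatement of that validation rule:

#include <errno.h>

#define ENIC_AIC_LARGE_PKT_DIFF 3	/* from enic.h above */

/* Mirrors the -EINVAL test in enic_set_coalesce(). */
static int example_check_range(unsigned int low, unsigned int high)
{
	if (high < low + ENIC_AIC_LARGE_PKT_DIFF)
		return -EINVAL;		/* e.g. high = 16, low = 15 */
	return 0;
}
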
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..f32f828b7f3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
38#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <net/ip6_checksum.h> 40#include <net/ip6_checksum.h>
41#include <linux/ktime.h>
41 42
42#include "cq_enet_desc.h" 43#include "cq_enet_desc.h"
43#include "vnic_dev.h" 44#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
72MODULE_VERSION(DRV_VERSION); 73MODULE_VERSION(DRV_VERSION);
73MODULE_DEVICE_TABLE(pci, enic_id_table); 74MODULE_DEVICE_TABLE(pci, enic_id_table);
74 75
76#define ENIC_LARGE_PKT_THRESHOLD 1000
77#define ENIC_MAX_COALESCE_TIMERS 10
78/* Interrupt moderation table, which will be used to decide the
79 * coalescing timer values
80 * {rx_rate in Mbps, mapping percentage of the range}
81 */
82struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
83 {4000, 0},
84 {4400, 10},
85 {5060, 20},
86 {5230, 30},
87 {5540, 40},
88 {5820, 50},
89 {6120, 60},
90 {6435, 70},
91 {6745, 80},
92 {7000, 90},
93 {0xFFFFFFFF, 100}
94};
95
96/* This table helps the driver to pick different ranges for rx coalescing
97 * timer depending on the link speed.
98 */
99struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
100 {0, 0}, /* 0 - 4 Gbps */
101 {0, 3}, /* 4 - 10 Gbps */
102 {3, 6}, /* 10 - 40 Gbps */
103};
104
75int enic_is_dynamic(struct enic *enic) 105int enic_is_dynamic(struct enic *enic)
76{ 106{
77 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 107 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
586 return net_stats; 616 return net_stats;
587} 617}
588 618
619static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
620{
621 struct enic *enic = netdev_priv(netdev);
622
623 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
624 unsigned int mc_count = netdev_mc_count(netdev);
625
626 netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
627 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
628
629 return -ENOSPC;
630 }
631
632 enic_dev_add_addr(enic, mc_addr);
633 enic->mc_count++;
634
635 return 0;
636}
637
638static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
639{
640 struct enic *enic = netdev_priv(netdev);
641
642 enic_dev_del_addr(enic, mc_addr);
643 enic->mc_count--;
644
645 return 0;
646}
647
648static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
649{
650 struct enic *enic = netdev_priv(netdev);
651
652 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
653 unsigned int uc_count = netdev_uc_count(netdev);
654
655 netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
656 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
657
658 return -ENOSPC;
659 }
660
661 enic_dev_add_addr(enic, uc_addr);
662 enic->uc_count++;
663
664 return 0;
665}
666
667static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
668{
669 struct enic *enic = netdev_priv(netdev);
670
671 enic_dev_del_addr(enic, uc_addr);
672 enic->uc_count--;
673
674 return 0;
675}
676
589void enic_reset_addr_lists(struct enic *enic) 677void enic_reset_addr_lists(struct enic *enic)
590{ 678{
679 struct net_device *netdev = enic->netdev;
680
681 __dev_uc_unsync(netdev, NULL);
682 __dev_mc_unsync(netdev, NULL);
683
591 enic->mc_count = 0; 684 enic->mc_count = 0;
592 enic->uc_count = 0; 685 enic->uc_count = 0;
593 enic->flags = 0; 686 enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
654 return enic_dev_add_station_addr(enic); 747 return enic_dev_add_station_addr(enic);
655} 748}
656 749
657static void enic_update_multicast_addr_list(struct enic *enic)
658{
659 struct net_device *netdev = enic->netdev;
660 struct netdev_hw_addr *ha;
661 unsigned int mc_count = netdev_mc_count(netdev);
662 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
663 unsigned int i, j;
664
665 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
666 netdev_warn(netdev, "Registering only %d out of %d "
667 "multicast addresses\n",
668 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
669 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
670 }
671
672 /* Is there an easier way? Trying to minimize to
673 * calls to add/del multicast addrs. We keep the
674 * addrs from the last call in enic->mc_addr and
675 * look for changes to add/del.
676 */
677
678 i = 0;
679 netdev_for_each_mc_addr(ha, netdev) {
680 if (i == mc_count)
681 break;
682 memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
683 }
684
685 for (i = 0; i < enic->mc_count; i++) {
686 for (j = 0; j < mc_count; j++)
687 if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
688 break;
689 if (j == mc_count)
690 enic_dev_del_addr(enic, enic->mc_addr[i]);
691 }
692
693 for (i = 0; i < mc_count; i++) {
694 for (j = 0; j < enic->mc_count; j++)
695 if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
696 break;
697 if (j == enic->mc_count)
698 enic_dev_add_addr(enic, mc_addr[i]);
699 }
700
701 /* Save the list to compare against next time
702 */
703
704 for (i = 0; i < mc_count; i++)
705 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
706
707 enic->mc_count = mc_count;
708}
709
710static void enic_update_unicast_addr_list(struct enic *enic)
711{
712 struct net_device *netdev = enic->netdev;
713 struct netdev_hw_addr *ha;
714 unsigned int uc_count = netdev_uc_count(netdev);
715 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
716 unsigned int i, j;
717
718 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
719 netdev_warn(netdev, "Registering only %d out of %d "
720 "unicast addresses\n",
721 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
722 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
723 }
724
725 /* Is there an easier way? Trying to minimize to
726 * calls to add/del unicast addrs. We keep the
727 * addrs from the last call in enic->uc_addr and
728 * look for changes to add/del.
729 */
730
731 i = 0;
732 netdev_for_each_uc_addr(ha, netdev) {
733 if (i == uc_count)
734 break;
735 memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
736 }
737
738 for (i = 0; i < enic->uc_count; i++) {
739 for (j = 0; j < uc_count; j++)
740 if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
741 break;
742 if (j == uc_count)
743 enic_dev_del_addr(enic, enic->uc_addr[i]);
744 }
745
746 for (i = 0; i < uc_count; i++) {
747 for (j = 0; j < enic->uc_count; j++)
748 if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
749 break;
750 if (j == enic->uc_count)
751 enic_dev_add_addr(enic, uc_addr[i]);
752 }
753
754 /* Save the list to compare against next time
755 */
756
757 for (i = 0; i < uc_count; i++)
758 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
759
760 enic->uc_count = uc_count;
761}
762
763/* netif_tx_lock held, BHs disabled */ 750/* netif_tx_lock held, BHs disabled */
764static void enic_set_rx_mode(struct net_device *netdev) 751static void enic_set_rx_mode(struct net_device *netdev)
765{ 752{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
782 } 769 }
783 770
784 if (!promisc) { 771 if (!promisc) {
785 enic_update_unicast_addr_list(enic); 772 __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
786 if (!allmulti) 773 if (!allmulti)
787 enic_update_multicast_addr_list(enic); 774 __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
788 } 775 }
789} 776}
790 777
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
979 return 0; 966 return 0;
980} 967}
981 968
969static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
970 u32 pkt_len)
971{
972 if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
973 pkt_size->large_pkt_bytes_cnt += pkt_len;
974 else
975 pkt_size->small_pkt_bytes_cnt += pkt_len;
976}
977
982static void enic_rq_indicate_buf(struct vnic_rq *rq, 978static void enic_rq_indicate_buf(struct vnic_rq *rq,
983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 979 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
984 int skipped, void *opaque) 980 int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
986 struct enic *enic = vnic_dev_priv(rq->vdev); 982 struct enic *enic = vnic_dev_priv(rq->vdev);
987 struct net_device *netdev = enic->netdev; 983 struct net_device *netdev = enic->netdev;
988 struct sk_buff *skb; 984 struct sk_buff *skb;
985 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
989 986
990 u8 type, color, eop, sop, ingress_port, vlan_stripped; 987 u8 type, color, eop, sop, ingress_port, vlan_stripped;
991 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; 988 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1056 napi_gro_receive(&enic->napi[q_number], skb); 1053 napi_gro_receive(&enic->napi[q_number], skb);
1057 else 1054 else
1058 netif_receive_skb(skb); 1055 netif_receive_skb(skb);
1056 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1057 enic_intr_update_pkt_size(&cq->pkt_size_counter,
1058 bytes_written);
1059 } else { 1059 } else {
1060 1060
1061 /* Buffer overflow 1061 /* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
1134 return rq_work_done; 1134 return rq_work_done;
1135} 1135}
1136 1136
1137static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
1138{
1139 unsigned int intr = enic_msix_rq_intr(enic, rq->index);
1140 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1141 u32 timer = cq->tobe_rx_coal_timeval;
1142
1143 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
1144 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
1145 cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
1146 }
1147}
1148
1149static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
1150{
1151 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1152 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1153 struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
1154 int index;
1155 u32 timer;
1156 u32 range_start;
1157 u32 traffic;
1158 u64 delta;
1159 ktime_t now = ktime_get();
1160
1161 delta = ktime_us_delta(now, cq->prev_ts);
1162 if (delta < ENIC_AIC_TS_BREAK)
1163 return;
1164 cq->prev_ts = now;
1165
1166 traffic = pkt_size_counter->large_pkt_bytes_cnt +
1167 pkt_size_counter->small_pkt_bytes_cnt;
1168 /* The table takes Mbps
1169 * traffic *= 8 => bits
1170 * traffic *= (10^6 / delta) => bps
1171 * traffic /= 10^6 => Mbps
1172 *
1173 * Combining, traffic *= (8 / delta)
1174 */
1175
1176 traffic <<= 3;
1177 traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
1178
1179 for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
1180 if (traffic < mod_table[index].rx_rate)
1181 break;
1182 range_start = (pkt_size_counter->small_pkt_bytes_cnt >
1183 pkt_size_counter->large_pkt_bytes_cnt << 1) ?
1184 rx_coal->small_pkt_range_start :
1185 rx_coal->large_pkt_range_start;
1186 timer = range_start + ((rx_coal->range_end - range_start) *
1187 mod_table[index].range_percent / 100);
1188 /* Damping */
1189 cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
1190
1191 pkt_size_counter->large_pkt_bytes_cnt = 0;
1192 pkt_size_counter->small_pkt_bytes_cnt = 0;
1193}
1194
1137static int enic_poll_msix(struct napi_struct *napi, int budget) 1195static int enic_poll_msix(struct napi_struct *napi, int budget)
1138{ 1196{
1139 struct net_device *netdev = napi->dev; 1197 struct net_device *netdev = napi->dev;
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1171 1229
1172 if (err) 1230 if (err)
1173 work_done = work_to_do; 1231 work_done = work_to_do;
1232 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1233 /* Call the function which refreshes
1234 * the intr coalescing timer value based on
1235 * the traffic. This is supported only in
 1236 * the case of MSI-X mode
1237 */
1238 enic_calc_int_moderation(enic, &enic->rq[rq]);
1174 1239
1175 if (work_done < work_to_do) { 1240 if (work_done < work_to_do) {
1176 1241
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1179 */ 1244 */
1180 1245
1181 napi_complete(napi); 1246 napi_complete(napi);
1247 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1248 enic_set_int_moderation(enic, &enic->rq[rq]);
1182 vnic_intr_unmask(&enic->intr[intr]); 1249 vnic_intr_unmask(&enic->intr[intr]);
1183 } 1250 }
1184 1251
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
1314 } 1381 }
1315} 1382}
1316 1383
1384static void enic_set_rx_coal_setting(struct enic *enic)
1385{
1386 unsigned int speed;
1387 int index = -1;
1388 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1389
1390 /* If intr mode is not MSIX, do not do adaptive coalescing */
1391 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
 1392 netdev_info(enic->netdev, "INTR mode is not MSI-X, not initializing adaptive coalescing");
1393 return;
1394 }
1395
1396 /* 1. Read the link speed from fw
1397 * 2. Pick the default range for the speed
1398 * 3. Update it in enic->rx_coalesce_setting
1399 */
1400 speed = vnic_dev_port_speed(enic->vdev);
1401 if (ENIC_LINK_SPEED_10G < speed)
1402 index = ENIC_LINK_40G_INDEX;
1403 else if (ENIC_LINK_SPEED_4G < speed)
1404 index = ENIC_LINK_10G_INDEX;
1405 else
1406 index = ENIC_LINK_4G_INDEX;
1407
1408 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1409 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1410 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1411
1412 /* Start with the value provided by UCSM */
1413 for (index = 0; index < enic->rq_count; index++)
1414 enic->cq[index].cur_rx_coal_timeval =
1415 enic->config.intr_timer_usec;
1416
1417 rx_coal->use_adaptive_rx_coalesce = 1;
1418}
1419
1317static int enic_dev_notify_set(struct enic *enic) 1420static int enic_dev_notify_set(struct enic *enic)
1318{ 1421{
1319 int err; 1422 int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2231 enic->notify_timer.function = enic_notify_timer; 2334 enic->notify_timer.function = enic_notify_timer;
2232 enic->notify_timer.data = (unsigned long)enic; 2335 enic->notify_timer.data = (unsigned long)enic;
2233 2336
2337 enic_set_rx_coal_setting(enic);
2234 INIT_WORK(&enic->reset, enic_reset); 2338 INIT_WORK(&enic->reset, enic_reset);
2235 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2339 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2236 2340
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2250 } 2354 }
2251 2355
2252 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2356 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
 2357 /* The rx coalesce time was already initialized; it is used
 2358 * when adaptive coalescing is turned off
2359 */
2253 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2360 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2254 2361
2255 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2362 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
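
Putting enic_calc_int_moderation() together with made-up numbers: say 150 us elapsed since the last sample and 75,000 bytes arrived, mostly in large packets. Then traffic = 75000 * 8 / 150 = 4000 Mbps, the first table entry with rx_rate above that is mod_table[1] (4400 Mbps, range_percent 10), and with the 10 - 40 Gbps defaults (large_pkt_range_start = 6, range_end = 125) the target timer is 6 + (125 - 6) * 10 / 100 = 17 us in integer math, which is then averaged with the previous target for damping. A standalone userspace model of the same arithmetic (constants copied from the hunks above, inputs invented):

#include <stdio.h>
#include <stdint.h>

struct mod_entry { uint32_t rx_rate; uint32_t range_percent; };

static const struct mod_entry table[] = {
	{4000, 0}, {4400, 10}, {5060, 20}, {5230, 30}, {5540, 40},
	{5820, 50}, {6120, 60}, {6435, 70}, {6745, 80}, {7000, 90},
	{0xFFFFFFFF, 100}
};

int main(void)
{
	uint64_t bytes = 75000, delta_us = 150;		/* sampled traffic */
	uint32_t range_start = 6, range_end = 125;	/* 10-40 Gbps row */
	uint32_t prev_target = 9;			/* last tobe_ value */
	uint32_t mbps = (uint32_t)(bytes * 8 / delta_us);	/* 4000 */
	unsigned int i;

	for (i = 0; i + 1 < sizeof(table) / sizeof(table[0]); i++)
		if (mbps < table[i].rx_rate)
			break;

	/* Interpolate into the range, then damp toward the old target. */
	uint32_t timer = range_start +
		(range_end - range_start) * table[i].range_percent / 100;
	uint32_t damped = (timer + prev_target) >> 1;

	printf("rate=%u Mbps index=%u timer=%u us damped=%u us\n",
	       mbps, i, timer, damped);	/* rate=4000 index=1 timer=17 damped=13 */
	return 0;
}
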
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
50 u32 pad10; 50 u32 pad10;
51}; 51};
52 52
53struct vnic_rx_bytes_counter {
54 unsigned int small_pkt_bytes_cnt;
55 unsigned int large_pkt_bytes_cnt;
56};
57
53struct vnic_cq { 58struct vnic_cq {
54 unsigned int index; 59 unsigned int index;
55 struct vnic_dev *vdev; 60 struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
58 unsigned int to_clean; 63 unsigned int to_clean;
59 unsigned int last_color; 64 unsigned int last_color;
60 unsigned int interrupt_offset; 65 unsigned int interrupt_offset;
66 struct vnic_rx_bytes_counter pkt_size_counter;
67 unsigned int cur_rx_coal_timeval;
68 unsigned int tobe_rx_coal_timeval;
69 ktime_t prev_ts;
61}; 70};
62 71
63static inline unsigned int vnic_cq_service(struct vnic_cq *cq, 72static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd92598b7e..e86a45cb9e68 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
657 return err; 657 return err;
658} 658}
659 659
660int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 660int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
661{ 661{
662 u64 a0 = 0, a1 = 0; 662 u64 a0 = 0, a1 = 0;
663 int wait = 1000; 663 int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
674 return err; 674 return err;
675} 675}
676 676
677int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) 677int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
678{ 678{
679 u64 a0 = 0, a1 = 0; 679 u64 a0 = 0, a1 = 0;
680 int wait = 1000; 680 int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029862a1..1f3b301f8225 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
95int vnic_dev_hang_notify(struct vnic_dev *vdev); 95int vnic_dev_hang_notify(struct vnic_dev *vdev);
96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
97 int broadcast, int promisc, int allmulti); 97 int broadcast, int promisc, int allmulti);
98int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 98int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
99int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 99int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
102int vnic_dev_notify_unset(struct vnic_dev *vdev); 102int vnic_dev_notify_unset(struct vnic_dev *vdev);
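
The u8 * to const u8 * change in vnic_dev and enic_dev exists so the new __dev_uc_sync()/__dev_mc_sync() callbacks in enic_main.c, which receive const pointers, can call straight through without casts. A minimal illustration of the point (hypothetical helpers):

#include <stdint.h>

typedef uint8_t u8;

/* const parameter: promises not to modify the MAC address given. */
static int example_add_addr(const u8 *addr)
{
	(void)addr;
	return 0;
}

static int example_sync(const u8 *mc_addr)
{
	/* Were example_add_addr() to take a plain u8 *, this call would
	 * need a cast that discards const and hides real bugs.
	 */
	return example_add_addr(mc_addr);
}
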
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93be333b..13723c96d1a2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
109 u8 imr_all; 109 u8 imr_all;
110 110
111 unsigned int flags; 111 unsigned int flags;
112 unsigned int in_timeout:1;
112 unsigned int in_suspend:1; 113 unsigned int in_suspend:1;
113 unsigned int wake_supported:1; 114 unsigned int wake_supported:1;
114 115
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
187 * The essential point is that we have to do a double reset, and the 188 * The essential point is that we have to do a double reset, and the
188 * instruction is to set LBK into MAC internal loopback mode. 189 * instruction is to set LBK into MAC internal loopback mode.
189 */ 190 */
190 iow(db, DM9000_NCR, 0x03); 191 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
191 udelay(100); /* Application note says at least 20 us */ 192 udelay(100); /* Application note says at least 20 us */
192 if (ior(db, DM9000_NCR) & 1) 193 if (ior(db, DM9000_NCR) & 1)
193 dev_err(db->dev, "dm9000 did not respond to first reset\n"); 194 dev_err(db->dev, "dm9000 did not respond to first reset\n");
194 195
195 iow(db, DM9000_NCR, 0); 196 iow(db, DM9000_NCR, 0);
196 iow(db, DM9000_NCR, 0x03); 197 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
197 udelay(100); 198 udelay(100);
198 if (ior(db, DM9000_NCR) & 1) 199 if (ior(db, DM9000_NCR) & 1)
199 dev_err(db->dev, "dm9000 did not respond to second reset\n"); 200 dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
273 */ 274 */
274static void dm9000_msleep(board_info_t *db, unsigned int ms) 275static void dm9000_msleep(board_info_t *db, unsigned int ms)
275{ 276{
276 if (db->in_suspend) 277 if (db->in_suspend || db->in_timeout)
277 mdelay(ms); 278 mdelay(ms);
278 else 279 else
279 msleep(ms); 280 msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
334 unsigned long reg_save; 335 unsigned long reg_save;
335 336
336 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 337 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
337 mutex_lock(&db->addr_lock); 338 if (!db->in_timeout)
339 mutex_lock(&db->addr_lock);
338 340
339 spin_lock_irqsave(&db->lock, flags); 341 spin_lock_irqsave(&db->lock, flags);
340 342
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
365 writeb(reg_save, db->io_addr); 367 writeb(reg_save, db->io_addr);
366 368
367 spin_unlock_irqrestore(&db->lock, flags); 369 spin_unlock_irqrestore(&db->lock, flags);
368 mutex_unlock(&db->addr_lock); 370 if (!db->in_timeout)
371 mutex_unlock(&db->addr_lock);
369} 372}
370 373
371/* dm9000_set_io 374/* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
882 spin_unlock_irqrestore(&db->lock, flags); 885 spin_unlock_irqrestore(&db->lock, flags);
883} 886}
884 887
888static void
889dm9000_mask_interrupts(board_info_t *db)
890{
891 iow(db, DM9000_IMR, IMR_PAR);
892}
893
894static void
895dm9000_unmask_interrupts(board_info_t *db)
896{
897 iow(db, DM9000_IMR, db->imr_all);
898}
899
885/* 900/*
886 * Initialize dm9000 board 901 * Initialize dm9000 board
887 */ 902 */
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
894 909
895 dm9000_dbg(db, 1, "entering %s\n", __func__); 910 dm9000_dbg(db, 1, "entering %s\n", __func__);
896 911
912 dm9000_reset(db);
913 dm9000_mask_interrupts(db);
914
897 /* I/O mode */ 915 /* I/O mode */
898 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 916 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
899 917
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
941 959
942 db->imr_all = imr; 960 db->imr_all = imr;
943 961
944 /* Enable TX/RX interrupt mask */
945 iow(db, DM9000_IMR, imr);
946
947 /* Init Driver variable */ 962 /* Init Driver variable */
948 db->tx_pkt_cnt = 0; 963 db->tx_pkt_cnt = 0;
949 db->queue_pkt_len = 0; 964 db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
959 974
960 /* Save previous register address */ 975 /* Save previous register address */
961 spin_lock_irqsave(&db->lock, flags); 976 spin_lock_irqsave(&db->lock, flags);
977 db->in_timeout = 1;
962 reg_save = readb(db->io_addr); 978 reg_save = readb(db->io_addr);
963 979
964 netif_stop_queue(dev); 980 netif_stop_queue(dev);
965 dm9000_reset(db);
966 dm9000_init_dm9000(dev); 981 dm9000_init_dm9000(dev);
982 dm9000_unmask_interrupts(db);
967 /* We can accept TX packets again */ 983 /* We can accept TX packets again */
968 dev->trans_start = jiffies; /* prevent tx timeout */ 984 dev->trans_start = jiffies; /* prevent tx timeout */
969 netif_wake_queue(dev); 985 netif_wake_queue(dev);
970 986
971 /* Restore previous register address */ 987 /* Restore previous register address */
972 writeb(reg_save, db->io_addr); 988 writeb(reg_save, db->io_addr);
989 db->in_timeout = 0;
973 spin_unlock_irqrestore(&db->lock, flags); 990 spin_unlock_irqrestore(&db->lock, flags);
974} 991}
975 992
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
1093 if (rxbyte & DM9000_PKT_ERR) { 1110 if (rxbyte & DM9000_PKT_ERR) {
1094 dev_warn(db->dev, "status check fail: %d\n", rxbyte); 1111 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1095 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 1112 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1096 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
1097 return; 1113 return;
1098 } 1114 }
1099 1115
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1193 /* Save previous register address */ 1209 /* Save previous register address */
1194 reg_save = readb(db->io_addr); 1210 reg_save = readb(db->io_addr);
1195 1211
1196 /* Disable all interrupts */ 1212 dm9000_mask_interrupts(db);
1197 iow(db, DM9000_IMR, IMR_PAR);
1198
1199 /* Got DM9000 interrupt status */ 1213 /* Got DM9000 interrupt status */
1200 int_status = ior(db, DM9000_ISR); /* Got ISR */ 1214 int_status = ior(db, DM9000_ISR); /* Got ISR */
1201 iow(db, DM9000_ISR, int_status); /* Clear ISR status */ 1215 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1218 } 1232 }
1219 } 1233 }
1220 1234
1221 /* Re-enable interrupt mask */ 1235 dm9000_unmask_interrupts(db);
1222 iow(db, DM9000_IMR, db->imr_all);
1223
1224 /* Restore previous register address */ 1236 /* Restore previous register address */
1225 writeb(reg_save, db->io_addr); 1237 writeb(reg_save, db->io_addr);
1226 1238
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
1292 * may work, and tell the user that this is a problem */ 1304 * may work, and tell the user that this is a problem */
1293 1305
1294 if (irqflags == IRQF_TRIGGER_NONE) 1306 if (irqflags == IRQF_TRIGGER_NONE)
1307 irqflags = irq_get_trigger_type(dev->irq);
1308
1309 if (irqflags == IRQF_TRIGGER_NONE)
1295 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1296 1311
1297 irqflags |= IRQF_SHARED; 1312 irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
 1301 mdelay(1); /* delay needed by DM9000B */ 1316 mdelay(1); /* delay needed by DM9000B */
1302 1317
1303 /* Initialize DM9000 board */ 1318 /* Initialize DM9000 board */
1304 dm9000_reset(db);
1305 dm9000_init_dm9000(dev); 1319 dm9000_init_dm9000(dev);
1306 1320
1307 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1321 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1308 return -EAGAIN; 1322 return -EAGAIN;
1323 /* Now that we have an interrupt handler hooked up we can unmask
1324 * our interrupts
1325 */
1326 dm9000_unmask_interrupts(db);
1309 1327
1310 /* Init driver variable */ 1328 /* Init driver variable */
1311 db->dbug_cnt = 0; 1329 db->dbug_cnt = 0;
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
1313 mii_check_media(&db->mii, netif_msg_link(db), 1); 1331 mii_check_media(&db->mii, netif_msg_link(db), 1);
1314 netif_start_queue(dev); 1332 netif_start_queue(dev);
1315 1333
1316 dm9000_schedule_poll(db); 1334 /* Poll initial link status */
1335 schedule_delayed_work(&db->phy_poll, 1);
1317 1336
1318 return 0; 1337 return 0;
1319} 1338}
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
1326 /* RESET device */ 1345 /* RESET device */
1327 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 1346 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1328 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */ 1347 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1329 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */ 1348 dm9000_mask_interrupts(db);
1330 iow(db, DM9000_RCR, 0x00); /* Disable RX */ 1349 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1331} 1350}
1332 1351
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
1547 db->flags |= DM9000_PLATF_SIMPLE_PHY; 1566 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1548#endif 1567#endif
1549 1568
1550 /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), 1569 dm9000_reset(db);
1551 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
1552 * while probe stage.
1553 */
1554
1555 iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
1556 1570
1557 /* try multiple times, DM9000 sometimes gets the read wrong */ 1571 /* try multiple times, DM9000 sometimes gets the read wrong */
1558 for (i = 0; i < 8; i++) { 1572 for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
1695 /* reset if we were not in wake mode to ensure if 1709 /* reset if we were not in wake mode to ensure if
1696 * the device was powered off it is in a known state */ 1710 * the device was powered off it is in a known state */
1697 if (!db->wake_state) { 1711 if (!db->wake_state) {
1698 dm9000_reset(db);
1699 dm9000_init_dm9000(ndev); 1712 dm9000_init_dm9000(ndev);
1713 dm9000_unmask_interrupts(db);
1700 } 1714 }
1701 1715
1702 netif_device_attach(ndev); 1716 netif_device_attach(ndev);
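
The dm9000 rework concentrates all IMR writes in dm9000_mask_interrupts()/dm9000_unmask_interrupts() and moves the chip reset into dm9000_init_dm9000(), so open, tx-timeout and resume all follow the same reset, init, unmask order. The interrupt handler keeps its save/mask/ack/process/unmask/restore shape; a condensed sketch of that flow, assuming the driver's iow()/ior() accessors:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	u8 reg_save, int_status;

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);		/* save index register */
	dm9000_mask_interrupts(db);		/* IMR <- IMR_PAR only */

	int_status = ior(db, DM9000_ISR);	/* read the sources ... */
	iow(db, DM9000_ISR, int_status);	/* ... and ack them */
	/* handle the RX/TX bits of int_status here */

	dm9000_unmask_interrupts(db);		/* IMR <- db->imr_all */
	writeb(reg_save, db->io_addr);		/* restore index register */
	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
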
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de78aac8..861660841ce2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1703#ifdef CONFIG_TULIP_NAPI 1703#ifdef CONFIG_TULIP_NAPI
1704 netif_napi_add(dev, &tp->napi, tulip_poll, 16); 1704 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1705#endif 1705#endif
1706 SET_ETHTOOL_OPS(dev, &ops); 1706 dev->ethtool_ops = &ops;
1707 1707
1708 if (register_netdev(dev)) 1708 if (register_netdev(dev))
1709 goto err_out_free_ring; 1709 goto err_out_free_ring;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6af7b9..80afec335a11 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
962 } 962 }
963 if(db->link_failed) 963 if(db->link_failed)
964 { 964 {
965 ethtool_cmd_speed_set(ecmd, -1); 965 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
966 ecmd->duplex = -1; 966 ecmd->duplex = DUPLEX_UNKNOWN;
967 } 967 }
968 968
969 if (db->media_mode & ULI526X_AUTO) 969 if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d219f7..1274b6fdac8a 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
227 } 227 }
228 dev->netdev_ops = &netdev_ops; 228 dev->netdev_ops = &netdev_ops;
229 dev->watchdog_timeo = TX_TIMEOUT; 229 dev->watchdog_timeo = TX_TIMEOUT;
230 SET_ETHTOOL_OPS(dev, &ethtool_ops); 230 dev->ethtool_ops = &ethtool_ops;
231#if 0 231#if 0
232 dev->features = NETIF_F_IP_CSUM; 232 dev->features = NETIF_F_IP_CSUM;
233#endif 233#endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1185 ethtool_cmd_speed_set(cmd, np->speed); 1185 ethtool_cmd_speed_set(cmd, np->speed);
1186 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 1186 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1187 } else { 1187 } else {
1188 ethtool_cmd_speed_set(cmd, -1); 1188 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1189 cmd->duplex = -1; 1189 cmd->duplex = DUPLEX_UNKNOWN;
1190 } 1190 }
1191 if ( np->an_enable) 1191 if ( np->an_enable)
1192 cmd->autoneg = AUTONEG_ENABLE; 1192 cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0d48c1..433c1e185442 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
577 577
578 /* The chip-specific entries in the device structure. */ 578 /* The chip-specific entries in the device structure. */
579 dev->netdev_ops = &netdev_ops; 579 dev->netdev_ops = &netdev_ops;
580 SET_ETHTOOL_OPS(dev, &ethtool_ops); 580 dev->ethtool_ops = &ethtool_ops;
581 dev->watchdog_timeo = TX_TIMEOUT; 581 dev->watchdog_timeo = TX_TIMEOUT;
582 582
583 pci_set_drvdata(pdev, dev); 583 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205e56ee..056b44b93477 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
134 134
135 struct pci_dev *dev; 135 struct pci_dev *dev;
136 136
137 void * __iomem io; 137 void __iomem *io;
138 void * __iomem dma_io; 138 void __iomem *dma_io;
139 139
140 struct hrtimer hrtimer; 140 struct hrtimer hrtimer;
141 141
142 int tx_dma_chan; 142 int tx_dma_chan;
143 int rx_dma_chan; 143 int rx_dma_chan;
144 void * __iomem ec_io; 144 void __iomem *ec_io;
145 void * __iomem fifo_io; 145 void __iomem *fifo_io;
146 void * __iomem mii_io; 146 void __iomem *mii_io;
147 void * __iomem mac_io; 147 void __iomem *mac_io;
148 148
149 struct bhf_dma rx_buf; 149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs; 150 struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{ 297{
298 struct device *dev = PRIV_TO_DEV(priv); 298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i; 299 unsigned block_count, i;
300 void * __iomem ec_info; 300 void __iomem *ec_info;
301 301
302 dev_dbg(dev, "Info block:\n"); 302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); 303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{ 569{
570 struct net_device *net_dev; 570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv; 571 struct ec_bhf_priv *priv;
572 void * __iomem dma_io; 572 void __iomem *dma_io;
573 void * __iomem io; 573 void __iomem *io;
574 int err = 0; 574 int err = 0;
575 575
576 err = pci_enable_device(dev); 576 err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
615 } 615 }
616 616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); 617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
618 if (net_dev == 0) { 618 if (net_dev == NULL) {
619 err = -ENOMEM; 619 err = -ENOMEM;
620 goto err_unmap_dma_io; 620 goto err_unmap_dma_io;
621 } 621 }
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..2e7c5553955e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
120#define MAX_VFS 30 /* Max VFs supported by BE3 FW */ 120#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
121#define FW_VER_LEN 32 121#define FW_VER_LEN 32
122 122
123#define RSS_INDIR_TABLE_LEN 128
124#define RSS_HASH_KEY_LEN 40
125
123struct be_dma_mem { 126struct be_dma_mem {
124 void *va; 127 void *va;
125 dma_addr_t dma; 128 dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
371#define BE_FLAGS_LINK_STATUS_INIT 1 374#define BE_FLAGS_LINK_STATUS_INIT 1
372#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 375#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
373#define BE_FLAGS_VLAN_PROMISC (1 << 4) 376#define BE_FLAGS_VLAN_PROMISC (1 << 4)
377#define BE_FLAGS_MCAST_PROMISC (1 << 5)
374#define BE_FLAGS_NAPI_ENABLED (1 << 9) 378#define BE_FLAGS_NAPI_ENABLED (1 << 9)
375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 379#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) 380#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
409 u32 if_cap_flags; 413 u32 if_cap_flags;
410}; 414};
411 415
416struct rss_info {
417 u64 rss_flags;
418 u8 rsstable[RSS_INDIR_TABLE_LEN];
419 u8 rss_queue[RSS_INDIR_TABLE_LEN];
420 u8 rss_hkey[RSS_HASH_KEY_LEN];
421};
422
412struct be_adapter { 423struct be_adapter {
413 struct pci_dev *pdev; 424 struct pci_dev *pdev;
414 struct net_device *netdev; 425 struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
445 struct be_drv_stats drv_stats; 456 struct be_drv_stats drv_stats;
446 struct be_aic_obj aic_obj[MAX_EVT_QS]; 457 struct be_aic_obj aic_obj[MAX_EVT_QS];
447 u16 vlans_added; 458 u16 vlans_added;
448 u8 vlan_tag[VLAN_N_VID]; 459 unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
449 u8 vlan_prio_bmap; /* Available Priority BitMap */ 460 u8 vlan_prio_bmap; /* Available Priority BitMap */
450 u16 recommended_prio; /* Recommended Priority */ 461 u16 recommended_prio; /* Recommended Priority */
451 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ 462 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +518,7 @@ struct be_adapter {
507 u32 msg_enable; 518 u32 msg_enable;
508 int be_get_temp_freq; 519 int be_get_temp_freq;
509 u8 pf_number; 520 u8 pf_number;
510 u64 rss_flags; 521 struct rss_info rss_info;
511}; 522};
512 523
513#define be_physfn(adapter) (!adapter->virtfn) 524#define be_physfn(adapter) (!adapter->virtfn)
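
Switching be_adapter from u8 vlan_tag[VLAN_N_VID] to unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)] shrinks the per-adapter footprint from 4096 bytes to 512 and lets the driver use the generic bitops. A sketch of the intended usage, under the assumption that the add/kill-vid and rx-filter paths adopt the standard helpers:

#include <linux/bitops.h>
#include <linux/if_vlan.h>

static void example_vid_track(struct be_adapter *adapter, u16 vid)
{
	set_bit(vid, adapter->vids);		/* ndo_vlan_rx_add_vid */
	if (test_bit(vid, adapter->vids))	/* membership check */
		clear_bit(vid, adapter->vids);	/* ndo_vlan_rx_kill_vid */
}

static void example_vid_walk(struct be_adapter *adapter)
{
	u16 vid;

	/* Visit every configured VLAN, e.g. to build the rx-filter cmd. */
	for_each_set_bit(vid, adapter->vids, VLAN_N_VID)
		;
}
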
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..f4ea3490f446 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
52 } 52 }
53}; 53};
54 54
55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, 55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
56 u8 subsystem)
57{ 56{
58 int i; 57 int i;
59 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); 58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
120 return (void *)addr; 119 return (void *)addr;
121} 120}
122 121
123static int be_mcc_compl_process(struct be_adapter *adapter, 122static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
124 struct be_mcc_compl *compl)
125{ 123{
126 u16 compl_status, extd_status; 124 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
127 struct be_cmd_resp_hdr *resp_hdr; 125 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
128 u8 opcode = 0, subsystem = 0; 126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
129 127 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
130 /* Just swap the status to host endian; mcc tag is opaquely copied 128 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
131 * from mcc_wrb */ 129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
132 be_dws_le_to_cpu(compl, 4); 130 return true;
133 131 else
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 132 return false;
135 CQE_STATUS_COMPL_MASK; 133}
136 134
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 135/* Place holder for all the async MCC cmds wherein the caller is not in a busy
136 * loop (has not issued be_mcc_notify_wait())
137 */
138static void be_async_cmd_process(struct be_adapter *adapter,
139 struct be_mcc_compl *compl,
140 struct be_cmd_resp_hdr *resp_hdr)
141{
142 enum mcc_base_status base_status = base_status(compl->status);
143 u8 opcode = 0, subsystem = 0;
138 144
139 if (resp_hdr) { 145 if (resp_hdr) {
140 opcode = resp_hdr->opcode; 146 opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && 150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl); 152 complete(&adapter->et_cmd_compl);
147 return 0; 153 return;
148 } 154 }
149 155
150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 157 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
152 (subsystem == CMD_SUBSYSTEM_COMMON)) { 158 subsystem == CMD_SUBSYSTEM_COMMON) {
153 adapter->flash_status = compl_status; 159 adapter->flash_status = compl->status;
154 complete(&adapter->et_cmd_compl); 160 complete(&adapter->et_cmd_compl);
161 return;
155 } 162 }
156 163
157 if (compl_status == MCC_STATUS_SUCCESS) { 164 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
158 if (((opcode == OPCODE_ETH_GET_STATISTICS) || 165 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
159 (opcode == OPCODE_ETH_GET_PPORT_STATS)) && 166 subsystem == CMD_SUBSYSTEM_ETH &&
160 (subsystem == CMD_SUBSYSTEM_ETH)) { 167 base_status == MCC_STATUS_SUCCESS) {
161 be_parse_stats(adapter); 168 be_parse_stats(adapter);
162 adapter->stats_cmd_sent = false; 169 adapter->stats_cmd_sent = false;
163 } 170 return;
164 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 171 }
165 subsystem == CMD_SUBSYSTEM_COMMON) { 172
173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 if (base_status == MCC_STATUS_SUCCESS) {
166 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 176 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
167 (void *)resp_hdr; 177 (void *)resp_hdr;
168 adapter->drv_stats.be_on_die_temperature = 178 adapter->drv_stats.be_on_die_temperature =
169 resp->on_die_temperature; 179 resp->on_die_temperature;
170 } 180 } else {
171 } else {
172 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
173 adapter->be_get_temp_freq = 0; 181 adapter->be_get_temp_freq = 0;
182 }
183 return;
184 }
185}
186
187static int be_mcc_compl_process(struct be_adapter *adapter,
188 struct be_mcc_compl *compl)
189{
190 enum mcc_base_status base_status;
191 enum mcc_addl_status addl_status;
192 struct be_cmd_resp_hdr *resp_hdr;
193 u8 opcode = 0, subsystem = 0;
194
195 /* Just swap the status to host endian; mcc tag is opaquely copied
196 * from mcc_wrb */
197 be_dws_le_to_cpu(compl, 4);
198
199 base_status = base_status(compl->status);
200 addl_status = addl_status(compl->status);
201
202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
203 if (resp_hdr) {
204 opcode = resp_hdr->opcode;
205 subsystem = resp_hdr->subsystem;
206 }
207
208 be_async_cmd_process(adapter, compl, resp_hdr);
174 209
175 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 210 if (base_status != MCC_STATUS_SUCCESS &&
176 compl_status == MCC_STATUS_ILLEGAL_REQUEST) 211 !be_skip_err_log(opcode, base_status, addl_status)) {
177 goto done;
178 212
179 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 213 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
180 dev_warn(&adapter->pdev->dev, 214 dev_warn(&adapter->pdev->dev,
181 "VF is not privileged to issue opcode %d-%d\n", 215 "VF is not privileged to issue opcode %d-%d\n",
182 opcode, subsystem); 216 opcode, subsystem);
183 } else { 217 } else {
184 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
185 CQE_STATUS_EXTD_MASK;
186 dev_err(&adapter->pdev->dev, 218 dev_err(&adapter->pdev->dev,
187 "opcode %d-%d failed:status %d-%d\n", 219 "opcode %d-%d failed:status %d-%d\n",
188 opcode, subsystem, compl_status, extd_status); 220 opcode, subsystem, base_status, addl_status);
189
190 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
191 return extd_status;
192 } 221 }
193 } 222 }
194done: 223 return compl->status;
195 return compl_status;
196} 224}
197 225
198/* Link state evt is a string of bytes; no need for endian swapping */ 226/* Link state evt is a string of bytes; no need for endian swapping */
199static void be_async_link_state_process(struct be_adapter *adapter, 227static void be_async_link_state_process(struct be_adapter *adapter,
200 struct be_async_event_link_state *evt) 228 struct be_mcc_compl *compl)
201{ 229{
230 struct be_async_event_link_state *evt =
231 (struct be_async_event_link_state *)compl;
232
202 /* When link status changes, link speed must be re-queried from FW */ 233 /* When link status changes, link speed must be re-queried from FW */
203 adapter->phy.link_speed = -1; 234 adapter->phy.link_speed = -1;
204 235
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
221 252
222/* Grp5 CoS Priority evt */ 253/* Grp5 CoS Priority evt */
223static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 254static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
224 struct be_async_event_grp5_cos_priority *evt) 255 struct be_mcc_compl *compl)
225{ 256{
257 struct be_async_event_grp5_cos_priority *evt =
258 (struct be_async_event_grp5_cos_priority *)compl;
259
226 if (evt->valid) { 260 if (evt->valid) {
227 adapter->vlan_prio_bmap = evt->available_priority_bmap; 261 adapter->vlan_prio_bmap = evt->available_priority_bmap;
228 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 262 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
233 267
234/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 268/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
235static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 269static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
236 struct be_async_event_grp5_qos_link_speed *evt) 270 struct be_mcc_compl *compl)
237{ 271{
272 struct be_async_event_grp5_qos_link_speed *evt =
273 (struct be_async_event_grp5_qos_link_speed *)compl;
274
238 if (adapter->phy.link_speed >= 0 && 275 if (adapter->phy.link_speed >= 0 &&
239 evt->physical_port == adapter->port_num) 276 evt->physical_port == adapter->port_num)
240 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; 277 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
242 279
243/* Grp5 PVID evt */ 280
244static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 281static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
245 struct be_async_event_grp5_pvid_state *evt) 282 struct be_mcc_compl *compl)
246{ 283{
284 struct be_async_event_grp5_pvid_state *evt =
285 (struct be_async_event_grp5_pvid_state *)compl;
286
247 if (evt->enabled) { 287 if (evt->enabled) {
248 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 288 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
249 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); 289 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
253} 293}
254 294
255static void be_async_grp5_evt_process(struct be_adapter *adapter, 295static void be_async_grp5_evt_process(struct be_adapter *adapter,
256 u32 trailer, struct be_mcc_compl *evt) 296 struct be_mcc_compl *compl)
257{ 297{
258 u8 event_type = 0; 298 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
259 299 ASYNC_EVENT_TYPE_MASK;
260 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
261 ASYNC_TRAILER_EVENT_TYPE_MASK;
262 300
263 switch (event_type) { 301 switch (event_type) {
264 case ASYNC_EVENT_COS_PRIORITY: 302 case ASYNC_EVENT_COS_PRIORITY:
265 be_async_grp5_cos_priority_process(adapter, 303 be_async_grp5_cos_priority_process(adapter, compl);
266 (struct be_async_event_grp5_cos_priority *)evt); 304 break;
267 break;
268 case ASYNC_EVENT_QOS_SPEED: 305 case ASYNC_EVENT_QOS_SPEED:
269 be_async_grp5_qos_speed_process(adapter, 306 be_async_grp5_qos_speed_process(adapter, compl);
270 (struct be_async_event_grp5_qos_link_speed *)evt); 307 break;
271 break;
272 case ASYNC_EVENT_PVID_STATE: 308 case ASYNC_EVENT_PVID_STATE:
273 be_async_grp5_pvid_state_process(adapter, 309 be_async_grp5_pvid_state_process(adapter, compl);
274 (struct be_async_event_grp5_pvid_state *)evt); 310 break;
275 break;
276 default: 311 default:
277 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", 312 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
278 event_type); 313 event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
281} 316}
282 317
283static void be_async_dbg_evt_process(struct be_adapter *adapter, 318static void be_async_dbg_evt_process(struct be_adapter *adapter,
284 u32 trailer, struct be_mcc_compl *cmp) 319 struct be_mcc_compl *cmp)
285{ 320{
286 u8 event_type = 0; 321 u8 event_type = 0;
287 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; 322 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
288 323
289 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & 324 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
290 ASYNC_TRAILER_EVENT_TYPE_MASK; 325 ASYNC_EVENT_TYPE_MASK;
291 326
292 switch (event_type) { 327 switch (event_type) {
293 case ASYNC_DEBUG_EVENT_TYPE_QNQ: 328 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
302 } 337 }
303} 338}
304 339
305static inline bool is_link_state_evt(u32 trailer) 340static inline bool is_link_state_evt(u32 flags)
306{ 341{
307 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 342 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
308 ASYNC_TRAILER_EVENT_CODE_MASK) == 343 ASYNC_EVENT_CODE_LINK_STATE;
309 ASYNC_EVENT_CODE_LINK_STATE;
310} 344}
311 345
312static inline bool is_grp5_evt(u32 trailer) 346static inline bool is_grp5_evt(u32 flags)
313{ 347{
314 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 348 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
315 ASYNC_TRAILER_EVENT_CODE_MASK) == 349 ASYNC_EVENT_CODE_GRP_5;
316 ASYNC_EVENT_CODE_GRP_5);
317} 350}
318 351
319static inline bool is_dbg_evt(u32 trailer) 352static inline bool is_dbg_evt(u32 flags)
320{ 353{
321 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 354 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
322 ASYNC_TRAILER_EVENT_CODE_MASK) == 355 ASYNC_EVENT_CODE_QNQ;
323 ASYNC_EVENT_CODE_QNQ); 356}
357
358static void be_mcc_event_process(struct be_adapter *adapter,
359 struct be_mcc_compl *compl)
360{
361 if (is_link_state_evt(compl->flags))
362 be_async_link_state_process(adapter, compl);
363 else if (is_grp5_evt(compl->flags))
364 be_async_grp5_evt_process(adapter, compl);
365 else if (is_dbg_evt(compl->flags))
366 be_async_dbg_evt_process(adapter, compl);
324} 367}
325 368
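All three is_*_evt() helpers now take compl->flags directly instead of a separate trailer word; the event code and event type are just two byte-wide fields of that word. A sketch with assumed field positions (the real ASYNC_EVENT_* constants are defined in be_cmds.h):

	/* Assumed layout of the async-event flags word; values are illustrative. */
	#define ASYNC_EVENT_CODE_SHIFT	8
	#define ASYNC_EVENT_CODE_MASK	0xFF
	#define ASYNC_EVENT_TYPE_SHIFT	16
	#define ASYNC_EVENT_TYPE_MASK	0xFF

	static inline u8 example_event_code(u32 flags)
	{
		return (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK;
	}

	static inline u8 example_event_type(u32 flags)
	{
		return (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK;
	}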
326static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 369static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
362 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 405 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
363 406
364 spin_lock(&adapter->mcc_cq_lock); 407 spin_lock(&adapter->mcc_cq_lock);
408
365 while ((compl = be_mcc_compl_get(adapter))) { 409 while ((compl = be_mcc_compl_get(adapter))) {
366 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 410 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
367 /* Interpret flags as an async trailer */ 411 be_mcc_event_process(adapter, compl);
368 if (is_link_state_evt(compl->flags))
369 be_async_link_state_process(adapter,
370 (struct be_async_event_link_state *) compl);
371 else if (is_grp5_evt(compl->flags))
372 be_async_grp5_evt_process(adapter,
373 compl->flags, compl);
374 else if (is_dbg_evt(compl->flags))
375 be_async_dbg_evt_process(adapter,
376 compl->flags, compl);
377 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 412 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
378 status = be_mcc_compl_process(adapter, compl); 413 status = be_mcc_compl_process(adapter, compl);
379 atomic_dec(&mcc_obj->q.used); 414 atomic_dec(&mcc_obj->q.used);
380 } 415 }
381 be_mcc_compl_use(compl); 416 be_mcc_compl_use(compl);
382 num++; 417 num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
436 if (status == -EIO) 471 if (status == -EIO)
437 goto out; 472 goto out;
438 473
439 status = resp->status; 474 status = (resp->base_status |
475 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
476 CQE_ADDL_STATUS_SHIFT));
440out: 477out:
441 return status; 478 return status;
442} 479}
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
560 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 597 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
561 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 598 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
562 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 599 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
563 sliport_err1 = ioread32(adapter->db + 600 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
564 SLIPORT_ERROR1_OFFSET); 601 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
565 sliport_err2 = ioread32(adapter->db +
566 SLIPORT_ERROR2_OFFSET);
567 602
568 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && 603 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
569 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) 604 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
630 if (stage == POST_STAGE_ARMFW_RDY) 665 if (stage == POST_STAGE_ARMFW_RDY)
631 return 0; 666 return 0;
632 667
633 dev_info(dev, "Waiting for POST, %ds elapsed\n", 668 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
634 timeout);
635 if (msleep_interruptible(2000)) { 669 if (msleep_interruptible(2000)) {
636 dev_err(dev, "Waiting for POST aborted\n"); 670 dev_err(dev, "Waiting for POST aborted\n");
637 return -EINTR; 671 return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
649 return &wrb->payload.sgl[0]; 683 return &wrb->payload.sgl[0];
650} 684}
651 685
652static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, 686static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
653 unsigned long addr)
654{ 687{
655 wrb->tag0 = addr & 0xFFFFFFFF; 688 wrb->tag0 = addr & 0xFFFFFFFF;
656 wrb->tag1 = upper_32_bits(addr); 689 wrb->tag1 = upper_32_bits(addr);
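fill_wrb_tags() stashes a pointer-sized address in the two 32-bit WRB tag words; be_decode_resp_hdr(), used by be_mcc_compl_process() earlier, presumably reverses the split along these lines (a sketch, not the function's verbatim body):

	static struct be_cmd_resp_hdr *example_decode_resp_hdr(u32 tag0, u32 tag1)
	{
		unsigned long addr = tag1;

		/* double 16-bit shift avoids an undefined 32-bit shift on 32-bit builds */
		addr = ((addr << 16) << 16) | tag0;
		return (struct be_cmd_resp_hdr *)addr;
	}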
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
659/* Don't touch the hdr after it's prepared */ 692/* Don't touch the hdr after it's prepared */
660/* mem will be NULL for embedded commands */ 693/* mem will be NULL for embedded commands */
661static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 694static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
662 u8 subsystem, u8 opcode, int cmd_len, 695 u8 subsystem, u8 opcode, int cmd_len,
663 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 696 struct be_mcc_wrb *wrb,
697 struct be_dma_mem *mem)
664{ 698{
665 struct be_sge *sge; 699 struct be_sge *sge;
666 700
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
683} 717}
684 718
685static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 719static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
686 struct be_dma_mem *mem) 720 struct be_dma_mem *mem)
687{ 721{
688 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 722 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
689 u64 dma = (u64)mem->dma; 723 u64 dma = (u64)mem->dma;
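be_cmd_page_addrs_prepare() caps the scatter list at max_pages. PAGES_4K_SPANNED presumably counts how many 4 KB pages a (va, size) buffer straddles, i.e. something like the following (an assumed definition, mirroring how the macro is used here):

	#define PAGE_SHIFT_4K	12
	#define PAGE_SIZE_4K	(1 << PAGE_SHIFT_4K)
	/* Assumed: pages touched = ceil((offset-within-page + size) / 4K) */
	#define PAGES_4K_SPANNED(_address, size) \
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))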
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
868 req = embedded_payload(wrb); 902 req = embedded_payload(wrb);
869 903
870 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 904 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
871 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 905 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
906 NULL);
872 907
873 /* Support for EQ_CREATEv2 is available only from SH-R onwards */ 908
874 if (!(BEx_chip(adapter) || lancer_chip(adapter))) 909 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
917 req = embedded_payload(wrb); 952 req = embedded_payload(wrb);
918 953
919 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 954 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
920 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 955 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
956 NULL);
921 req->type = MAC_ADDRESS_TYPE_NETWORK; 957 req->type = MAC_ADDRESS_TYPE_NETWORK;
922 if (permanent) { 958 if (permanent) {
923 req->permanent = 1; 959 req->permanent = 1;
@@ -940,7 +976,7 @@ err:
940 976
941/* Uses synchronous MCCQ */ 977/* Uses synchronous MCCQ */
942int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 978int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
943 u32 if_id, u32 *pmac_id, u32 domain) 979 u32 if_id, u32 *pmac_id, u32 domain)
944{ 980{
945 struct be_mcc_wrb *wrb; 981 struct be_mcc_wrb *wrb;
946 struct be_cmd_req_pmac_add *req; 982 struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
956 req = embedded_payload(wrb); 992 req = embedded_payload(wrb);
957 993
958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 994 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
959 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); 995 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
996 NULL);
960 997
961 req->hdr.domain = domain; 998 req->hdr.domain = domain;
962 req->if_id = cpu_to_le32(if_id); 999 req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
1012 1049
1013/* Uses Mbox */ 1050/* Uses Mbox */
1014int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1051int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1015 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1052 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1016{ 1053{
1017 struct be_mcc_wrb *wrb; 1054 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_cq_create *req; 1055 struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1028 ctxt = &req->context; 1065 ctxt = &req->context;
1029 1066
1030 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1067 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1031 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); 1068 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1069 NULL);
1032 1070
1033 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1071 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1034 1072
1035 if (BEx_chip(adapter)) { 1073 if (BEx_chip(adapter)) {
1036 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1074 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1037 coalesce_wm); 1075 coalesce_wm);
1038 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1076 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1039 ctxt, no_delay); 1077 ctxt, no_delay);
1040 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1078 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1041 __ilog2_u32(cq->len/256)); 1079 __ilog2_u32(cq->len / 256));
1042 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1080 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1043 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1081 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1044 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 1082 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1053 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1091 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1054 ctxt, coalesce_wm); 1092 ctxt, coalesce_wm);
1055 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1093 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1056 no_delay); 1094 no_delay);
1057 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1095 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1058 __ilog2_u32(cq->len/256)); 1096 __ilog2_u32(cq->len / 256));
1059 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1097 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1060 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, 1098 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1061 ctxt, 1); 1099 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1062 AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1063 ctxt, eq->id);
1064 } 1100 }
1065 1101
1066 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1102 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
1088} 1124}
1089 1125
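be_encoded_q_len() (the context line above) maps a ring length to the firmware's encoded ring_size field consumed by AMAP_SET_BITS below; a sketch of the usual SLI encoding, in which the maximum ring size wraps to 0 (an assumption, not confirmed by this hunk):

	static u32 example_encoded_q_len(int q_len)
	{
		u32 len_encoded = fls(q_len);	/* log2(q_len) + 1 for power-of-2 rings */

		if (len_encoded == 16)		/* assumed: largest ring encodes as 0 */
			len_encoded = 0;
		return len_encoded;
	}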
1090static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1126static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1091 struct be_queue_info *mccq, 1127 struct be_queue_info *mccq,
1092 struct be_queue_info *cq) 1128 struct be_queue_info *cq)
1093{ 1129{
1094 struct be_mcc_wrb *wrb; 1130 struct be_mcc_wrb *wrb;
1095 struct be_cmd_req_mcc_ext_create *req; 1131 struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1105 ctxt = &req->context; 1141 ctxt = &req->context;
1106 1142
1107 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1108 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); 1144 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1145 NULL);
1109 1146
1110 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1147 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1111 if (BEx_chip(adapter)) { 1148 if (BEx_chip(adapter)) {
1112 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1149 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1113 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1150 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1114 be_encoded_q_len(mccq->len)); 1151 be_encoded_q_len(mccq->len));
1115 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1152 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1116 } else { 1153 } else {
1117 req->hdr.version = 1; 1154 req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1145} 1182}
1146 1183
1147static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1184static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1148 struct be_queue_info *mccq, 1185 struct be_queue_info *mccq,
1149 struct be_queue_info *cq) 1186 struct be_queue_info *cq)
1150{ 1187{
1151 struct be_mcc_wrb *wrb; 1188 struct be_mcc_wrb *wrb;
1152 struct be_cmd_req_mcc_create *req; 1189 struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1162 ctxt = &req->context; 1199 ctxt = &req->context;
1163 1200
1164 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1201 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1165 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); 1202 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1203 NULL);
1166 1204
1167 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1205 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1168 1206
1169 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1207 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1170 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1208 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1171 be_encoded_q_len(mccq->len)); 1209 be_encoded_q_len(mccq->len));
1172 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1210 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1173 1211
1174 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1212 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1187} 1225}
1188 1226
1189int be_cmd_mccq_create(struct be_adapter *adapter, 1227int be_cmd_mccq_create(struct be_adapter *adapter,
1190 struct be_queue_info *mccq, 1228 struct be_queue_info *mccq, struct be_queue_info *cq)
1191 struct be_queue_info *cq)
1192{ 1229{
1193 int status; 1230 int status;
1194 1231
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1213 1250
1214 req = embedded_payload(&wrb); 1251 req = embedded_payload(&wrb);
1215 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1252 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1216 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1253 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1217 1254
1218 if (lancer_chip(adapter)) { 1255 if (lancer_chip(adapter)) {
1219 req->hdr.version = 1; 1256 req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1250 1287
1251/* Uses MCC */ 1288/* Uses MCC */
1252int be_cmd_rxq_create(struct be_adapter *adapter, 1289int be_cmd_rxq_create(struct be_adapter *adapter,
1253 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1290 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1254 u32 if_id, u32 rss, u8 *rss_id) 1291 u32 if_id, u32 rss, u8 *rss_id)
1255{ 1292{
1256 struct be_mcc_wrb *wrb; 1293 struct be_mcc_wrb *wrb;
1257 struct be_cmd_req_eth_rx_create *req; 1294 struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
1268 req = embedded_payload(wrb); 1305 req = embedded_payload(wrb);
1269 1306
1270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1307 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1271 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1308 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1272 1309
1273 req->cq_id = cpu_to_le16(cq_id); 1310 req->cq_id = cpu_to_le16(cq_id);
1274 req->frag_size = fls(frag_size) - 1; 1311 req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
1295 * Uses Mbox 1332 * Uses Mbox
1296 */ 1333 */
1297int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1334int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1298 int queue_type) 1335 int queue_type)
1299{ 1336{
1300 struct be_mcc_wrb *wrb; 1337 struct be_mcc_wrb *wrb;
1301 struct be_cmd_req_q_destroy *req; 1338 struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1334 } 1371 }
1335 1372
1336 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1373 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1337 NULL); 1374 NULL);
1338 req->id = cpu_to_le16(q->id); 1375 req->id = cpu_to_le16(q->id);
1339 1376
1340 status = be_mbox_notify_wait(adapter); 1377 status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1361 req = embedded_payload(wrb); 1398 req = embedded_payload(wrb);
1362 1399
1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1400 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1364 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1401 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1365 req->id = cpu_to_le16(q->id); 1402 req->id = cpu_to_le16(q->id);
1366 1403
1367 status = be_mcc_notify_wait(adapter); 1404 status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1384 1421
1385 req = embedded_payload(&wrb); 1422 req = embedded_payload(&wrb);
1386 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1423 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1387 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL); 1424 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1425 sizeof(*req), &wrb, NULL);
1388 req->hdr.domain = domain; 1426 req->hdr.domain = domain;
1389 req->capability_flags = cpu_to_le32(cap_flags); 1427 req->capability_flags = cpu_to_le32(cap_flags);
1390 req->enable_flags = cpu_to_le32(en_flags); 1428 req->enable_flags = cpu_to_le32(en_flags);
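be_cmd_if_create() programs both the interface's capability mask and the initially enabled subset of it. A hypothetical call creating a plain untagged/broadcast interface (the trailing if-handle and domain parameters are assumed from the hunk's context; flag names are the BE_IF_FLAGS_* constants used throughout this file):

	static int example_create_if(struct be_adapter *adapter, u32 *if_handle)
	{
		u32 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_RSS;
		u32 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

		return be_cmd_if_create(adapter, cap_flags, en_flags,
					if_handle, 0 /* domain */);
	}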
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1422 req = embedded_payload(wrb); 1460 req = embedded_payload(wrb);
1423 1461
1424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1425 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); 1463 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1464 sizeof(*req), wrb, NULL);
1426 req->hdr.domain = domain; 1465 req->hdr.domain = domain;
1427 req->interface_id = cpu_to_le32(interface_id); 1466 req->interface_id = cpu_to_le32(interface_id);
1428 1467
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1452 hdr = nonemb_cmd->va; 1491 hdr = nonemb_cmd->va;
1453 1492
1454 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1493 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1455 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1494 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1495 nonemb_cmd);
1456 1496
1457 /* version 1 of the cmd is supported on all chips except BE2 */ 1497
1458 if (BE2_chip(adapter)) 1498 if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
1472 1512
1473/* Lancer Stats */ 1513/* Lancer Stats */
1474int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1514int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1475 struct be_dma_mem *nonemb_cmd) 1515 struct be_dma_mem *nonemb_cmd)
1476{ 1516{
1477 1517
1478 struct be_mcc_wrb *wrb; 1518 struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1493 req = nonemb_cmd->va; 1533 req = nonemb_cmd->va;
1494 1534
1495 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1496 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1536 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1497 nonemb_cmd); 1537 wrb, nonemb_cmd);
1498 1538
1499 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1539 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1500 req->cmd_params.params.reset_stats = 0; 1540 req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1553 req = embedded_payload(wrb); 1593 req = embedded_payload(wrb);
1554 1594
1555 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1595 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1556 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1596 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1597 sizeof(*req), wrb, NULL);
1557 1598
1558 /* version 1 of the cmd is supported on all chips except BE2 */ 1599
1559 if (!BE2_chip(adapter)) 1600 if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1598 req = embedded_payload(wrb); 1639 req = embedded_payload(wrb);
1599 1640
1600 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1601 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1642 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1602 wrb, NULL); 1643 sizeof(*req), wrb, NULL);
1603 1644
1604 be_mcc_notify(adapter); 1645 be_mcc_notify(adapter);
1605 1646
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1625 req = embedded_payload(wrb); 1666 req = embedded_payload(wrb);
1626 1667
1627 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1668 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1628 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); 1669 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1670 NULL);
1629 req->fat_operation = cpu_to_le32(QUERY_FAT); 1671 req->fat_operation = cpu_to_le32(QUERY_FAT);
1630 status = be_mcc_notify_wait(adapter); 1672 status = be_mcc_notify_wait(adapter);
1631 if (!status) { 1673 if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1655 1697
1656 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1698 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1657 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1699 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1658 get_fat_cmd.size, 1700 get_fat_cmd.size,
1659 &get_fat_cmd.dma); 1701 &get_fat_cmd.dma);
1660 if (!get_fat_cmd.va) { 1702 if (!get_fat_cmd.va) {
1661 status = -ENOMEM; 1703 status = -ENOMEM;
1662 dev_err(&adapter->pdev->dev, 1704 dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1679 1721
1680 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1722 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1681 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1723 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1682 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, 1724 OPCODE_COMMON_MANAGE_FAT, payload_len,
1683 &get_fat_cmd); 1725 wrb, &get_fat_cmd);
1684 1726
1685 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1727 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1686 req->read_log_offset = cpu_to_le32(log_offset); 1728 req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1691 if (!status) { 1733 if (!status) {
1692 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1734 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1693 memcpy(buf + offset, 1735 memcpy(buf + offset,
1694 resp->data_buffer, 1736 resp->data_buffer,
1695 le32_to_cpu(resp->read_log_length)); 1737 le32_to_cpu(resp->read_log_length));
1696 } else { 1738 } else {
1697 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1739 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1698 goto err; 1740 goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1702 } 1744 }
1703err: 1745err:
1704 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1746 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1705 get_fat_cmd.va, 1747 get_fat_cmd.va, get_fat_cmd.dma);
1706 get_fat_cmd.dma);
1707 spin_unlock_bh(&adapter->mcc_lock); 1748 spin_unlock_bh(&adapter->mcc_lock);
1708} 1749}
1709 1750
1710/* Uses synchronous mcc */ 1751/* Uses synchronous mcc */
1711int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1752int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1712 char *fw_on_flash) 1753 char *fw_on_flash)
1713{ 1754{
1714 struct be_mcc_wrb *wrb; 1755 struct be_mcc_wrb *wrb;
1715 struct be_cmd_req_get_fw_version *req; 1756 struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1726 req = embedded_payload(wrb); 1767 req = embedded_payload(wrb);
1727 1768
1728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1769 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1729 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); 1770 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1771 NULL);
1730 status = be_mcc_notify_wait(adapter); 1772 status = be_mcc_notify_wait(adapter);
1731 if (!status) { 1773 if (!status) {
1732 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1774 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1759 req = embedded_payload(wrb); 1801 req = embedded_payload(wrb);
1760 1802
1761 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1803 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1762 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); 1804 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1805 NULL);
1763 1806
1764 req->num_eq = cpu_to_le32(num); 1807 req->num_eq = cpu_to_le32(num);
1765 for (i = 0; i < num; i++) { 1808 for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
1777 1820
1778/* Uses synchronous mcc */ 1821
1779int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1822int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1780 u32 num, bool promiscuous) 1823 u32 num)
1781{ 1824{
1782 struct be_mcc_wrb *wrb; 1825 struct be_mcc_wrb *wrb;
1783 struct be_cmd_req_vlan_config *req; 1826 struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1793 req = embedded_payload(wrb); 1836 req = embedded_payload(wrb);
1794 1837
1795 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1838 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1796 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); 1839 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1840 wrb, NULL);
1797 1841
1798 req->interface_id = if_id; 1842 req->interface_id = if_id;
1799 req->promiscuous = promiscuous;
1800 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1843 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1801 req->num_vlan = num; 1844 req->num_vlan = num;
1802 if (!promiscuous) { 1845 memcpy(req->normal_vlan, vtag_array,
1803 memcpy(req->normal_vlan, vtag_array, 1846 req->num_vlan * sizeof(vtag_array[0]));
1804 req->num_vlan * sizeof(vtag_array[0]));
1805 }
1806 1847
1807 status = be_mcc_notify_wait(adapter); 1848 status = be_mcc_notify_wait(adapter);
1808
1809err: 1849err:
1810 spin_unlock_bh(&adapter->mcc_lock); 1850 spin_unlock_bh(&adapter->mcc_lock);
1811 return status; 1851 return status;
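With the promiscuous argument dropped, NTWK_VLAN_CONFIG only ever programs an explicit VID table; falling back to VLAN promiscuity is handled separately through the rx-filter command in the next hunk. A hypothetical caller now reduces to:

	/* Hypothetical helper: program the currently configured VIDs, nothing more. */
	static int example_vid_config(struct be_adapter *adapter,
				      u16 *vids, u16 num)
	{
		return be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	}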
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1827 } 1867 }
1828 memset(req, 0, sizeof(*req)); 1868 memset(req, 0, sizeof(*req));
1829 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1869 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1830 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1870 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1831 wrb, mem); 1871 wrb, mem);
1832 1872
1833 req->if_id = cpu_to_le32(adapter->if_handle); 1873 req->if_id = cpu_to_le32(adapter->if_handle);
1834 if (flags & IFF_PROMISC) { 1874 if (flags & IFF_PROMISC) {
1835 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1875 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1836 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1876 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1837 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1877 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1838 if (value == ON) 1878 if (value == ON)
1839 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1879 req->if_flags =
1840 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1880 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1841 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1881 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1882 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1842 } else if (flags & IFF_ALLMULTI) { 1883 } else if (flags & IFF_ALLMULTI) {
1843 req->if_flags_mask = req->if_flags = 1884 req->if_flags_mask = req->if_flags =
1844 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1885 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1867 } 1908 }
1868 1909
1869 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != 1910 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1870 req->if_flags_mask) { 1911 req->if_flags_mask) {
1871 dev_warn(&adapter->pdev->dev, 1912 dev_warn(&adapter->pdev->dev,
1872 "Cannot set rx filter flags 0x%x\n", 1913 "Cannot set rx filter flags 0x%x\n",
1873 req->if_flags_mask); 1914 req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1905 req = embedded_payload(wrb); 1946 req = embedded_payload(wrb);
1906 1947
1907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1948 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1908 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1949 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1950 wrb, NULL);
1909 1951
1910 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1952 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1911 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 1953 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1938 req = embedded_payload(wrb); 1980 req = embedded_payload(wrb);
1939 1981
1940 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1941 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1983 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
1984 wrb, NULL);
1942 1985
1943 status = be_mcc_notify_wait(adapter); 1986 status = be_mcc_notify_wait(adapter);
1944 if (!status) { 1987 if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1968 req = embedded_payload(wrb); 2011 req = embedded_payload(wrb);
1969 2012
1970 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1971 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); 2014 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2015 sizeof(*req), wrb, NULL);
1972 2016
1973 status = be_mbox_notify_wait(adapter); 2017 status = be_mbox_notify_wait(adapter);
1974 if (!status) { 2018 if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
2011 req = embedded_payload(wrb); 2055 req = embedded_payload(wrb);
2012 2056
2013 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2057 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2014 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); 2058 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2059 NULL);
2015 2060
2016 status = be_mbox_notify_wait(adapter); 2061 status = be_mbox_notify_wait(adapter);
2017 2062
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
2020} 2065}
2021 2066
2022int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2067int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2023 u32 rss_hash_opts, u16 table_size) 2068 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2024{ 2069{
2025 struct be_mcc_wrb *wrb; 2070 struct be_mcc_wrb *wrb;
2026 struct be_cmd_req_rss_config *req; 2071 struct be_cmd_req_rss_config *req;
2027 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
2028 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
2029 0x3ea83c02, 0x4a110304};
2030 int status; 2072 int status;
2031 2073
2032 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2074 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2033 return 0; 2075 return 0;
2034 2076
2035 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2077 spin_lock_bh(&adapter->mcc_lock);
2036 return -1;
2037 2078
2038 wrb = wrb_from_mbox(adapter); 2079 wrb = wrb_from_mccq(adapter);
2080 if (!wrb) {
2081 status = -EBUSY;
2082 goto err;
2083 }
2039 req = embedded_payload(wrb); 2084 req = embedded_payload(wrb);
2040 2085
2041 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2086 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2042 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2087 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2043 2088
2044 req->if_id = cpu_to_le32(adapter->if_handle); 2089 req->if_id = cpu_to_le32(adapter->if_handle);
2045 req->enable_rss = cpu_to_le16(rss_hash_opts); 2090 req->enable_rss = cpu_to_le16(rss_hash_opts);
2046 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2091 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2047 2092
2048 if (lancer_chip(adapter) || skyhawk_chip(adapter)) 2093 if (!BEx_chip(adapter))
2049 req->hdr.version = 1; 2094 req->hdr.version = 1;
2050 2095
2051 memcpy(req->cpu_table, rsstable, table_size); 2096 memcpy(req->cpu_table, rsstable, table_size);
2052 memcpy(req->hash, myhash, sizeof(myhash)); 2097 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2053 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2098 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2054 2099
2055 status = be_mbox_notify_wait(adapter); 2100 status = be_mcc_notify_wait(adapter);
2056 2101err:
2057 mutex_unlock(&adapter->mbox_lock); 2102 spin_unlock_bh(&adapter->mcc_lock);
2058 return status; 2103 return status;
2059} 2104}
2060 2105
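The hard-coded myhash[] table is gone: callers now supply the Toeplitz key, and the command runs on the MCC queue under mcc_lock instead of the mailbox. A sketch of the expected call pattern (the 40-byte key size and the adapter field storing it are assumptions; get_random_bytes() is the stock kernel primitive):

	static int example_enable_rss(struct be_adapter *adapter, u8 *rsstable,
				      u32 hash_opts, u16 table_size)
	{
		u8 key[RSS_HASH_KEY_LEN];	/* assumed 40-byte Toeplitz key */
		int rc;

		get_random_bytes(key, sizeof(key));
		rc = be_cmd_rss_config(adapter, rsstable, hash_opts,
				       table_size, key);
		if (!rc)
			memcpy(adapter->rss_hkey, key, sizeof(key));	/* field name assumed */
		return rc;
	}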
2061/* Uses sync mcc */ 2106/* Uses sync mcc */
2062int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2107int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2063 u8 bcn, u8 sts, u8 state) 2108 u8 bcn, u8 sts, u8 state)
2064{ 2109{
2065 struct be_mcc_wrb *wrb; 2110 struct be_mcc_wrb *wrb;
2066 struct be_cmd_req_enable_disable_beacon *req; 2111 struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2076 req = embedded_payload(wrb); 2121 req = embedded_payload(wrb);
2077 2122
2078 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2123 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2079 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); 2124 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2125 sizeof(*req), wrb, NULL);
2080 2126
2081 req->port_num = port_num; 2127 req->port_num = port_num;
2082 req->beacon_state = state; 2128 req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2107 req = embedded_payload(wrb); 2153 req = embedded_payload(wrb);
2108 2154
2109 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2110 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL); 2156 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2157 wrb, NULL);
2111 2158
2112 req->port_num = port_num; 2159 req->port_num = port_num;
2113 2160
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2146 req = embedded_payload(wrb); 2193 req = embedded_payload(wrb);
2147 2194
2148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2195 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2149 OPCODE_COMMON_WRITE_OBJECT, 2196 OPCODE_COMMON_WRITE_OBJECT,
2150 sizeof(struct lancer_cmd_req_write_object), wrb, 2197 sizeof(struct lancer_cmd_req_write_object), wrb,
2151 NULL); 2198 NULL);
2152 2199
2153 ctxt = &req->context; 2200 ctxt = &req->context;
2154 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2201 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2155 write_length, ctxt, data_size); 2202 write_length, ctxt, data_size);
2156 2203
2157 if (data_size == 0) 2204 if (data_size == 0)
2158 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2205 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2159 eof, ctxt, 1); 2206 eof, ctxt, 1);
2160 else 2207 else
2161 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2208 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2162 eof, ctxt, 0); 2209 eof, ctxt, 0);
2163 2210
2164 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2211 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2165 req->write_offset = cpu_to_le32(data_offset); 2212 req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2167 req->descriptor_count = cpu_to_le32(1); 2214 req->descriptor_count = cpu_to_le32(1);
2168 req->buf_len = cpu_to_le32(data_size); 2215 req->buf_len = cpu_to_le32(data_size);
2169 req->addr_low = cpu_to_le32((cmd->dma + 2216 req->addr_low = cpu_to_le32((cmd->dma +
2170 sizeof(struct lancer_cmd_req_write_object)) 2217 sizeof(struct lancer_cmd_req_write_object))
2171 & 0xFFFFFFFF); 2218 & 0xFFFFFFFF);
2172 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2219 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2173 sizeof(struct lancer_cmd_req_write_object))); 2220 sizeof(struct lancer_cmd_req_write_object)));
2174 2221
@@ -2197,8 +2244,8 @@ err_unlock:
2197} 2244}
2198 2245
2199int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2246int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2200 u32 data_size, u32 data_offset, const char *obj_name, 2247 u32 data_size, u32 data_offset, const char *obj_name,
2201 u32 *data_read, u32 *eof, u8 *addn_status) 2248 u32 *data_read, u32 *eof, u8 *addn_status)
2202{ 2249{
2203 struct be_mcc_wrb *wrb; 2250 struct be_mcc_wrb *wrb;
2204 struct lancer_cmd_req_read_object *req; 2251 struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2216 req = embedded_payload(wrb); 2263 req = embedded_payload(wrb);
2217 2264
2218 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2265 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2219 OPCODE_COMMON_READ_OBJECT, 2266 OPCODE_COMMON_READ_OBJECT,
2220 sizeof(struct lancer_cmd_req_read_object), wrb, 2267 sizeof(struct lancer_cmd_req_read_object), wrb,
2221 NULL); 2268 NULL);
2222 2269
2223 req->desired_read_len = cpu_to_le32(data_size); 2270 req->desired_read_len = cpu_to_le32(data_size);
2224 req->read_offset = cpu_to_le32(data_offset); 2271 req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
2244} 2291}
2245 2292
2246int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2293int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2247 u32 flash_type, u32 flash_opcode, u32 buf_size) 2294 u32 flash_type, u32 flash_opcode, u32 buf_size)
2248{ 2295{
2249 struct be_mcc_wrb *wrb; 2296 struct be_mcc_wrb *wrb;
2250 struct be_cmd_write_flashrom *req; 2297 struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2261 req = cmd->va; 2308 req = cmd->va;
2262 2309
2263 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2264 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); 2311 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2312 cmd);
2265 2313
2266 req->params.op_type = cpu_to_le32(flash_type); 2314 req->params.op_type = cpu_to_le32(flash_type);
2267 req->params.op_code = cpu_to_le32(flash_opcode); 2315 req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
2284} 2332}
2285 2333
2286int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2334int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2287 int offset) 2335 u16 optype, int offset)
2288{ 2336{
2289 struct be_mcc_wrb *wrb; 2337 struct be_mcc_wrb *wrb;
2290 struct be_cmd_read_flash_crc *req; 2338 struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2303 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2351 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2304 wrb, NULL); 2352 wrb, NULL);
2305 2353
2306 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); 2354 req->params.op_type = cpu_to_le32(optype);
2307 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2355 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2308 req->params.offset = cpu_to_le32(offset); 2356 req->params.offset = cpu_to_le32(offset);
2309 req->params.data_buf_size = cpu_to_le32(0x4); 2357 req->params.data_buf_size = cpu_to_le32(0x4);
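be_cmd_get_flash_crc() now takes the flash-region op_type from its caller instead of hard-coding OPTYPE_REDBOOT; existing call sites presumably keep the old behaviour by passing the constant explicitly:

	/* Hypothetical call site preserving the pre-patch behaviour. */
	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      OPTYPE_REDBOOT, offset);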
@@ -2318,7 +2366,7 @@ err:
2318} 2366}
2319 2367
2320int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2368int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2321 struct be_dma_mem *nonemb_cmd) 2369 struct be_dma_mem *nonemb_cmd)
2322{ 2370{
2323 struct be_mcc_wrb *wrb; 2371 struct be_mcc_wrb *wrb;
2324 struct be_cmd_req_acpi_wol_magic_config *req; 2372 struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2334 req = nonemb_cmd->va; 2382 req = nonemb_cmd->va;
2335 2383
2336 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2337 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, 2385 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2338 nonemb_cmd); 2386 wrb, nonemb_cmd);
2339 memcpy(req->magic_mac, mac, ETH_ALEN); 2387 memcpy(req->magic_mac, mac, ETH_ALEN);
2340 2388
2341 status = be_mcc_notify_wait(adapter); 2389 status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2363 req = embedded_payload(wrb); 2411 req = embedded_payload(wrb);
2364 2412
2365 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2413 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2366 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, 2414 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2367 NULL); 2415 wrb, NULL);
2368 2416
2369 req->src_port = port_num; 2417 req->src_port = port_num;
2370 req->dest_port = port_num; 2418 req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
2378} 2426}
2379 2427
2380int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2428int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2381 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 2429 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2430 u64 pattern)
2382{ 2431{
2383 struct be_mcc_wrb *wrb; 2432 struct be_mcc_wrb *wrb;
2384 struct be_cmd_req_loopback_test *req; 2433 struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2396 req = embedded_payload(wrb); 2445 req = embedded_payload(wrb);
2397 2446
2398 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2447 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2399 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2448 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2449 NULL);
2400 2450
2401 req->hdr.timeout = cpu_to_le32(15); 2451 req->hdr.timeout = cpu_to_le32(15);
2402 req->pattern = cpu_to_le64(pattern); 2452 req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
2421} 2471}
2422 2472
2423int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2473int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2424 u32 byte_cnt, struct be_dma_mem *cmd) 2474 u32 byte_cnt, struct be_dma_mem *cmd)
2425{ 2475{
2426 struct be_mcc_wrb *wrb; 2476 struct be_mcc_wrb *wrb;
2427 struct be_cmd_req_ddrdma_test *req; 2477 struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2437 } 2487 }
2438 req = cmd->va; 2488 req = cmd->va;
2439 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2489 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2440 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); 2490 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2491 cmd);
2441 2492
2442 req->pattern = cpu_to_le64(pattern); 2493 req->pattern = cpu_to_le64(pattern);
2443 req->byte_count = cpu_to_le32(byte_cnt); 2494 req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
2465} 2516}
2466 2517
2467int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2518int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2468 struct be_dma_mem *nonemb_cmd) 2519 struct be_dma_mem *nonemb_cmd)
2469{ 2520{
2470 struct be_mcc_wrb *wrb; 2521 struct be_mcc_wrb *wrb;
2471 struct be_cmd_req_seeprom_read *req; 2522 struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2481 req = nonemb_cmd->va; 2532 req = nonemb_cmd->va;
2482 2533
2483 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2534 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2484 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2535 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2485 nonemb_cmd); 2536 nonemb_cmd);
2486 2537
2487 status = be_mcc_notify_wait(adapter); 2538 status = be_mcc_notify_wait(adapter);
2488 2539
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2510 goto err; 2561 goto err;
2511 } 2562 }
2512 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2563 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2513 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2564 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2514 &cmd.dma);
2515 if (!cmd.va) { 2565 if (!cmd.va) {
2516 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2566 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2517 status = -ENOMEM; 2567 status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2521 req = cmd.va; 2571 req = cmd.va;
2522 2572
2523 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2573 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2524 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2574 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2525 wrb, &cmd); 2575 wrb, &cmd);
2526 2576
2527 status = be_mcc_notify_wait(adapter); 2577 status = be_mcc_notify_wait(adapter);
2528 if (!status) { 2578 if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2544 BE_SUPPORTED_SPEED_1GBPS; 2594 BE_SUPPORTED_SPEED_1GBPS;
2545 } 2595 }
2546 } 2596 }
2547 pci_free_consistent(adapter->pdev, cmd.size, 2597 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2548 cmd.va, cmd.dma);
2549err: 2598err:
2550 spin_unlock_bh(&adapter->mcc_lock); 2599 spin_unlock_bh(&adapter->mcc_lock);
2551 return status; 2600 return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2568 req = embedded_payload(wrb); 2617 req = embedded_payload(wrb);
2569 2618
2570 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2619 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2571 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2620 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2572 2621
2573 req->hdr.domain = domain; 2622 req->hdr.domain = domain;
2574 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2623 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2597 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2646 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2598 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2647 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2599 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2648 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2600 &attribs_cmd.dma); 2649 &attribs_cmd.dma);
2601 if (!attribs_cmd.va) { 2650 if (!attribs_cmd.va) {
2602 dev_err(&adapter->pdev->dev, 2651 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2603 "Memory allocation failure\n");
2604 status = -ENOMEM; 2652 status = -ENOMEM;
2605 goto err; 2653 goto err;
2606 } 2654 }
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2613 req = attribs_cmd.va; 2661 req = attribs_cmd.va;
2614 2662
2615 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2663 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2616 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, 2664 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2617 &attribs_cmd); 2665 wrb, &attribs_cmd);
2618 2666
2619 status = be_mbox_notify_wait(adapter); 2667 status = be_mbox_notify_wait(adapter);
2620 if (!status) { 2668 if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
2649 req = embedded_payload(wrb); 2697 req = embedded_payload(wrb);
2650 2698
2651 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2699 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2652 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); 2700 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2701 sizeof(*req), wrb, NULL);
2653 2702
2654 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2703 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2655 CAPABILITY_BE3_NATIVE_ERX_API); 2704 CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2762 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2811 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2763 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2812 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2764 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2813 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2765 get_mac_list_cmd.size, 2814 get_mac_list_cmd.size,
2766 &get_mac_list_cmd.dma); 2815 &get_mac_list_cmd.dma);
2767 2816
2768 if (!get_mac_list_cmd.va) { 2817 if (!get_mac_list_cmd.va) {
2769 dev_err(&adapter->pdev->dev, 2818 dev_err(&adapter->pdev->dev,
2770 "Memory allocation failure during GET_MAC_LIST\n"); 2819 "Memory allocation failure during GET_MAC_LIST\n");
2771 return -ENOMEM; 2820 return -ENOMEM;
2772 } 2821 }
2773 2822
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2831 /* If no active mac_id found, return first mac addr */ 2880 /* If no active mac_id found, return first mac addr */
2832 *pmac_id_valid = false; 2881 *pmac_id_valid = false;
2833 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2882 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2834 ETH_ALEN); 2883 ETH_ALEN);
2835 } 2884 }
2836 2885
2837out: 2886out:
2838 spin_unlock_bh(&adapter->mcc_lock); 2887 spin_unlock_bh(&adapter->mcc_lock);
2839 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 2888 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2840 get_mac_list_cmd.va, get_mac_list_cmd.dma); 2889 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2841 return status; 2890 return status;
2842} 2891}
2843 2892
2844int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, 2893int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
2845 u32 if_handle, bool active, u32 domain) 2894 u8 *mac, u32 if_handle, bool active, u32 domain)
2846{ 2895{
2847 2896
2848 if (!active) 2897 if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2892 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2941 memset(&cmd, 0, sizeof(struct be_dma_mem));
2893 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2942 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2894 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2943 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2895 &cmd.dma, GFP_KERNEL); 2944 &cmd.dma, GFP_KERNEL);
2896 if (!cmd.va) 2945 if (!cmd.va)
2897 return -ENOMEM; 2946 return -ENOMEM;
2898 2947
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2906 2955
2907 req = cmd.va; 2956 req = cmd.va;
2908 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2957 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2909 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2958 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2910 wrb, &cmd); 2959 wrb, &cmd);
2911 2960
2912 req->hdr.domain = domain; 2961 req->hdr.domain = domain;
2913 req->mac_count = mac_count; 2962 req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2917 status = be_mcc_notify_wait(adapter); 2966 status = be_mcc_notify_wait(adapter);
2918 2967
2919err: 2968err:
2920 dma_free_coherent(&adapter->pdev->dev, cmd.size, 2969 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2921 cmd.va, cmd.dma);
2922 spin_unlock_bh(&adapter->mcc_lock); 2970 spin_unlock_bh(&adapter->mcc_lock);
2923 return status; 2971 return status;
2924} 2972}
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2963 ctxt = &req->context; 3011 ctxt = &req->context;
2964 3012
2965 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2966 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); 3014 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3015 NULL);
2967 3016
2968 req->hdr.domain = domain; 3017 req->hdr.domain = domain;
2969 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3018 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3009 ctxt = &req->context; 3058 ctxt = &req->context;
3010 3059
3011 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3060 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3012 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); 3061 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3062 NULL);
3013 3063
3014 req->hdr.domain = domain; 3064 req->hdr.domain = domain;
3015 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3065 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3027 if (!status) { 3077 if (!status) {
3028 struct be_cmd_resp_get_hsw_config *resp = 3078 struct be_cmd_resp_get_hsw_config *resp =
3029 embedded_payload(wrb); 3079 embedded_payload(wrb);
3030 be_dws_le_to_cpu(&resp->context, 3080 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3031 sizeof(resp->context));
3032 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3081 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3033 pvid, &resp->context); 3082 pvid, &resp->context);
3034 if (pvid) 3083 if (pvid)
3035 *pvid = le16_to_cpu(vid); 3084 *pvid = le16_to_cpu(vid);
3036 if (mode) 3085 if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3062 3111
3063 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3112 memset(&cmd, 0, sizeof(struct be_dma_mem));
3064 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3113 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3065 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3114 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3066 &cmd.dma);
3067 if (!cmd.va) { 3115 if (!cmd.va) {
3068 dev_err(&adapter->pdev->dev, 3116 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3069 "Memory allocation failure\n");
3070 status = -ENOMEM; 3117 status = -ENOMEM;
3071 goto err; 3118 goto err;
3072 } 3119 }
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3349 3396
3350 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3397 memset(&cmd, 0, sizeof(struct be_dma_mem));
3351 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3398 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3352 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3399 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3353 &cmd.dma);
3354 if (!cmd.va) { 3400 if (!cmd.va) {
3355 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3401 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3356 status = -ENOMEM; 3402 status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
3396 3442
3397/* Uses mbox */ 3443/* Uses mbox */
3398static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, 3444static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3399 u8 domain, struct be_dma_mem *cmd) 3445 u8 domain, struct be_dma_mem *cmd)
3400{ 3446{
3401 struct be_mcc_wrb *wrb; 3447 struct be_mcc_wrb *wrb;
3402 struct be_cmd_req_get_profile_config *req; 3448 struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3424 3470
3425/* Uses sync mcc */ 3471/* Uses sync mcc */
3426static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, 3472static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3427 u8 domain, struct be_dma_mem *cmd) 3473 u8 domain, struct be_dma_mem *cmd)
3428{ 3474{
3429 struct be_mcc_wrb *wrb; 3475 struct be_mcc_wrb *wrb;
3430 struct be_cmd_req_get_profile_config *req; 3476 struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3484 resp = cmd.va; 3530 resp = cmd.va;
3485 desc_count = le32_to_cpu(resp->desc_count); 3531 desc_count = le32_to_cpu(resp->desc_count);
3486 3532
3487 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3533 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3488 desc_count); 3534 desc_count);
3489 if (pcie) 3535 if (pcie)
3490 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3536 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3491 3537
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
3548 nic->cq_count = 0xFFFF; 3594 nic->cq_count = 0xFFFF;
3549 nic->toe_conn_count = 0xFFFF; 3595 nic->toe_conn_count = 0xFFFF;
3550 nic->eq_count = 0xFFFF; 3596 nic->eq_count = 0xFFFF;
3597 nic->iface_count = 0xFFFF;
3551 nic->link_param = 0xFF; 3598 nic->link_param = 0xFF;
3599 nic->channel_id_param = cpu_to_le16(0xF000);
3552 nic->acpi_params = 0xFF; 3600 nic->acpi_params = 0xFF;
3553 nic->wol_param = 0x0F; 3601 nic->wol_param = 0x0F;
3554 nic->bw_min = 0xFFFFFFFF; 3602 nic->tunnel_iface_count = 0xFFFF;
3603 nic->direct_tenant_iface_count = 0xFFFF;
3555 nic->bw_max = 0xFFFFFFFF; 3604 nic->bw_max = 0xFFFFFFFF;
3556} 3605}
3557 3606
3558int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain) 3607int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3608 u8 domain)
3559{ 3609{
3560 if (lancer_chip(adapter)) { 3610 struct be_nic_res_desc nic_desc;
3561 struct be_nic_res_desc nic_desc; 3611 u32 bw_percent;
3612 u16 version = 0;
3613
3614 if (BE3_chip(adapter))
3615 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3562 3616
3563 be_reset_nic_desc(&nic_desc); 3617 be_reset_nic_desc(&nic_desc);
3618 nic_desc.pf_num = adapter->pf_number;
3619 nic_desc.vf_num = domain;
3620 if (lancer_chip(adapter)) {
3564 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3621 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3565 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3622 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3566 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3623 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3567 (1 << NOSV_SHIFT); 3624 (1 << NOSV_SHIFT);
3568 nic_desc.pf_num = adapter->pf_number; 3625 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3569 nic_desc.vf_num = domain;
3570 nic_desc.bw_max = cpu_to_le32(bps);
3571
3572 return be_cmd_set_profile_config(adapter, &nic_desc,
3573 RESOURCE_DESC_SIZE_V0,
3574 0, domain);
3575 } else { 3626 } else {
3576 return be_cmd_set_qos(adapter, bps, domain); 3627 version = 1;
3628 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3629 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3630 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3631 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3632 nic_desc.bw_max = cpu_to_le32(bw_percent);
3577 } 3633 }
3634
3635 return be_cmd_set_profile_config(adapter, &nic_desc,
3636 nic_desc.hdr.desc_len,
3637 version, domain);
3578} 3638}
3579 3639
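Note on the be_cmd_config_qos() rework above: the rate-limit plumbing now depends on the chip generation. BE3 still takes an absolute rate through SET_QOS in units of 10 Mbps (max_rate / 10), Lancer programs bw_max in the same 10 Mbps units through a v0 NIC resource descriptor, and the remaining path programs bw_max as a percentage of link speed through a v1 descriptor. A minimal standalone sketch of that percentage arithmetic, assuming max_rate and link_speed are both in Mbps as in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the v1-descriptor branch: max_rate == 0 means "no limit",
 * which maps to 100% of the port's link speed.
 */
static uint32_t qos_bw_percent(uint32_t max_rate, uint16_t link_speed)
{
	return max_rate ? (max_rate * 100) / link_speed : 100;
}

int main(void)
{
	printf("%u\n", qos_bw_percent(2500, 10000)); /* 2.5G cap on 10G -> 25 */
	printf("%u\n", qos_bw_percent(0, 10000));    /* uncapped -> 100 */
	return 0;
}

The integer division truncates, so for example a 1250 Mbps cap on a 10G link programs 12, not 12.5.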
3580int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3640int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
3859} 3919}
3860 3920
3861int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3921int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3862 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3922 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3863{ 3923{
3864 struct be_adapter *adapter = netdev_priv(netdev_handle); 3924 struct be_adapter *adapter = netdev_priv(netdev_handle);
3865 struct be_mcc_wrb *wrb; 3925 struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..3e0a6b243806 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
50#define CQE_FLAGS_CONSUMED_MASK (1 << 27) 50#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
51 51
52/* Completion Status */ 52/* Completion Status */
53enum { 53enum mcc_base_status {
54 MCC_STATUS_SUCCESS = 0, 54 MCC_STATUS_SUCCESS = 0,
55 MCC_STATUS_FAILED = 1, 55 MCC_STATUS_FAILED = 1,
56 MCC_STATUS_ILLEGAL_REQUEST = 2, 56 MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16 63/* Additional status */
64enum mcc_addl_status {
65 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
66 MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
67 MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
68};
69
70#define CQE_BASE_STATUS_MASK 0xFFFF
71#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */
72#define CQE_ADDL_STATUS_MASK 0xFF
73#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */
64 74
65#define CQE_STATUS_COMPL_MASK 0xFFFF 75#define base_status(status) \
66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 76 ((enum mcc_base_status) \
67#define CQE_STATUS_EXTD_MASK 0xFFFF 77 (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
68#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */ 78#define addl_status(status) \
79 ((enum mcc_addl_status) \
80 (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
81 CQE_ADDL_STATUS_MASK : 0))
69 82
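The base_status()/addl_status() accessors introduced here replace the old CQE_STATUS_COMPL/EXTD masks: the completion status dword carries the base status in bits 0-15 and the additional status in bits 16-31 (of which only the low byte is meaningful), and the status > 0 guard keeps an already-converted negative errno from being misread as hardware status. A standalone sketch of the same decode, outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define CQE_BASE_STATUS_MASK	0xFFFF
#define CQE_ADDL_STATUS_SHIFT	16
#define CQE_ADDL_STATUS_MASK	0xFF

int main(void)
{
	/* hypothetical completion: base status 1 (MCC_STATUS_FAILED) with
	 * additional status 0x16 (insufficient resources) in bits 16-31
	 */
	uint32_t compl_status = (0x16u << CQE_ADDL_STATUS_SHIFT) | 0x1u;

	printf("base: 0x%x\n", compl_status & CQE_BASE_STATUS_MASK);
	printf("addl: 0x%x\n", (compl_status >> CQE_ADDL_STATUS_SHIFT) &
			       CQE_ADDL_STATUS_MASK);
	return 0;
}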
70struct be_mcc_compl { 83struct be_mcc_compl {
71 u32 status; /* dword 0 */ 84 u32 status; /* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
74 u32 flags; /* dword 3 */ 87 u32 flags; /* dword 3 */
75}; 88};
76 89
77/* When the async bit of mcc_compl is set, the last 4 bytes of 90/* When the async bit of mcc_compl flags is set, flags
78 * mcc_compl is interpreted as follows: 91 * is interpreted as follows:
79 */ 92 */
80#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ 93#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
81#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF 94#define ASYNC_EVENT_CODE_MASK 0xFF
82#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 95#define ASYNC_EVENT_TYPE_SHIFT 16
83#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF 96#define ASYNC_EVENT_TYPE_MASK 0xFF
84#define ASYNC_EVENT_CODE_LINK_STATE 0x1 97#define ASYNC_EVENT_CODE_LINK_STATE 0x1
85#define ASYNC_EVENT_CODE_GRP_5 0x5 98#define ASYNC_EVENT_CODE_GRP_5 0x5
86#define ASYNC_EVENT_QOS_SPEED 0x1 99#define ASYNC_EVENT_QOS_SPEED 0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
89#define ASYNC_EVENT_CODE_QNQ 0x6 102#define ASYNC_EVENT_CODE_QNQ 0x6
90#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1 103#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
91 104
92struct be_async_event_trailer {
93 u32 code;
94};
95
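With be_async_event_trailer gone, the last dword of every async event structure is a plain u32 flags, and the renamed masks above locate the event code in bits 8-15 and, given the 0xFF mask, the event type in bits 16-23. A hedged sketch of the decode an event handler would perform:

#include <stdint.h>
#include <stdio.h>

#define ASYNC_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */
#define ASYNC_EVENT_CODE_MASK	0xFF
#define ASYNC_EVENT_TYPE_SHIFT	16	/* bits 16 - 23 */
#define ASYNC_EVENT_TYPE_MASK	0xFF

int main(void)
{
	/* hypothetical flags dword: GRP_5 (0x5) event of type QOS_SPEED (0x1) */
	uint32_t flags = (0x1u << ASYNC_EVENT_TYPE_SHIFT) |
			 (0x5u << ASYNC_EVENT_CODE_SHIFT);

	printf("event code: 0x%x\n",
	       (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK);
	printf("event type: 0x%x\n",
	       (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK);
	return 0;
}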
96enum { 105enum {
97 LINK_DOWN = 0x0, 106 LINK_DOWN = 0x0,
98 LINK_UP = 0x1 107 LINK_UP = 0x1
@@ -100,7 +109,7 @@ enum {
100#define LINK_STATUS_MASK 0x1 109#define LINK_STATUS_MASK 0x1
101#define LOGICAL_LINK_STATUS_MASK 0x2 110#define LOGICAL_LINK_STATUS_MASK 0x2
102 111
103/* When the event code of an async trailer is link-state, the mcc_compl 112/* When the event code of compl->flags is link-state, the mcc_compl
104 * must be interpreted as follows 113 * must be interpreted as follows
105 */ 114 */
106struct be_async_event_link_state { 115struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
110 u8 port_speed; 119 u8 port_speed;
111 u8 port_fault; 120 u8 port_fault;
112 u8 rsvd0[7]; 121 u8 rsvd0[7];
113 struct be_async_event_trailer trailer; 122 u32 flags;
114} __packed; 123} __packed;
115 124
116/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED 125/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
117 * the mcc_compl must be interpreted as follows 126 * the mcc_compl must be interpreted as follows
118 */ 127 */
119struct be_async_event_grp5_qos_link_speed { 128struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
121 u8 rsvd[5]; 130 u8 rsvd[5];
122 u16 qos_link_speed; 131 u16 qos_link_speed;
123 u32 event_tag; 132 u32 event_tag;
124 struct be_async_event_trailer trailer; 133 u32 flags;
125} __packed; 134} __packed;
126 135
127/* When the event code of an async trailer is GRP5 and event type is 136/* When the event code of compl->flags is GRP5 and event type is
128 * CoS-Priority, the mcc_compl must be interpreted as follows 137 * CoS-Priority, the mcc_compl must be interpreted as follows
129 */ 138 */
130struct be_async_event_grp5_cos_priority { 139struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
134 u8 valid; 143 u8 valid;
135 u8 rsvd0; 144 u8 rsvd0;
136 u8 event_tag; 145 u8 event_tag;
137 struct be_async_event_trailer trailer; 146 u32 flags;
138} __packed; 147} __packed;
139 148
140/* When the event code of an async trailer is GRP5 and event type is 149/* When the event code of compl->flags is GRP5 and event type is
141 * PVID state, the mcc_compl must be interpreted as follows 150 * PVID state, the mcc_compl must be interpreted as follows
142 */ 151 */
143struct be_async_event_grp5_pvid_state { 152struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
146 u16 tag; 155 u16 tag;
147 u32 event_tag; 156 u32 event_tag;
148 u32 rsvd1; 157 u32 rsvd1;
149 struct be_async_event_trailer trailer; 158 u32 flags;
150} __packed; 159} __packed;
151 160
152/* async event indicating outer VLAN tag in QnQ */ 161/* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
156 u16 vlan_tag; 165 u16 vlan_tag;
157 u32 event_tag; 166 u32 event_tag;
158 u8 rsvd1[4]; 167 u8 rsvd1[4];
159 struct be_async_event_trailer trailer; 168 u32 flags;
160} __packed; 169} __packed;
161 170
162struct be_mcc_mailbox { 171struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
258 u8 opcode; /* dword 0 */ 267 u8 opcode; /* dword 0 */
259 u8 subsystem; /* dword 0 */ 268 u8 subsystem; /* dword 0 */
260 u8 rsvd[2]; /* dword 0 */ 269 u8 rsvd[2]; /* dword 0 */
261 u8 status; /* dword 1 */ 270 u8 base_status; /* dword 1 */
262 u8 add_status; /* dword 1 */ 271 u8 addl_status; /* dword 1 */
263 u8 rsvd1[2]; /* dword 1 */ 272 u8 rsvd1[2]; /* dword 1 */
264 u32 response_length; /* dword 2 */ 273 u32 response_length; /* dword 2 */
265 u32 actual_resp_len; /* dword 3 */ 274 u32 actual_resp_len; /* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
1186 struct flashrom_params params; 1195 struct flashrom_params params;
1187 u8 crc[4]; 1196 u8 crc[4];
1188 u8 rsvd[4]; 1197 u8 rsvd[4];
1189}; 1198} __packed;
1199
1190/**************** Lancer Firmware Flash ************/ 1200/**************** Lancer Firmware Flash ************/
1191struct amap_lancer_write_obj_context { 1201struct amap_lancer_write_obj_context {
1192 u8 write_length[24]; 1202 u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
1891 u16 cq_count; 1901 u16 cq_count;
1892 u16 toe_conn_count; 1902 u16 toe_conn_count;
1893 u16 eq_count; 1903 u16 eq_count;
1894 u32 rsvd5; 1904 u16 vlan_id;
1905 u16 iface_count;
1895 u32 cap_flags; 1906 u32 cap_flags;
1896 u8 link_param; 1907 u8 link_param;
1897 u8 rsvd6[3]; 1908 u8 rsvd6;
1909 u16 channel_id_param;
1898 u32 bw_min; 1910 u32 bw_min;
1899 u32 bw_max; 1911 u32 bw_max;
1900 u8 acpi_params; 1912 u8 acpi_params;
1901 u8 wol_param; 1913 u8 wol_param;
1902 u16 rsvd7; 1914 u16 rsvd7;
1903 u32 rsvd8[7]; 1915 u16 tunnel_iface_count;
1916 u16 direct_tenant_iface_count;
1917 u32 rsvd8[6];
1904} __packed; 1918} __packed;
1905 1919
1906/************ Multi-Channel type ***********/ 1920/************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
2060 char *fw_on_flash); 2074 char *fw_on_flash);
2061int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2075int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2062int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2076int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2063 u32 num, bool promiscuous); 2077 u32 num);
2064int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2078int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2065int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2079int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2066int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2080int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
2068 u32 *function_mode, u32 *function_caps, u16 *asic_rev); 2082 u32 *function_mode, u32 *function_caps, u16 *asic_rev);
2069int be_cmd_reset_function(struct be_adapter *adapter); 2083int be_cmd_reset_function(struct be_adapter *adapter);
2070int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2084int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2071 u32 rss_hash_opts, u16 table_size); 2085 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
2072int be_process_mcc(struct be_adapter *adapter); 2086int be_process_mcc(struct be_adapter *adapter);
2073int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, 2087int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
2074 u8 status, u8 state); 2088 u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2084 u32 data_size, u32 data_offset, const char *obj_name, 2098 u32 data_size, u32 data_offset, const char *obj_name,
2085 u32 *data_read, u32 *eof, u8 *addn_status); 2099 u32 *data_read, u32 *eof, u8 *addn_status);
2086int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2100int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2087 int offset); 2101 u16 optype, int offset);
2088int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2102int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2089 struct be_dma_mem *nonemb_cmd); 2103 struct be_dma_mem *nonemb_cmd);
2090int be_cmd_fw_init(struct be_adapter *adapter); 2104int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2101int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2115int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2102 u8 loopback_type, u8 enable); 2116 u8 loopback_type, u8 enable);
2103int be_cmd_get_phy_info(struct be_adapter *adapter); 2117int be_cmd_get_phy_info(struct be_adapter *adapter);
2104int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain); 2118int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
2119 u16 link_speed, u8 domain);
2105void be_detect_error(struct be_adapter *adapter); 2120void be_detect_error(struct be_adapter *adapter);
2106int be_cmd_get_die_temperature(struct be_adapter *adapter); 2121int be_cmd_get_die_temperature(struct be_adapter *adapter);
2107int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 2122int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..e2da4d20dd3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
132 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ 132 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
133 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ 133 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
134 {DRVSTAT_RX_INFO(rx_compl)}, 134 {DRVSTAT_RX_INFO(rx_compl)},
135 {DRVSTAT_RX_INFO(rx_compl_err)},
135 {DRVSTAT_RX_INFO(rx_mcast_pkts)}, 136 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
136 /* Number of page allocation failures while posting receive buffers 137 /* Number of page allocation failures while posting receive buffers
137 * to HW. 138 * to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
181#define BE_NO_LOOPBACK 0xff 182#define BE_NO_LOOPBACK 0xff
182 183
183static void be_get_drvinfo(struct net_device *netdev, 184static void be_get_drvinfo(struct net_device *netdev,
184 struct ethtool_drvinfo *drvinfo) 185 struct ethtool_drvinfo *drvinfo)
185{ 186{
186 struct be_adapter *adapter = netdev_priv(netdev); 187 struct be_adapter *adapter = netdev_priv(netdev);
187 188
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
201 drvinfo->eedump_len = 0; 202 drvinfo->eedump_len = 0;
202} 203}
203 204
204static u32 205static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
205lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
206{ 206{
207 u32 data_read = 0, eof; 207 u32 data_read = 0, eof;
208 u8 addn_status; 208 u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
212 memset(&data_len_cmd, 0, sizeof(data_len_cmd)); 212 memset(&data_len_cmd, 0, sizeof(data_len_cmd));
213 /* data_offset and data_size should be 0 to get reg len */ 213 /* data_offset and data_size should be 0 to get reg len */
214 status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, 214 status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
215 file_name, &data_read, &eof, &addn_status); 215 file_name, &data_read, &eof,
216 &addn_status);
216 217
217 return data_read; 218 return data_read;
218} 219}
219 220
220static int 221static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
221lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, 222 u32 buf_len, void *buf)
222 u32 buf_len, void *buf)
223{ 223{
224 struct be_dma_mem read_cmd; 224 struct be_dma_mem read_cmd;
225 u32 read_len = 0, total_read_len = 0, chunk_size; 225 u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
229 229
230 read_cmd.size = LANCER_READ_FILE_CHUNK; 230 read_cmd.size = LANCER_READ_FILE_CHUNK;
231 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, 231 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
232 &read_cmd.dma); 232 &read_cmd.dma);
233 233
234 if (!read_cmd.va) { 234 if (!read_cmd.va) {
235 dev_err(&adapter->pdev->dev, 235 dev_err(&adapter->pdev->dev,
236 "Memory allocation failure while reading dump\n"); 236 "Memory allocation failure while reading dump\n");
237 return -ENOMEM; 237 return -ENOMEM;
238 } 238 }
239 239
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
242 LANCER_READ_FILE_CHUNK); 242 LANCER_READ_FILE_CHUNK);
243 chunk_size = ALIGN(chunk_size, 4); 243 chunk_size = ALIGN(chunk_size, 4);
244 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, 244 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
245 total_read_len, file_name, &read_len, 245 total_read_len, file_name,
246 &eof, &addn_status); 246 &read_len, &eof, &addn_status);
247 if (!status) { 247 if (!status) {
248 memcpy(buf + total_read_len, read_cmd.va, read_len); 248 memcpy(buf + total_read_len, read_cmd.va, read_len);
249 total_read_len += read_len; 249 total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
254 } 254 }
255 } 255 }
256 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, 256 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
257 read_cmd.dma); 257 read_cmd.dma);
258 258
259 return status; 259 return status;
260} 260}
261 261
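lancer_cmd_read_file() above pulls a firmware file object across in LANCER_READ_FILE_CHUNK-sized pieces: each request is clamped to the DMA buffer size, 4-byte aligned, and accumulated until the caller's length is met or the device signals end-of-file. A user-space sketch of the same loop, with fread() standing in for lancer_cmd_read_object() and a local array standing in for the DMA-coherent buffer:

#include <stdio.h>
#include <string.h>

#define CHUNK		4096		/* stand-in for LANCER_READ_FILE_CHUNK */
#define ALIGN4(x)	(((x) + 3u) & ~3u)

static size_t read_file_chunked(FILE *f, char *buf, size_t buf_len)
{
	char dma_buf[CHUNK];		/* stand-in for the coherent chunk buffer */
	size_t total = 0;

	while (total < buf_len) {
		size_t want = buf_len - total < CHUNK ? buf_len - total : CHUNK;
		size_t got;

		want = ALIGN4(want);	/* the interface wants dword multiples */
		got = fread(dma_buf, 1, want, f);
		if (got == 0)		/* eof, as the firmware would report */
			break;
		if (got > buf_len - total)
			got = buf_len - total;
		memcpy(buf + total, dma_buf, got);
		total += got;
	}
	return total;
}

int main(void)
{
	char buf[10000];
	printf("read %zu bytes\n", read_file_chunked(stdin, buf, sizeof(buf)));
	return 0;
}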
262static int 262static int be_get_reg_len(struct net_device *netdev)
263be_get_reg_len(struct net_device *netdev)
264{ 263{
265 struct be_adapter *adapter = netdev_priv(netdev); 264 struct be_adapter *adapter = netdev_priv(netdev);
266 u32 log_size = 0; 265 u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
271 if (be_physfn(adapter)) { 270 if (be_physfn(adapter)) {
272 if (lancer_chip(adapter)) 271 if (lancer_chip(adapter))
273 log_size = lancer_cmd_get_file_len(adapter, 272 log_size = lancer_cmd_get_file_len(adapter,
274 LANCER_FW_DUMP_FILE); 273 LANCER_FW_DUMP_FILE);
275 else 274 else
276 be_cmd_get_reg_len(adapter, &log_size); 275 be_cmd_get_reg_len(adapter, &log_size);
277 } 276 }
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
287 memset(buf, 0, regs->len); 286 memset(buf, 0, regs->len);
288 if (lancer_chip(adapter)) 287 if (lancer_chip(adapter))
289 lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, 288 lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
290 regs->len, buf); 289 regs->len, buf);
291 else 290 else
292 be_cmd_get_regs(adapter, regs->len, buf); 291 be_cmd_get_regs(adapter, regs->len, buf);
293 } 292 }
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
337 return 0; 336 return 0;
338} 337}
339 338
340static void 339static void be_get_ethtool_stats(struct net_device *netdev,
341be_get_ethtool_stats(struct net_device *netdev, 340 struct ethtool_stats *stats, uint64_t *data)
342 struct ethtool_stats *stats, uint64_t *data)
343{ 341{
344 struct be_adapter *adapter = netdev_priv(netdev); 342 struct be_adapter *adapter = netdev_priv(netdev);
345 struct be_rx_obj *rxo; 343 struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
390 } 388 }
391} 389}
392 390
393static void 391static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
394be_get_stat_strings(struct net_device *netdev, uint32_t stringset, 392 uint8_t *data)
395 uint8_t *data)
396{ 393{
397 struct be_adapter *adapter = netdev_priv(netdev); 394 struct be_adapter *adapter = netdev_priv(netdev);
398 int i, j; 395 int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
642 adapter->rx_fc = ecmd->rx_pause; 639 adapter->rx_fc = ecmd->rx_pause;
643 640
644 status = be_cmd_set_flow_control(adapter, 641 status = be_cmd_set_flow_control(adapter,
645 adapter->tx_fc, adapter->rx_fc); 642 adapter->tx_fc, adapter->rx_fc);
646 if (status) 643 if (status)
647 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); 644 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
648 645
649 return status; 646 return status;
650} 647}
651 648
652static int 649static int be_set_phys_id(struct net_device *netdev,
653be_set_phys_id(struct net_device *netdev, 650 enum ethtool_phys_id_state state)
654 enum ethtool_phys_id_state state)
655{ 651{
656 struct be_adapter *adapter = netdev_priv(netdev); 652 struct be_adapter *adapter = netdev_priv(netdev);
657 653
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
708 return status; 704 return status;
709} 705}
710 706
711static void 707static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
712be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
713{ 708{
714 struct be_adapter *adapter = netdev_priv(netdev); 709 struct be_adapter *adapter = netdev_priv(netdev);
715 710
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
723 memset(&wol->sopass, 0, sizeof(wol->sopass)); 718 memset(&wol->sopass, 0, sizeof(wol->sopass));
724} 719}
725 720
726static int 721static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
727be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
728{ 722{
729 struct be_adapter *adapter = netdev_priv(netdev); 723 struct be_adapter *adapter = netdev_priv(netdev);
730 724
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
744 return 0; 738 return 0;
745} 739}
746 740
747static int 741static int be_test_ddr_dma(struct be_adapter *adapter)
748be_test_ddr_dma(struct be_adapter *adapter)
749{ 742{
750 int ret, i; 743 int ret, i;
751 struct be_dma_mem ddrdma_cmd; 744 struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
761 754
762 for (i = 0; i < 2; i++) { 755 for (i = 0; i < 2; i++) {
763 ret = be_cmd_ddr_dma_test(adapter, pattern[i], 756 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
764 4096, &ddrdma_cmd); 757 4096, &ddrdma_cmd);
765 if (ret != 0) 758 if (ret != 0)
766 goto err; 759 goto err;
767 } 760 }
@@ -773,20 +766,17 @@ err:
773} 766}
774 767
775static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, 768static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
776 u64 *status) 769 u64 *status)
777{ 770{
778 be_cmd_set_loopback(adapter, adapter->hba_port_num, 771 be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
779 loopback_type, 1);
780 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, 772 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
781 loopback_type, 1500, 773 loopback_type, 1500, 2, 0xabc);
782 2, 0xabc); 774 be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
783 be_cmd_set_loopback(adapter, adapter->hba_port_num,
784 BE_NO_LOOPBACK, 1);
785 return *status; 775 return *status;
786} 776}
787 777
788static void 778static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
789be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 779 u64 *data)
790{ 780{
791 struct be_adapter *adapter = netdev_priv(netdev); 781 struct be_adapter *adapter = netdev_priv(netdev);
792 int status; 782 int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
801 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 791 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
802 792
803 if (test->flags & ETH_TEST_FL_OFFLINE) { 793 if (test->flags & ETH_TEST_FL_OFFLINE) {
804 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, 794 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
805 &data[0]) != 0)
806 test->flags |= ETH_TEST_FL_FAILED; 795 test->flags |= ETH_TEST_FL_FAILED;
807 796
808 if (be_loopback_test(adapter, BE_PHY_LOOPBACK, 797 if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
809 &data[1]) != 0)
810 test->flags |= ETH_TEST_FL_FAILED; 798 test->flags |= ETH_TEST_FL_FAILED;
811 799
812 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) { 800 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
832 } 820 }
833} 821}
834 822
835static int 823static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
836be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
837{ 824{
838 struct be_adapter *adapter = netdev_priv(netdev); 825 struct be_adapter *adapter = netdev_priv(netdev);
839 826
840 return be_load_fw(adapter, efl->data); 827 return be_load_fw(adapter, efl->data);
841} 828}
842 829
843static int 830static int be_get_eeprom_len(struct net_device *netdev)
844be_get_eeprom_len(struct net_device *netdev)
845{ 831{
846 struct be_adapter *adapter = netdev_priv(netdev); 832 struct be_adapter *adapter = netdev_priv(netdev);
847 833
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
851 if (lancer_chip(adapter)) { 837 if (lancer_chip(adapter)) {
852 if (be_physfn(adapter)) 838 if (be_physfn(adapter))
853 return lancer_cmd_get_file_len(adapter, 839 return lancer_cmd_get_file_len(adapter,
854 LANCER_VPD_PF_FILE); 840 LANCER_VPD_PF_FILE);
855 else 841 else
856 return lancer_cmd_get_file_len(adapter, 842 return lancer_cmd_get_file_len(adapter,
857 LANCER_VPD_VF_FILE); 843 LANCER_VPD_VF_FILE);
858 } else { 844 } else {
859 return BE_READ_SEEPROM_LEN; 845 return BE_READ_SEEPROM_LEN;
860 } 846 }
861} 847}
862 848
863static int 849static int be_read_eeprom(struct net_device *netdev,
864be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 850 struct ethtool_eeprom *eeprom, uint8_t *data)
865 uint8_t *data)
866{ 851{
867 struct be_adapter *adapter = netdev_priv(netdev); 852 struct be_adapter *adapter = netdev_priv(netdev);
868 struct be_dma_mem eeprom_cmd; 853 struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
875 if (lancer_chip(adapter)) { 860 if (lancer_chip(adapter)) {
876 if (be_physfn(adapter)) 861 if (be_physfn(adapter))
877 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, 862 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
878 eeprom->len, data); 863 eeprom->len, data);
879 else 864 else
880 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, 865 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
881 eeprom->len, data); 866 eeprom->len, data);
882 } 867 }
883 868
884 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); 869 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
933 918
934 switch (flow_type) { 919 switch (flow_type) {
935 case TCP_V4_FLOW: 920 case TCP_V4_FLOW:
936 if (adapter->rss_flags & RSS_ENABLE_IPV4) 921 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
937 data |= RXH_IP_DST | RXH_IP_SRC; 922 data |= RXH_IP_DST | RXH_IP_SRC;
938 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4) 923 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
939 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 924 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
940 break; 925 break;
941 case UDP_V4_FLOW: 926 case UDP_V4_FLOW:
942 if (adapter->rss_flags & RSS_ENABLE_IPV4) 927 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
943 data |= RXH_IP_DST | RXH_IP_SRC; 928 data |= RXH_IP_DST | RXH_IP_SRC;
944 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4) 929 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
945 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 930 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
946 break; 931 break;
947 case TCP_V6_FLOW: 932 case TCP_V6_FLOW:
948 if (adapter->rss_flags & RSS_ENABLE_IPV6) 933 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
949 data |= RXH_IP_DST | RXH_IP_SRC; 934 data |= RXH_IP_DST | RXH_IP_SRC;
950 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6) 935 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
951 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 936 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
952 break; 937 break;
953 case UDP_V6_FLOW: 938 case UDP_V6_FLOW:
954 if (adapter->rss_flags & RSS_ENABLE_IPV6) 939 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
955 data |= RXH_IP_DST | RXH_IP_SRC; 940 data |= RXH_IP_DST | RXH_IP_SRC;
956 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6) 941 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
957 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 942 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
958 break; 943 break;
959 } 944 }
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
962} 947}
963 948
964static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 949static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
965 u32 *rule_locs) 950 u32 *rule_locs)
966{ 951{
967 struct be_adapter *adapter = netdev_priv(netdev); 952 struct be_adapter *adapter = netdev_priv(netdev);
968 953
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
992 struct be_rx_obj *rxo; 977 struct be_rx_obj *rxo;
993 int status = 0, i, j; 978 int status = 0, i, j;
994 u8 rsstable[128]; 979 u8 rsstable[128];
995 u32 rss_flags = adapter->rss_flags; 980 u32 rss_flags = adapter->rss_info.rss_flags;
996 981
997 if (cmd->data != L3_RSS_FLAGS && 982 if (cmd->data != L3_RSS_FLAGS &&
998 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) 983 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
1039 return -EINVAL; 1024 return -EINVAL;
1040 } 1025 }
1041 1026
1042 if (rss_flags == adapter->rss_flags) 1027 if (rss_flags == adapter->rss_info.rss_flags)
1043 return status; 1028 return status;
1044 1029
1045 if (be_multi_rxq(adapter)) { 1030 if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
1051 } 1036 }
1052 } 1037 }
1053 } 1038 }
1054 status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128); 1039
1040 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1041 rss_flags, 128, adapter->rss_info.rss_hkey);
1055 if (!status) 1042 if (!status)
1056 adapter->rss_flags = rss_flags; 1043 adapter->rss_info.rss_flags = rss_flags;
1057 1044
1058 return status; 1045 return status;
1059} 1046}
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
1103 return be_update_queues(adapter); 1090 return be_update_queues(adapter);
1104} 1091}
1105 1092
1093static u32 be_get_rxfh_indir_size(struct net_device *netdev)
1094{
1095 return RSS_INDIR_TABLE_LEN;
1096}
1097
1098static u32 be_get_rxfh_key_size(struct net_device *netdev)
1099{
1100 return RSS_HASH_KEY_LEN;
1101}
1102
1103static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
1104{
1105 struct be_adapter *adapter = netdev_priv(netdev);
1106 int i;
1107 struct rss_info *rss = &adapter->rss_info;
1108
1109 if (indir) {
1110 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
1111 indir[i] = rss->rss_queue[i];
1112 }
1113
1114 if (hkey)
1115 memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
1116
1117 return 0;
1118}
1119
1120static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
1121 const u8 *hkey)
1122{
1123 int rc = 0, i, j;
1124 struct be_adapter *adapter = netdev_priv(netdev);
1125 u8 rsstable[RSS_INDIR_TABLE_LEN];
1126
1127 if (indir) {
1128 struct be_rx_obj *rxo;
1129 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
1130 j = indir[i];
1131 rxo = &adapter->rx_obj[j];
1132 rsstable[i] = rxo->rss_id;
1133 adapter->rss_info.rss_queue[i] = j;
1134 }
1135 } else {
1136 memcpy(rsstable, adapter->rss_info.rsstable,
1137 RSS_INDIR_TABLE_LEN);
1138 }
1139
1140 if (!hkey)
1141 hkey = adapter->rss_info.rss_hkey;
1142
1143 rc = be_cmd_rss_config(adapter, rsstable,
1144 adapter->rss_info.rss_flags,
1145 RSS_INDIR_TABLE_LEN, hkey);
1146 if (rc) {
1147 adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
1148 return -EIO;
1149 }
1150 memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
1151 memcpy(adapter->rss_info.rsstable, rsstable,
1152 RSS_INDIR_TABLE_LEN);
1153 return 0;
1154}
1155
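The new be_get_rxfh()/be_set_rxfh() hooks expose the driver's RSS state to ethtool: a 128-entry indirection table (RSS_INDIR_TABLE_LEN) plus a 40-byte hash key (RSS_HASH_KEY_LEN). The table is what actually spreads traffic: the NIC hashes each flow with the key, uses the low bits of the hash to pick a table slot, and the slot names the RX queue. A user-space sketch of that mapping, assuming the sizes used here:

#include <stdint.h>
#include <stdio.h>

#define RSS_INDIR_TABLE_LEN	128	/* as in the hunks above */

static unsigned int rss_queue_for_hash(const uint8_t *indir, uint32_t hash)
{
	return indir[hash % RSS_INDIR_TABLE_LEN];
}

int main(void)
{
	uint8_t indir[RSS_INDIR_TABLE_LEN];
	unsigned int i;

	/* a typical default: round-robin the slots over 4 RX queues */
	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
		indir[i] = i % 4;

	printf("hash 0x1234 -> queue %u\n", rss_queue_for_hash(indir, 0x1234));
	return 0;
}

From user space, ethtool -x <dev> reads the table and key and ethtool -X <dev> rewrites them, landing in be_get_rxfh() and be_set_rxfh() respectively.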
1106const struct ethtool_ops be_ethtool_ops = { 1156const struct ethtool_ops be_ethtool_ops = {
1107 .get_settings = be_get_settings, 1157 .get_settings = be_get_settings,
1108 .get_drvinfo = be_get_drvinfo, 1158 .get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
1129 .self_test = be_self_test, 1179 .self_test = be_self_test,
1130 .get_rxnfc = be_get_rxnfc, 1180 .get_rxnfc = be_get_rxnfc,
1131 .set_rxnfc = be_set_rxnfc, 1181 .set_rxnfc = be_set_rxnfc,
1182 .get_rxfh_indir_size = be_get_rxfh_indir_size,
1183 .get_rxfh_key_size = be_get_rxfh_key_size,
1184 .get_rxfh = be_get_rxfh,
1185 .set_rxfh = be_set_rxfh,
1132 .get_channels = be_get_channels, 1186 .get_channels = be_get_channels,
1133 .set_channels = be_set_channels 1187 .set_channels = be_set_channels
1134}; 1188};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd198550edb..8840c64aaeca 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
188#define OPTYPE_FCOE_FW_ACTIVE 10 188#define OPTYPE_FCOE_FW_ACTIVE 10
189#define OPTYPE_FCOE_FW_BACKUP 11 189#define OPTYPE_FCOE_FW_BACKUP 11
190#define OPTYPE_NCSI_FW 13 190#define OPTYPE_NCSI_FW 13
191#define OPTYPE_REDBOOT_DIR 18
192#define OPTYPE_REDBOOT_CONFIG 19
193#define OPTYPE_SH_PHY_FW 21
194#define OPTYPE_FLASHISM_JUMPVECTOR 22
195#define OPTYPE_UFI_DIR 23
191#define OPTYPE_PHY_FW 99 196#define OPTYPE_PHY_FW 99
192#define TN_8022 13 197#define TN_8022 13
193 198
194#define ILLEGAL_IOCTL_REQ 2
195#define FLASHROM_OPER_PHY_FLASH 9 199#define FLASHROM_OPER_PHY_FLASH 9
196#define FLASHROM_OPER_PHY_SAVE 10 200#define FLASHROM_OPER_PHY_SAVE 10
197#define FLASHROM_OPER_FLASH 1 201#define FLASHROM_OPER_FLASH 1
@@ -250,6 +254,9 @@
250#define IMAGE_FIRMWARE_BACKUP_FCoE 178 254#define IMAGE_FIRMWARE_BACKUP_FCoE 178
251#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179 255#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
252#define IMAGE_FIRMWARE_PHY 192 256#define IMAGE_FIRMWARE_PHY 192
257#define IMAGE_REDBOOT_DIR 208
258#define IMAGE_REDBOOT_CONFIG 209
259#define IMAGE_UFI_DIR 210
253#define IMAGE_BOOT_CODE 224 260#define IMAGE_BOOT_CODE 224
254 261
255/************* Rx Packet Type Encoding **************/ 262/************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
534 u32 image_size; 541 u32 image_size;
535 u32 cksum; 542 u32 cksum;
536 u32 entry_point; 543 u32 entry_point;
537 u32 rsvd0; 544 u16 optype;
545 u16 rsvd0;
538 u32 rsvd1; 546 u32 rsvd1;
539 u8 ver_data[32]; 547 u8 ver_data[32];
540} __packed; 548} __packed;
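The flash_section_entry change above splits a reserved u32 into a u16 optype plus u16 padding, so newer UFI images can carry an explicit section type (see the OPTYPE_* additions earlier in be_hw.h) without moving any later field. Since this struct is parsed straight off flash, the layout can be pinned at compile time; a reduced sketch covering just the fields shown in the hunk, assuming packed semantics:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

#pragma pack(push, 1)
struct flash_section_entry_tail {	/* tail of the real struct, fields as in the hunk */
	uint32_t image_size;
	uint32_t cksum;
	uint32_t entry_point;
	uint16_t optype;		/* low half of what used to be rsvd0 */
	uint16_t rsvd0;
	uint32_t rsvd1;
	uint8_t  ver_data[32];
};
#pragma pack(pop)

/* the u32 -> u16+u16 split must leave every later offset untouched */
static_assert(offsetof(struct flash_section_entry_tail, rsvd1) == 16,
	      "optype split moved rsvd1");
static_assert(sizeof(struct flash_section_entry_tail) == 52,
	      "unexpected section entry size");

int main(void) { return 0; }

One caveat the sketch ignores: the on-flash words are little-endian, so a big-endian host would still need byte swaps when reading optype.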
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5dec77..6822b3d76d85 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134} 134}
135 135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137 u16 len, u16 entry_size) 137 u16 len, u16 entry_size)
138{ 138{
139 struct be_dma_mem *mem = &q->dma_mem; 139 struct be_dma_mem *mem = &q->dma_mem;
140 140
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
154 u32 reg, enabled; 154 u32 reg, enabled;
155 155
156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157 &reg); 157 &reg);
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159 159
160 if (!enabled && enable) 160 if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
165 return; 165 return;
166 166
167 pci_write_config_dword(adapter->pdev, 167 pci_write_config_dword(adapter->pdev,
168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169} 169}
170 170
171static void be_intr_set(struct be_adapter *adapter, bool enable) 171static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
206} 206}
207 207
208static void be_eq_notify(struct be_adapter *adapter, u16 qid, 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
209 bool arm, bool clear_int, u16 num_popped) 209 bool arm, bool clear_int, u16 num_popped)
210{ 210{
211 u32 val = 0; 211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK; 212 val |= qid & DB_EQ_RING_ID_MASK;
213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
214 DB_EQ_RING_ID_EXT_MASK_SHIFT);
215 214
216 if (adapter->eeh_error) 215 if (adapter->eeh_error)
217 return; 216 return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
477 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
478 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; 477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
479 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
480 if (be_roce_supported(adapter)) { 479 if (be_roce_supported(adapter)) {
481 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; 480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
482 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; 481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
483 drvs->rx_roce_frames = port_stats->roce_frames_received; 482 drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
491{ 490{
492 491
493 struct be_drv_stats *drvs = &adapter->drv_stats; 492 struct be_drv_stats *drvs = &adapter->drv_stats;
494 struct lancer_pport_stats *pport_stats = 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
495 pport_stats_from_cmd(adapter);
496 494
497 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); 495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
498 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; 496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
539} 537}
540 538
541static void populate_erx_stats(struct be_adapter *adapter, 539static void populate_erx_stats(struct be_adapter *adapter,
542 struct be_rx_obj *rxo, 540 struct be_rx_obj *rxo, u32 erx_stat)
543 u32 erx_stat)
544{ 541{
545 if (!BEx_chip(adapter)) 542 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat; 543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
579} 576}
580 577
581static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
582 struct rtnl_link_stats64 *stats) 579 struct rtnl_link_stats64 *stats)
583{ 580{
584 struct be_adapter *adapter = netdev_priv(netdev); 581 struct be_adapter *adapter = netdev_priv(netdev);
585 struct be_drv_stats *drvs = &adapter->drv_stats; 582 struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
660} 657}
661 658
662static void be_tx_stats_update(struct be_tx_obj *txo, 659static void be_tx_stats_update(struct be_tx_obj *txo,
663 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
664{ 662{
665 struct be_tx_stats *stats = tx_stats(txo); 663 struct be_tx_stats *stats = tx_stats(txo);
666 664
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
676 674
677/* Determine number of WRB entries needed to xmit data in an skb */ 675/* Determine number of WRB entries needed to xmit data in an skb */
678static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
679 bool *dummy) 677 bool *dummy)
680{ 678{
681 int cnt = (skb->len > skb->data_len); 679 int cnt = (skb->len > skb->data_len);
682 680
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
704} 702}
705 703
706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
707 struct sk_buff *skb) 705 struct sk_buff *skb)
708{ 706{
709 u8 vlan_prio; 707 u8 vlan_prio;
710 u16 vlan_tag; 708 u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
733} 731}
734 732
735static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
736 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) 734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
737{ 736{
738 u16 vlan_tag, proto; 737 u16 vlan_tag, proto;
739 738
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
774} 773}
775 774
776static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
777 bool unmap_single) 776 bool unmap_single)
778{ 777{
779 dma_addr_t dma; 778 dma_addr_t dma;
780 779
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
791} 790}
792 791
793static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
794 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
795 bool skip_hw_vlan) 794 bool skip_hw_vlan)
796{ 795{
797 dma_addr_t busaddr; 796 dma_addr_t busaddr;
798 int i, copied = 0; 797 int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
821 } 820 }
822 821
823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
824 const struct skb_frag_struct *frag = 823 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
825 &skb_shinfo(skb)->frags[i];
826 busaddr = skb_frag_dma_map(dev, frag, 0, 824 busaddr = skb_frag_dma_map(dev, frag, 0,
827 skb_frag_size(frag), DMA_TO_DEVICE); 825 skb_frag_size(frag), DMA_TO_DEVICE);
828 if (dma_mapping_error(dev, busaddr)) 826 if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
927 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
928} 926}
929 927
930static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, 928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
931 struct sk_buff *skb)
932{ 929{
933 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
934} 931}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
959 */ 956 */
960 if (be_pvid_tagging_enabled(adapter) && 957 if (be_pvid_tagging_enabled(adapter) &&
961 veh->h_vlan_proto == htons(ETH_P_8021Q)) 958 veh->h_vlan_proto == htons(ETH_P_8021Q))
962 *skip_hw_vlan = true; 959 *skip_hw_vlan = true;
963 960
964 /* HW has a bug wherein it will calculate CSUM for VLAN 961 /* HW has a bug wherein it will calculate CSUM for VLAN
965 * pkts even though it is disabled. 962 * pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1077{ 1074{
1078 struct be_adapter *adapter = netdev_priv(netdev); 1075 struct be_adapter *adapter = netdev_priv(netdev);
1079 if (new_mtu < BE_MIN_MTU || 1076 if (new_mtu < BE_MIN_MTU ||
1080 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
1081 (ETH_HLEN + ETH_FCS_LEN))) {
1082 dev_info(&adapter->pdev->dev, 1078 dev_info(&adapter->pdev->dev,
1083 "MTU must be between %d and %d bytes\n", 1079 "MTU must be between %d and %d bytes\n",
1084 BE_MIN_MTU, 1080 BE_MIN_MTU,
1085 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); 1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1086 return -EINVAL; 1082 return -EINVAL;
1087 } 1083 }
1088 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1089 netdev->mtu, new_mtu); 1085 netdev->mtu, new_mtu);
1090 netdev->mtu = new_mtu; 1086 netdev->mtu = new_mtu;
1091 return 0; 1087 return 0;
1092} 1088}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1098static int be_vid_config(struct be_adapter *adapter) 1094static int be_vid_config(struct be_adapter *adapter)
1099{ 1095{
1100 u16 vids[BE_NUM_VLANS_SUPPORTED]; 1096 u16 vids[BE_NUM_VLANS_SUPPORTED];
1101 u16 num = 0, i; 1097 u16 num = 0, i = 0;
1102 int status = 0; 1098 int status = 0;
1103 1099
1104 /* No need to further configure vids if in promiscuous mode */ 1100 /* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
1109 goto set_vlan_promisc; 1105 goto set_vlan_promisc;
1110 1106
1111 /* Construct VLAN Table to give to HW */ 1107 /* Construct VLAN Table to give to HW */
1112 for (i = 0; i < VLAN_N_VID; i++) 1108 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1113 if (adapter->vlan_tag[i]) 1109 vids[num++] = cpu_to_le16(i);
1114 vids[num++] = cpu_to_le16(i);
1115
1116 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1117 vids, num, 0);
1118 1110
1111 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1119 if (status) { 1112 if (status) {
1120 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1113 /* Set to VLAN promisc mode as setting VLAN filter failed */
1121 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) 1114 if (addl_status(status) ==
1115 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1122 goto set_vlan_promisc; 1116 goto set_vlan_promisc;
1123 dev_err(&adapter->pdev->dev, 1117 dev_err(&adapter->pdev->dev,
1124 "Setting HW VLAN filtering failed.\n"); 1118 "Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1160 if (lancer_chip(adapter) && vid == 0) 1154 if (lancer_chip(adapter) && vid == 0)
1161 return status; 1155 return status;
1162 1156
1163 if (adapter->vlan_tag[vid]) 1157 if (test_bit(vid, adapter->vids))
1164 return status; 1158 return status;
1165 1159
1166 adapter->vlan_tag[vid] = 1; 1160 set_bit(vid, adapter->vids);
1167 adapter->vlans_added++; 1161 adapter->vlans_added++;
1168 1162
1169 status = be_vid_config(adapter); 1163 status = be_vid_config(adapter);
1170 if (status) { 1164 if (status) {
1171 adapter->vlans_added--; 1165 adapter->vlans_added--;
1172 adapter->vlan_tag[vid] = 0; 1166 clear_bit(vid, adapter->vids);
1173 } 1167 }
1174 1168
1175 return status; 1169 return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1184 if (lancer_chip(adapter) && vid == 0) 1178 if (lancer_chip(adapter) && vid == 0)
1185 goto ret; 1179 goto ret;
1186 1180
1187 adapter->vlan_tag[vid] = 0; 1181 clear_bit(vid, adapter->vids);
1188 status = be_vid_config(adapter); 1182 status = be_vid_config(adapter);
1189 if (!status) 1183 if (!status)
1190 adapter->vlans_added--; 1184 adapter->vlans_added--;
1191 else 1185 else
1192 adapter->vlan_tag[vid] = 1; 1186 set_bit(vid, adapter->vids);
1193ret: 1187ret:
1194 return status; 1188 return status;
1195} 1189}
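
The hunks above convert per-VID tracking from a byte array (adapter->vlan_tag[]) to a bitmap (adapter->vids) walked with for_each_set_bit()/test_bit()/set_bit()/clear_bit(). A userspace sketch of that bitmap idiom, with the kernel helpers modeled in plain C and VLAN_N_VID assumed to be 4096:

/* Userspace model of the vids bitmap used above; set_bit/test_bit/
 * for_each_set_bit are re-implemented with plain 64-bit words. */
#include <stdio.h>
#include <stdint.h>

#define VLAN_N_VID 4096

static uint64_t vids[VLAN_N_VID / 64];

static void set_vid(int vid)   { vids[vid / 64] |=  (1ULL << (vid % 64)); }
static void clear_vid(int vid) { vids[vid / 64] &= ~(1ULL << (vid % 64)); }
static int  test_vid(int vid)  { return (vids[vid / 64] >> (vid % 64)) & 1; }

int main(void)
{
        uint16_t table[VLAN_N_VID];
        int vid, num = 0;

        set_vid(100);
        set_vid(200);
        clear_vid(100);

        /* equivalent of for_each_set_bit(i, adapter->vids, VLAN_N_VID) */
        for (vid = 0; vid < VLAN_N_VID; vid++)
                if (test_vid(vid))
                        table[num++] = (uint16_t)vid;

        printf("%d VLAN(s), first is %d\n", num, (int)table[0]);  /* 1, 200 */
        return 0;
}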
@@ -1197,7 +1191,7 @@ ret:
1197static void be_clear_promisc(struct be_adapter *adapter) 1191static void be_clear_promisc(struct be_adapter *adapter)
1198{ 1192{
1199 adapter->promiscuous = false; 1193 adapter->promiscuous = false;
1200 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; 1194 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
1201 1195
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1196 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203} 1197}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
1222 1216
1223 /* Enable multicast promisc if num configured exceeds what we support */ 1217 /* Enable multicast promisc if num configured exceeds what we support */
1224 if (netdev->flags & IFF_ALLMULTI || 1218 if (netdev->flags & IFF_ALLMULTI ||
1225 netdev_mc_count(netdev) > be_max_mc(adapter)) { 1219 netdev_mc_count(netdev) > be_max_mc(adapter))
1226 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1220 goto set_mcast_promisc;
1227 goto done;
1228 }
1229 1221
1230 if (netdev_uc_count(netdev) != adapter->uc_macs) { 1222 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1231 struct netdev_hw_addr *ha; 1223 struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
1251 } 1243 }
1252 1244
1253 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 1245 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1254 1246 if (!status) {
1255 /* Set to MCAST promisc mode if setting MULTICAST address fails */ 1247 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1256 if (status) { 1248 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1257 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); 1249 goto done;
1258 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1259 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1260 } 1250 }
1251
1252set_mcast_promisc:
1253 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1254 return;
1255
1256 /* Set to MCAST promisc mode if setting MULTICAST address fails
1257 * or if num configured exceeds what we support
1258 */
1259 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1260 if (!status)
1261 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1261done: 1262done:
1262 return; 1263 return;
1263} 1264}
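
be_set_rx_mode() now records multicast-promiscuous state in BE_FLAGS_MCAST_PROMISC so the ALLMULTI filter is programmed once instead of on every rx-mode update. A sketch of those transitions; the flag bit and the program_filter() stand-in for be_cmd_rx_filter() are illustrative assumptions:

/* Sketch of the BE_FLAGS_MCAST_PROMISC bookkeeping above; the flag
 * value and program_filter() are illustrative, not from the driver. */
#include <stdio.h>

#define BE_FLAGS_MCAST_PROMISC (1 << 0)   /* assumed bit */

static unsigned int flags;

static int program_filter(const char *mode)  /* be_cmd_rx_filter() stand-in */
{
        printf("programming %s\n", mode);
        return 0;                            /* pretend the MCC command succeeds */
}

static void set_rx_mode(int mc_count, int max_mc)
{
        if (mc_count > max_mc)
                goto set_mcast_promisc;

        if (!program_filter("IFF_MULTICAST")) {
                flags &= ~BE_FLAGS_MCAST_PROMISC;  /* left promisc mode */
                return;
        }

set_mcast_promisc:
        if (flags & BE_FLAGS_MCAST_PROMISC)
                return;                      /* already promisc: nothing to redo */
        if (!program_filter("IFF_ALLMULTI"))
                flags |= BE_FLAGS_MCAST_PROMISC;
}

int main(void)
{
        set_rx_mode(10, 64);    /* fits: exact filtering */
        set_rx_mode(100, 64);   /* overflow: go promisc once */
        set_rx_mode(100, 64);   /* still promisc: no reprogramming */
        return 0;
}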
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1287 1288
1288 if (status) 1289 if (status)
1289 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 1290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1290 mac, vf); 1291 mac, vf);
1291 else 1292 else
1292 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 1293 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1293 1294
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1295} 1296}
1296 1297
1297static int be_get_vf_config(struct net_device *netdev, int vf, 1298static int be_get_vf_config(struct net_device *netdev, int vf,
1298 struct ifla_vf_info *vi) 1299 struct ifla_vf_info *vi)
1299{ 1300{
1300 struct be_adapter *adapter = netdev_priv(netdev); 1301 struct be_adapter *adapter = netdev_priv(netdev);
1301 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1302 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1307 return -EINVAL; 1308 return -EINVAL;
1308 1309
1309 vi->vf = vf; 1310 vi->vf = vf;
1310 vi->tx_rate = vf_cfg->tx_rate; 1311 vi->max_tx_rate = vf_cfg->tx_rate;
1312 vi->min_tx_rate = 0;
1311 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; 1313 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1312 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; 1314 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1313 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1315 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1316 return 0; 1318 return 0;
1317} 1319}
1318 1320
1319static int be_set_vf_vlan(struct net_device *netdev, 1321static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1320 int vf, u16 vlan, u8 qos)
1321{ 1322{
1322 struct be_adapter *adapter = netdev_priv(netdev); 1323 struct be_adapter *adapter = netdev_priv(netdev);
1323 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1324 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
1348 return status; 1349 return status;
1349} 1350}
1350 1351
1351static int be_set_vf_tx_rate(struct net_device *netdev, 1352static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1352 int vf, int rate) 1353 int min_tx_rate, int max_tx_rate)
1353{ 1354{
1354 struct be_adapter *adapter = netdev_priv(netdev); 1355 struct be_adapter *adapter = netdev_priv(netdev);
1355 int status = 0; 1356 struct device *dev = &adapter->pdev->dev;
1357 int percent_rate, status = 0;
1358 u16 link_speed = 0;
1359 u8 link_status;
1356 1360
1357 if (!sriov_enabled(adapter)) 1361 if (!sriov_enabled(adapter))
1358 return -EPERM; 1362 return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1360 if (vf >= adapter->num_vfs) 1364 if (vf >= adapter->num_vfs)
1361 return -EINVAL; 1365 return -EINVAL;
1362 1366
1363 if (rate < 100 || rate > 10000) { 1367 if (min_tx_rate)
1364 dev_err(&adapter->pdev->dev,
1365 "tx rate must be between 100 and 10000 Mbps\n");
1366 return -EINVAL; 1368 return -EINVAL;
1369
1370 if (!max_tx_rate)
1371 goto config_qos;
1372
1373 status = be_cmd_link_status_query(adapter, &link_speed,
1374 &link_status, 0);
1375 if (status)
1376 goto err;
1377
1378 if (!link_status) {
1379 dev_err(dev, "TX-rate setting not allowed when link is down\n");
1380 status = -EPERM;
1381 goto err;
1382 }
1383
1384 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1385 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1386 link_speed);
1387 status = -EINVAL;
1388 goto err;
1389 }
1390
1391 /* On Skyhawk the QOS setting must be done only as a % value */
1392 percent_rate = link_speed / 100;
1393 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1394 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1395 percent_rate);
1396 status = -EINVAL;
1397 goto err;
1367 } 1398 }
1368 1399
1369 status = be_cmd_config_qos(adapter, rate / 10, vf + 1); 1400config_qos:
1401 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
1370 if (status) 1402 if (status)
1371 dev_err(&adapter->pdev->dev, 1403 goto err;
1372 "tx rate %d on VF %d failed\n", rate, vf); 1404
1373 else 1405 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1374 adapter->vf_cfg[vf].tx_rate = rate; 1406 return 0;
1407
1408err:
1409 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1410 max_tx_rate, vf);
1375 return status; 1411 return status;
1376} 1412}
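
The reworked handler validates max_tx_rate against the live link speed and, on Skyhawk, additionally requires the rate to be a whole multiple of one percent of it. A sketch of just the validation: on a 10000 Mbps link percent_rate is 100, so 2550 Mbps is rejected while 2500 Mbps passes:

/* Sketch of the TX-rate checks above; link_speed is in Mbps and -1
 * stands in for the driver's -EINVAL. */
#include <stdbool.h>
#include <stdio.h>

static int check_vf_rate(int max_tx_rate, int link_speed, bool skyhawk)
{
        int percent_rate;

        if (!max_tx_rate)
                return 0;                       /* 0 means "no rate limit" */

        if (max_tx_rate < 100 || max_tx_rate > link_speed)
                return -1;

        percent_rate = link_speed / 100;
        if (skyhawk && (max_tx_rate % percent_rate))
                return -1;                      /* must be a 1% multiple */

        return 0;
}

int main(void)
{
        printf("%d\n", check_vf_rate(2500, 10000, true));   /* 0: ok */
        printf("%d\n", check_vf_rate(2550, 10000, true));   /* -1: not 1% multiple */
        printf("%d\n", check_vf_rate(50, 10000, false));    /* -1: below 100 Mbps */
        return 0;
}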
1377static int be_set_vf_link_state(struct net_device *netdev, int vf, 1413static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@ -1469,7 +1505,7 @@ modify_eqd:
1469} 1505}
1470 1506
1471static void be_rx_stats_update(struct be_rx_obj *rxo, 1507static void be_rx_stats_update(struct be_rx_obj *rxo,
1472 struct be_rx_compl_info *rxcp) 1508 struct be_rx_compl_info *rxcp)
1473{ 1509{
1474 struct be_rx_stats *stats = rx_stats(rxo); 1510 struct be_rx_stats *stats = rx_stats(rxo);
1475 1511
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1566 skb_frag_set_page(skb, 0, page_info->page); 1602 skb_frag_set_page(skb, 0, page_info->page);
1567 skb_shinfo(skb)->frags[0].page_offset = 1603 skb_shinfo(skb)->frags[0].page_offset =
1568 page_info->page_offset + hdr_len; 1604 page_info->page_offset + hdr_len;
1569 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); 1605 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1606 curr_frag_len - hdr_len);
1570 skb->data_len = curr_frag_len - hdr_len; 1607 skb->data_len = curr_frag_len - hdr_len;
1571 skb->truesize += rx_frag_size; 1608 skb->truesize += rx_frag_size;
1572 skb->tail += hdr_len; 1609 skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1725 if (rxcp->vlanf) { 1762 if (rxcp->vlanf) {
1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, 1763 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1727 compl); 1764 compl);
1728 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1765 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1729 compl); 1766 vlan_tag, compl);
1730 } 1767 }
1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); 1768 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732 rxcp->tunneled = 1769 rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1757 if (rxcp->vlanf) { 1794 if (rxcp->vlanf) {
1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, 1795 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1759 compl); 1796 compl);
1760 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1797 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1761 compl); 1798 vlan_tag, compl);
1762 } 1799 }
1763 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); 1800 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1764 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, 1801 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1799 rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1836 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1800 1837
1801 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && 1838 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1802 !adapter->vlan_tag[rxcp->vlan_tag]) 1839 !test_bit(rxcp->vlan_tag, adapter->vids))
1803 rxcp->vlanf = 0; 1840 rxcp->vlanf = 0;
1804 } 1841 }
1805 1842
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1915} 1952}
1916 1953
1917static u16 be_tx_compl_process(struct be_adapter *adapter, 1954static u16 be_tx_compl_process(struct be_adapter *adapter,
1918 struct be_tx_obj *txo, u16 last_index) 1955 struct be_tx_obj *txo, u16 last_index)
1919{ 1956{
1920 struct be_queue_info *txq = &txo->q; 1957 struct be_queue_info *txq = &txo->q;
1921 struct be_eth_wrb *wrb; 1958 struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2122 2159
2123 eq = &eqo->q; 2160 eq = &eqo->q;
2124 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 2161 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2125 sizeof(struct be_eq_entry)); 2162 sizeof(struct be_eq_entry));
2126 if (rc) 2163 if (rc)
2127 return rc; 2164 return rc;
2128 2165
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
2155 2192
2156 cq = &adapter->mcc_obj.cq; 2193 cq = &adapter->mcc_obj.cq;
2157 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, 2194 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2158 sizeof(struct be_mcc_compl))) 2195 sizeof(struct be_mcc_compl)))
2159 goto err; 2196 goto err;
2160 2197
2161 /* Use the default EQ for MCC completions */ 2198 /* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
2275 rxo->adapter = adapter; 2312 rxo->adapter = adapter;
2276 cq = &rxo->cq; 2313 cq = &rxo->cq;
2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 2314 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278 sizeof(struct be_eth_rx_compl)); 2315 sizeof(struct be_eth_rx_compl));
2279 if (rc) 2316 if (rc)
2280 return rc; 2317 return rc;
2281 2318
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
2339} 2376}
2340 2377
2341static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, 2378static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2342 int budget, int polling) 2379 int budget, int polling)
2343{ 2380{
2344 struct be_adapter *adapter = rxo->adapter; 2381 struct be_adapter *adapter = rxo->adapter;
2345 struct be_queue_info *rx_cq = &rxo->cq; 2382 struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2365 * promiscuous mode on some SKUs 2402 * promiscuous mode on some SKUs
2366 */ 2403 */
2367 if (unlikely(rxcp->port != adapter->port_num && 2404 if (unlikely(rxcp->port != adapter->port_num &&
2368 !lancer_chip(adapter))) { 2405 !lancer_chip(adapter))) {
2369 be_rx_compl_discard(rxo, rxcp); 2406 be_rx_compl_discard(rxo, rxcp);
2370 goto loop_continue; 2407 goto loop_continue;
2371 } 2408 }
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2405 if (!txcp) 2442 if (!txcp)
2406 break; 2443 break;
2407 num_wrbs += be_tx_compl_process(adapter, txo, 2444 num_wrbs += be_tx_compl_process(adapter, txo,
2408 AMAP_GET_BITS(struct amap_eth_tx_compl, 2445 AMAP_GET_BITS(struct
2409 wrb_index, txcp)); 2446 amap_eth_tx_compl,
2447 wrb_index, txcp));
2410 } 2448 }
2411 2449
2412 if (work_done) { 2450 if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2416 /* As Tx wrbs have been freed up, wake up netdev queue 2454 /* As Tx wrbs have been freed up, wake up netdev queue
2417 * if it was stopped due to lack of tx wrbs. */ 2455 * if it was stopped due to lack of tx wrbs. */
2418 if (__netif_subqueue_stopped(adapter->netdev, idx) && 2456 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419 atomic_read(&txo->q.used) < txo->q.len / 2) { 2457 atomic_read(&txo->q.used) < txo->q.len / 2) {
2420 netif_wake_subqueue(adapter->netdev, idx); 2458 netif_wake_subqueue(adapter->netdev, idx);
2421 } 2459 }
2422 2460
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 2548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db + 2550 sliport_err1 = ioread32(adapter->db +
2513 SLIPORT_ERROR1_OFFSET); 2551 SLIPORT_ERROR1_OFFSET);
2514 sliport_err2 = ioread32(adapter->db + 2552 sliport_err2 = ioread32(adapter->db +
2515 SLIPORT_ERROR2_OFFSET); 2553 SLIPORT_ERROR2_OFFSET);
2516 adapter->hw_error = true; 2554 adapter->hw_error = true;
2517 /* Do not log error messages if its a FW reset */ 2555 /* Do not log error messages if its a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && 2556 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
2531 } 2569 }
2532 } else { 2570 } else {
2533 pci_read_config_dword(adapter->pdev, 2571 pci_read_config_dword(adapter->pdev,
2534 PCICFG_UE_STATUS_LOW, &ue_lo); 2572 PCICFG_UE_STATUS_LOW, &ue_lo);
2535 pci_read_config_dword(adapter->pdev, 2573 pci_read_config_dword(adapter->pdev,
2536 PCICFG_UE_STATUS_HIGH, &ue_hi); 2574 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537 pci_read_config_dword(adapter->pdev, 2575 pci_read_config_dword(adapter->pdev,
2538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2576 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539 pci_read_config_dword(adapter->pdev, 2577 pci_read_config_dword(adapter->pdev,
2540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2578 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2541 2579
2542 ue_lo = (ue_lo & ~ue_lo_mask); 2580 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask); 2581 ue_hi = (ue_hi & ~ue_hi_mask);
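
In the fallback path above, a set bit in a UE mask register means "ignore this error source", so only the bits that survive the masking indicate real unrecoverable errors. A tiny sketch with assumed register values:

/* Sketch of the UE status masking above: mask bits disable error
 * sources, so the surviving bits are genuine errors. Values assumed. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ue_lo = 0x0000F00F;       /* raw status word */
        uint32_t ue_lo_mask = 0x0000F000;  /* firmware-masked sources */

        ue_lo &= ~ue_lo_mask;
        printf("effective UE bits: 0x%08x\n", ue_lo);  /* 0x0000000f */
        return 0;
}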
@@ -2624,7 +2662,7 @@ fail:
2624} 2662}
2625 2663
2626static inline int be_msix_vec_get(struct be_adapter *adapter, 2664static inline int be_msix_vec_get(struct be_adapter *adapter,
2627 struct be_eq_obj *eqo) 2665 struct be_eq_obj *eqo)
2628{ 2666{
2629 return adapter->msix_entries[eqo->msix_idx].vector; 2667 return adapter->msix_entries[eqo->msix_idx].vector;
2630} 2668}
@@ -2648,7 +2686,7 @@ err_msix:
2648 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 2686 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2649 free_irq(be_msix_vec_get(adapter, eqo), eqo); 2687 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2650 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 2688 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2651 status); 2689 status);
2652 be_msix_disable(adapter); 2690 be_msix_disable(adapter);
2653 return status; 2691 return status;
2654} 2692}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2774{ 2812{
2775 struct be_rx_obj *rxo; 2813 struct be_rx_obj *rxo;
2776 int rc, i, j; 2814 int rc, i, j;
2777 u8 rsstable[128]; 2815 u8 rss_hkey[RSS_HASH_KEY_LEN];
2816 struct rss_info *rss = &adapter->rss_info;
2778 2817
2779 for_all_rx_queues(adapter, rxo, i) { 2818 for_all_rx_queues(adapter, rxo, i) {
2780 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, 2819 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2799 } 2838 }
2800 2839
2801 if (be_multi_rxq(adapter)) { 2840 if (be_multi_rxq(adapter)) {
2802 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { 2841 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2842 j += adapter->num_rx_qs - 1) {
2803 for_all_rss_queues(adapter, rxo, i) { 2843 for_all_rss_queues(adapter, rxo, i) {
2804 if ((j + i) >= 128) 2844 if ((j + i) >= RSS_INDIR_TABLE_LEN)
2805 break; 2845 break;
2806 rsstable[j + i] = rxo->rss_id; 2846 rss->rsstable[j + i] = rxo->rss_id;
2847 rss->rss_queue[j + i] = i;
2807 } 2848 }
2808 } 2849 }
2809 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 2850 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2810 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6; 2851 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2811 2852
2812 if (!BEx_chip(adapter)) 2853 if (!BEx_chip(adapter))
2813 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2854 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2814 RSS_ENABLE_UDP_IPV6; 2855 RSS_ENABLE_UDP_IPV6;
2815 } else { 2856 } else {
2816 /* Disable RSS, if only default RX Q is created */ 2857 /* Disable RSS, if only default RX Q is created */
2817 adapter->rss_flags = RSS_ENABLE_NONE; 2858 rss->rss_flags = RSS_ENABLE_NONE;
2818 } 2859 }
2819 2860
2820 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2861 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
2821 128); 2862 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
2863 128, rss_hkey);
2822 if (rc) { 2864 if (rc) {
2823 adapter->rss_flags = RSS_ENABLE_NONE; 2865 rss->rss_flags = RSS_ENABLE_NONE;
2824 return rc; 2866 return rc;
2825 } 2867 }
2826 2868
2869 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2870
2827 /* First time posting */ 2871 /* First time posting */
2828 for_all_rx_queues(adapter, rxo, i) 2872 for_all_rx_queues(adapter, rxo, i)
2829 be_post_rx_frags(rxo, GFP_KERNEL); 2873 be_post_rx_frags(rxo, GFP_KERNEL);
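
The RSS rework stores the 128-entry indirection table and a freshly randomized hash key in adapter->rss_info, filling table slots round-robin across the RSS-capable queues. A sketch of that fill, assuming RSS_INDIR_TABLE_LEN = 128 and four RSS queues (the default queue carries no RSS traffic, hence the num_rx_qs - 1 stride in the driver):

/* Sketch of the round-robin indirection table fill above; queue ids
 * stand in for rxo->rss_id. */
#include <stdio.h>

#define RSS_INDIR_TABLE_LEN 128

int main(void)
{
        unsigned char rsstable[RSS_INDIR_TABLE_LEN];
        int num_rss_qs = 4;   /* assumed: num_rx_qs - 1 */
        int i, j;

        for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss_qs)
                for (i = 0; i < num_rss_qs; i++) {
                        if (j + i >= RSS_INDIR_TABLE_LEN)
                                break;
                        rsstable[j + i] = (unsigned char)i;
                }

        /* every queue ends up with 128 / 4 = 32 table slots */
        printf("slot 0 -> q%d, slot 5 -> q%d, slot 127 -> q%d\n",
               (int)rsstable[0], (int)rsstable[5], (int)rsstable[127]);
        return 0;
}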
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2896 2940
2897 if (enable) { 2941 if (enable) {
2898 status = pci_write_config_dword(adapter->pdev, 2942 status = pci_write_config_dword(adapter->pdev,
2899 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); 2943 PCICFG_PM_CONTROL_OFFSET,
2944 PCICFG_PM_CONTROL_MASK);
2900 if (status) { 2945 if (status) {
2901 dev_err(&adapter->pdev->dev, 2946 dev_err(&adapter->pdev->dev,
2902 "Could not enable Wake-on-lan\n"); 2947 "Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2905 return status; 2950 return status;
2906 } 2951 }
2907 status = be_cmd_enable_magic_wol(adapter, 2952 status = be_cmd_enable_magic_wol(adapter,
2908 adapter->netdev->dev_addr, &cmd); 2953 adapter->netdev->dev_addr,
2954 &cmd);
2909 pci_enable_wake(adapter->pdev, PCI_D3hot, 1); 2955 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2910 pci_enable_wake(adapter->pdev, PCI_D3cold, 1); 2956 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2911 } else { 2957 } else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
2944 2990
2945 if (status) 2991 if (status)
2946 dev_err(&adapter->pdev->dev, 2992 dev_err(&adapter->pdev->dev,
2947 "Mac address assignment failed for VF %d\n", vf); 2993 "Mac address assignment failed for VF %d\n",
2994 vf);
2948 else 2995 else
2949 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 2996 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2950 2997
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3086 3133
3087 /* If a FW profile exists, then cap_flags are updated */ 3134 /* If a FW profile exists, then cap_flags are updated */
3088 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3135 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3089 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); 3136 BE_IF_FLAGS_BROADCAST |
3090 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3137 BE_IF_FLAGS_MULTICAST);
3091 &vf_cfg->if_handle, vf + 1); 3138 status =
3139 be_cmd_if_create(adapter, cap_flags, en_flags,
3140 &vf_cfg->if_handle, vf + 1);
3092 if (status) 3141 if (status)
3093 goto err; 3142 goto err;
3094 } 3143 }
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3119 struct be_vf_cfg *vf_cfg; 3168 struct be_vf_cfg *vf_cfg;
3120 int status, old_vfs, vf; 3169 int status, old_vfs, vf;
3121 u32 privileges; 3170 u32 privileges;
3122 u16 lnk_speed;
3123 3171
3124 old_vfs = pci_num_vf(adapter->pdev); 3172 old_vfs = pci_num_vf(adapter->pdev);
3125 if (old_vfs) { 3173 if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
3175 vf); 3223 vf);
3176 } 3224 }
3177 3225
3178 /* BE3 FW, by default, caps VF TX-rate to 100mbps. 3226 /* Allow full available bandwidth */
3179 * Allow full available bandwidth 3227 if (!old_vfs)
3180 */ 3228 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3181 if (BE3_chip(adapter) && !old_vfs)
3182 be_cmd_config_qos(adapter, 1000, vf + 1);
3183
3184 status = be_cmd_link_status_query(adapter, &lnk_speed,
3185 NULL, vf + 1);
3186 if (!status)
3187 vf_cfg->tx_rate = lnk_speed;
3188 3229
3189 if (!old_vfs) { 3230 if (!old_vfs) {
3190 be_cmd_enable_vf(adapter, vf + 1); 3231 be_cmd_enable_vf(adapter, vf + 1);
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
3590} 3631}
3591#endif 3632#endif
3592 3633
3593#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 3634static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3594static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3595
3596static bool be_flash_redboot(struct be_adapter *adapter,
3597 const u8 *p, u32 img_start, int image_size,
3598 int hdr_size)
3599{
3600 u32 crc_offset;
3601 u8 flashed_crc[4];
3602 int status;
3603
3604 crc_offset = hdr_size + img_start + image_size - 4;
3605
3606 p += crc_offset;
3607
3608 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3609 (image_size - 4));
3610 if (status) {
3611 dev_err(&adapter->pdev->dev,
3612 "could not get crc from flash, not flashing redboot\n");
3613 return false;
3614 }
3615
3616 /*update redboot only if crc does not match*/
3617 if (!memcmp(flashed_crc, p, 4))
3618 return false;
3619 else
3620 return true;
3621}
3622 3635
3623static bool phy_flashing_required(struct be_adapter *adapter) 3636static bool phy_flashing_required(struct be_adapter *adapter)
3624{ 3637{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
3649} 3662}
3650 3663
3651static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 3664static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3652 int header_size, 3665 int header_size,
3653 const struct firmware *fw) 3666 const struct firmware *fw)
3654{ 3667{
3655 struct flash_section_info *fsec = NULL; 3668 struct flash_section_info *fsec = NULL;
3656 const u8 *p = fw->data; 3669 const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3665 return NULL; 3678 return NULL;
3666} 3679}
3667 3680
3681static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3682 u32 img_offset, u32 img_size, int hdr_size,
3683 u16 img_optype, bool *crc_match)
3684{
3685 u32 crc_offset;
3686 int status;
3687 u8 crc[4];
3688
3689 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3690 if (status)
3691 return status;
3692
3693 crc_offset = hdr_size + img_offset + img_size - 4;
3694
3695 /* Skip flashing, if crc of flashed region matches */
3696 if (!memcmp(crc, p + crc_offset, 4))
3697 *crc_match = true;
3698 else
3699 *crc_match = false;
3700
3701 return status;
3702}
3703
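
The new helper compares the 4 CRC bytes stored at the tail of the on-flash image against the same bytes in the incoming UFI; a match means the section is unchanged and flashing can be skipped. A userspace sketch where read_flash_crc() is a stand-in for be_cmd_get_flash_crc() and the offsets mirror the layout above:

/* Sketch of the crc-match test above; read_flash_crc() fakes the
 * firmware query, and the last 4 bytes of each image hold its CRC. */
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static int read_flash_crc(uint8_t crc[4])
{
        memcpy(crc, "\x12\x34\x56\x78", 4);  /* pretend FW returned this */
        return 0;
}

static int check_flash_crc(const uint8_t *img, uint32_t img_offset,
                           uint32_t img_size, uint32_t hdr_size,
                           bool *crc_match)
{
        uint32_t crc_offset = hdr_size + img_offset + img_size - 4;
        uint8_t crc[4];
        int status = read_flash_crc(crc);

        if (status)
                return status;

        *crc_match = !memcmp(crc, img + crc_offset, 4);
        return 0;
}

int main(void)
{
        uint8_t fw[64] = { 0 };
        bool match = false;

        memcpy(&fw[64 - 4], "\x12\x34\x56\x78", 4);
        if (!check_flash_crc(fw, 0, sizeof(fw), 0, &match))
                printf("crc_match=%d -> %s\n", match,
                       match ? "skip flashing" : "flash section");
        return 0;
}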
3668static int be_flash(struct be_adapter *adapter, const u8 *img, 3704static int be_flash(struct be_adapter *adapter, const u8 *img,
3669 struct be_dma_mem *flash_cmd, int optype, int img_size) 3705 struct be_dma_mem *flash_cmd, int optype, int img_size)
3670{ 3706{
3671 u32 total_bytes = 0, flash_op, num_bytes = 0;
3672 int status = 0;
3673 struct be_cmd_write_flashrom *req = flash_cmd->va; 3707 struct be_cmd_write_flashrom *req = flash_cmd->va;
3708 u32 total_bytes, flash_op, num_bytes;
3709 int status;
3674 3710
3675 total_bytes = img_size; 3711 total_bytes = img_size;
3676 while (total_bytes) { 3712 while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3693 memcpy(req->data_buf, img, num_bytes); 3729 memcpy(req->data_buf, img, num_bytes);
3694 img += num_bytes; 3730 img += num_bytes;
3695 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 3731 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3696 flash_op, num_bytes); 3732 flash_op, num_bytes);
3697 if (status) { 3733 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3698 if (status == ILLEGAL_IOCTL_REQ && 3734 optype == OPTYPE_PHY_FW)
3699 optype == OPTYPE_PHY_FW) 3735 break;
3700 break; 3736 else if (status)
3701 dev_err(&adapter->pdev->dev,
3702 "cmd to write to flash rom failed.\n");
3703 return status; 3737 return status;
3704 }
3705 } 3738 }
3706 return 0; 3739 return 0;
3707} 3740}
3708 3741
3709/* For BE2, BE3 and BE3-R */ 3742/* For BE2, BE3 and BE3-R */
3710static int be_flash_BEx(struct be_adapter *adapter, 3743static int be_flash_BEx(struct be_adapter *adapter,
3711 const struct firmware *fw, 3744 const struct firmware *fw,
3712 struct be_dma_mem *flash_cmd, 3745 struct be_dma_mem *flash_cmd, int num_of_images)
3713 int num_of_images)
3714
3715{ 3746{
3716 int status = 0, i, filehdr_size = 0;
3717 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); 3747 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3718 const u8 *p = fw->data; 3748 struct device *dev = &adapter->pdev->dev;
3719 const struct flash_comp *pflashcomp;
3720 int num_comp, redboot;
3721 struct flash_section_info *fsec = NULL; 3749 struct flash_section_info *fsec = NULL;
3750 int status, i, filehdr_size, num_comp;
3751 const struct flash_comp *pflashcomp;
3752 bool crc_match;
3753 const u8 *p;
3722 3754
3723 struct flash_comp gen3_flash_types[] = { 3755 struct flash_comp gen3_flash_types[] = {
3724 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, 3756 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3775 /* Get flash section info*/ 3807 /* Get flash section info*/
3776 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3808 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3777 if (!fsec) { 3809 if (!fsec) {
3778 dev_err(&adapter->pdev->dev, 3810 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3779 "Invalid Cookie. UFI corrupted ?\n");
3780 return -1; 3811 return -1;
3781 } 3812 }
3782 for (i = 0; i < num_comp; i++) { 3813 for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
3792 continue; 3823 continue;
3793 3824
3794 if (pflashcomp[i].optype == OPTYPE_REDBOOT) { 3825 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3795 redboot = be_flash_redboot(adapter, fw->data, 3826 status = be_check_flash_crc(adapter, fw->data,
3796 pflashcomp[i].offset, pflashcomp[i].size, 3827 pflashcomp[i].offset,
3797 filehdr_size + img_hdrs_size); 3828 pflashcomp[i].size,
3798 if (!redboot) 3829 filehdr_size +
3830 img_hdrs_size,
3831 OPTYPE_REDBOOT, &crc_match);
3832 if (status) {
3833 dev_err(dev,
3834 "Could not get CRC for 0x%x region\n",
3835 pflashcomp[i].optype);
3836 continue;
3837 }
3838
3839 if (crc_match)
3799 continue; 3840 continue;
3800 } 3841 }
3801 3842
3802 p = fw->data; 3843 p = fw->data + filehdr_size + pflashcomp[i].offset +
3803 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; 3844 img_hdrs_size;
3804 if (p + pflashcomp[i].size > fw->data + fw->size) 3845 if (p + pflashcomp[i].size > fw->data + fw->size)
3805 return -1; 3846 return -1;
3806 3847
3807 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 3848 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3808 pflashcomp[i].size); 3849 pflashcomp[i].size);
3809 if (status) { 3850 if (status) {
3810 dev_err(&adapter->pdev->dev, 3851 dev_err(dev, "Flashing section type 0x%x failed\n",
3811 "Flashing section type %d failed.\n",
3812 pflashcomp[i].img_type); 3852 pflashcomp[i].img_type);
3813 return status; 3853 return status;
3814 } 3854 }
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
3816 return 0; 3856 return 0;
3817} 3857}
3818 3858
3859static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3860{
3861 u32 img_type = le32_to_cpu(fsec_entry.type);
3862 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3863
3864 if (img_optype != 0xFFFF)
3865 return img_optype;
3866
3867 switch (img_type) {
3868 case IMAGE_FIRMWARE_iSCSI:
3869 img_optype = OPTYPE_ISCSI_ACTIVE;
3870 break;
3871 case IMAGE_BOOT_CODE:
3872 img_optype = OPTYPE_REDBOOT;
3873 break;
3874 case IMAGE_OPTION_ROM_ISCSI:
3875 img_optype = OPTYPE_BIOS;
3876 break;
3877 case IMAGE_OPTION_ROM_PXE:
3878 img_optype = OPTYPE_PXE_BIOS;
3879 break;
3880 case IMAGE_OPTION_ROM_FCoE:
3881 img_optype = OPTYPE_FCOE_BIOS;
3882 break;
3883 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3884 img_optype = OPTYPE_ISCSI_BACKUP;
3885 break;
3886 case IMAGE_NCSI:
3887 img_optype = OPTYPE_NCSI_FW;
3888 break;
3889 case IMAGE_FLASHISM_JUMPVECTOR:
3890 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3891 break;
3892 case IMAGE_FIRMWARE_PHY:
3893 img_optype = OPTYPE_SH_PHY_FW;
3894 break;
3895 case IMAGE_REDBOOT_DIR:
3896 img_optype = OPTYPE_REDBOOT_DIR;
3897 break;
3898 case IMAGE_REDBOOT_CONFIG:
3899 img_optype = OPTYPE_REDBOOT_CONFIG;
3900 break;
3901 case IMAGE_UFI_DIR:
3902 img_optype = OPTYPE_UFI_DIR;
3903 break;
3904 default:
3905 break;
3906 }
3907
3908 return img_optype;
3909}
3910
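
be_get_img_optype() trusts the optype written into newer flash section entries (anything other than 0xFFFF) and only derives one from the image type as a fallback. A table-driven sketch of that fallback; the numeric constants are placeholders, not the real be_cmds.h values:

/* Table-driven sketch of the img_type -> optype fallback above; all
 * numeric ids are placeholders. */
#include <stdint.h>
#include <stdio.h>

struct optype_map {
        uint32_t img_type;
        uint16_t optype;
};

static const struct optype_map map[] = {
        { 1 /* "IMAGE_FIRMWARE_iSCSI" */, 10 /* "OPTYPE_ISCSI_ACTIVE" */ },
        { 2 /* "IMAGE_BOOT_CODE"      */, 11 /* "OPTYPE_REDBOOT"      */ },
        { 3 /* "IMAGE_OPTION_ROM_PXE" */, 12 /* "OPTYPE_PXE_BIOS"     */ },
};

static uint16_t get_img_optype(uint16_t entry_optype, uint32_t img_type)
{
        size_t i;

        if (entry_optype != 0xFFFF)      /* new-style entry: trust it */
                return entry_optype;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if (map[i].img_type == img_type)
                        return map[i].optype;

        return 0xFFFF;                   /* unknown image: caller skips it */
}

int main(void)
{
        printf("%u %u\n", (unsigned)get_img_optype(0xFFFF, 2),
               (unsigned)get_img_optype(7, 2));   /* 11 7 */
        return 0;
}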
3819static int be_flash_skyhawk(struct be_adapter *adapter, 3911static int be_flash_skyhawk(struct be_adapter *adapter,
3820 const struct firmware *fw, 3912 const struct firmware *fw,
3821 struct be_dma_mem *flash_cmd, int num_of_images) 3913 struct be_dma_mem *flash_cmd, int num_of_images)
3822{ 3914{
3823 int status = 0, i, filehdr_size = 0;
3824 int img_offset, img_size, img_optype, redboot;
3825 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 3915 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3826 const u8 *p = fw->data; 3916 struct device *dev = &adapter->pdev->dev;
3827 struct flash_section_info *fsec = NULL; 3917 struct flash_section_info *fsec = NULL;
3918 u32 img_offset, img_size, img_type;
3919 int status, i, filehdr_size;
3920 bool crc_match, old_fw_img;
3921 u16 img_optype;
3922 const u8 *p;
3828 3923
3829 filehdr_size = sizeof(struct flash_file_hdr_g3); 3924 filehdr_size = sizeof(struct flash_file_hdr_g3);
3830 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3925 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3831 if (!fsec) { 3926 if (!fsec) {
3832 dev_err(&adapter->pdev->dev, 3927 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3833 "Invalid Cookie. UFI corrupted ?\n");
3834 return -1; 3928 return -1;
3835 } 3929 }
3836 3930
3837 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 3931 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3838 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 3932 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3839 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 3933 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3934 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3935 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3936 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
3840 3937
3841 switch (le32_to_cpu(fsec->fsec_entry[i].type)) { 3938 if (img_optype == 0xFFFF)
3842 case IMAGE_FIRMWARE_iSCSI:
3843 img_optype = OPTYPE_ISCSI_ACTIVE;
3844 break;
3845 case IMAGE_BOOT_CODE:
3846 img_optype = OPTYPE_REDBOOT;
3847 break;
3848 case IMAGE_OPTION_ROM_ISCSI:
3849 img_optype = OPTYPE_BIOS;
3850 break;
3851 case IMAGE_OPTION_ROM_PXE:
3852 img_optype = OPTYPE_PXE_BIOS;
3853 break;
3854 case IMAGE_OPTION_ROM_FCoE:
3855 img_optype = OPTYPE_FCOE_BIOS;
3856 break;
3857 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3858 img_optype = OPTYPE_ISCSI_BACKUP;
3859 break;
3860 case IMAGE_NCSI:
3861 img_optype = OPTYPE_NCSI_FW;
3862 break;
3863 default:
3864 continue; 3939 continue;
3940 /* Don't bother verifying CRC if an old FW image is being
3941 * flashed
3942 */
3943 if (old_fw_img)
3944 goto flash;
3945
3946 status = be_check_flash_crc(adapter, fw->data, img_offset,
3947 img_size, filehdr_size +
3948 img_hdrs_size, img_optype,
3949 &crc_match);
3950 /* The current FW image on the card does not recognize the new
3951 * FLASH op_type. The FW download is partially complete.
3952 * Reboot the server now to enable FW image to recognize the
3953 * new FLASH op_type. To complete the remaining process,
3954 * download the same FW again after the reboot.
3955 */
3956 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3957 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
3958 dev_err(dev, "Flash incomplete. Reset the server\n");
3959 dev_err(dev, "Download FW image again after reset\n");
3960 return -EAGAIN;
3961 } else if (status) {
3962 dev_err(dev, "Could not get CRC for 0x%x region\n",
3963 img_optype);
3964 return -EFAULT;
3865 } 3965 }
3866 3966
3867 if (img_optype == OPTYPE_REDBOOT) { 3967 if (crc_match)
3868 redboot = be_flash_redboot(adapter, fw->data, 3968 continue;
3869 img_offset, img_size,
3870 filehdr_size + img_hdrs_size);
3871 if (!redboot)
3872 continue;
3873 }
3874 3969
3875 p = fw->data; 3970flash:
3876 p += filehdr_size + img_offset + img_hdrs_size; 3971 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
3877 if (p + img_size > fw->data + fw->size) 3972 if (p + img_size > fw->data + fw->size)
3878 return -1; 3973 return -1;
3879 3974
3880 status = be_flash(adapter, p, flash_cmd, img_optype, img_size); 3975 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3881 if (status) { 3976 /* For old FW images ignore ILLEGAL_FIELD error or errors on
3882 dev_err(&adapter->pdev->dev, 3977 * UFI_DIR region
3883 "Flashing section type %d failed.\n", 3978 */
3884 fsec->fsec_entry[i].type); 3979 if (old_fw_img &&
3885 return status; 3980 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
3981 (img_optype == OPTYPE_UFI_DIR &&
3982 base_status(status) == MCC_STATUS_FAILED))) {
3983 continue;
3984 } else if (status) {
3985 dev_err(dev, "Flashing section type 0x%x failed\n",
3986 img_type);
3987 return -EFAULT;
3886 } 3988 }
3887 } 3989 }
3888 return 0; 3990 return 0;
3889} 3991}
3890 3992
3891static int lancer_fw_download(struct be_adapter *adapter, 3993static int lancer_fw_download(struct be_adapter *adapter,
3892 const struct firmware *fw) 3994 const struct firmware *fw)
3893{ 3995{
3894#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) 3996#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3895#define LANCER_FW_DOWNLOAD_LOCATION "/prg" 3997#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
3955 } 4057 }
3956 4058
3957 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 4059 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3958 flash_cmd.dma); 4060 flash_cmd.dma);
3959 if (status) { 4061 if (status) {
3960 dev_err(&adapter->pdev->dev, 4062 dev_err(&adapter->pdev->dev,
3961 "Firmware load error. " 4063 "Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3976 goto lancer_fw_exit; 4078 goto lancer_fw_exit;
3977 } 4079 }
3978 } else if (change_status != LANCER_NO_RESET_NEEDED) { 4080 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3979 dev_err(&adapter->pdev->dev, 4081 dev_err(&adapter->pdev->dev,
3980 "System reboot required for new FW" 4082 "System reboot required for new FW to be active\n");
3981 " to be active\n");
3982 } 4083 }
3983 4084
3984 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 4085 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4042 switch (ufi_type) { 4143 switch (ufi_type) {
4043 case UFI_TYPE4: 4144 case UFI_TYPE4:
4044 status = be_flash_skyhawk(adapter, fw, 4145 status = be_flash_skyhawk(adapter, fw,
4045 &flash_cmd, num_imgs); 4146 &flash_cmd, num_imgs);
4046 break; 4147 break;
4047 case UFI_TYPE3R: 4148 case UFI_TYPE3R:
4048 status = be_flash_BEx(adapter, fw, &flash_cmd, 4149 status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
4112 return status; 4213 return status;
4113} 4214}
4114 4215
4115static int be_ndo_bridge_setlink(struct net_device *dev, 4216static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4116 struct nlmsghdr *nlh)
4117{ 4217{
4118 struct be_adapter *adapter = netdev_priv(dev); 4218 struct be_adapter *adapter = netdev_priv(dev);
4119 struct nlattr *attr, *br_spec; 4219 struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
4155} 4255}
4156 4256
4157static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4257static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4158 struct net_device *dev, 4258 struct net_device *dev, u32 filter_mask)
4159 u32 filter_mask)
4160{ 4259{
4161 struct be_adapter *adapter = netdev_priv(dev); 4260 struct be_adapter *adapter = netdev_priv(dev);
4162 int status = 0; 4261 int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
4254 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 4353 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4255 .ndo_set_vf_mac = be_set_vf_mac, 4354 .ndo_set_vf_mac = be_set_vf_mac,
4256 .ndo_set_vf_vlan = be_set_vf_vlan, 4355 .ndo_set_vf_vlan = be_set_vf_vlan,
4257 .ndo_set_vf_tx_rate = be_set_vf_tx_rate, 4356 .ndo_set_vf_rate = be_set_vf_tx_rate,
4258 .ndo_get_vf_config = be_get_vf_config, 4357 .ndo_get_vf_config = be_get_vf_config,
4259 .ndo_set_vf_link_state = be_set_vf_link_state, 4358 .ndo_set_vf_link_state = be_set_vf_link_state,
4260#ifdef CONFIG_NET_POLL_CONTROLLER 4359#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
4301 4400
4302 netdev->netdev_ops = &be_netdev_ops; 4401 netdev->netdev_ops = &be_netdev_ops;
4303 4402
4304 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); 4403 netdev->ethtool_ops = &be_ethtool_ops;
4305} 4404}
4306 4405
4307static void be_unmap_pci_bars(struct be_adapter *adapter) 4406static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
4870} 4969}
4871 4970
4872static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, 4971static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4873 pci_channel_state_t state) 4972 pci_channel_state_t state)
4874{ 4973{
4875 struct be_adapter *adapter = pci_get_drvdata(pdev); 4974 struct be_adapter *adapter = pci_get_drvdata(pdev);
4876 struct net_device *netdev = adapter->netdev; 4975 struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
769 return phy_mii_ioctl(phy, ifr, cmd); 769 return phy_mii_ioctl(phy, ifr, cmd);
770} 770}
771 771
772static int ethoc_config(struct net_device *dev, struct ifmap *map)
773{
774 return -ENOSYS;
775}
776
777static void ethoc_do_set_mac_address(struct net_device *dev) 772static void ethoc_do_set_mac_address(struct net_device *dev)
778{ 773{
779 struct ethoc *priv = netdev_priv(dev); 774 struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
995 .ndo_open = ethoc_open, 990 .ndo_open = ethoc_open,
996 .ndo_stop = ethoc_stop, 991 .ndo_stop = ethoc_stop,
997 .ndo_do_ioctl = ethoc_ioctl, 992 .ndo_do_ioctl = ethoc_ioctl,
998 .ndo_set_config = ethoc_config,
999 .ndo_set_mac_address = ethoc_set_mac_address, 993 .ndo_set_mac_address = ethoc_set_mac_address,
1000 .ndo_set_rx_mode = ethoc_set_multicast_list, 994 .ndo_set_rx_mode = ethoc_set_multicast_list,
1001 .ndo_change_mtu = ethoc_change_mtu, 995 .ndo_change_mtu = ethoc_change_mtu,
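
Removing the -ENOSYS stub leaves ndo_set_config NULL, and the ioctl core already reports an unsupported operation for a NULL callback, so the stub added nothing. A sketch of that optional-callback pattern; the types and error value are illustrative:

/* Sketch of the optional-callback pattern behind this removal: a NULL
 * op already means "unsupported". The -95 value is illustrative. */
#include <stdio.h>

struct dev_ops {
        int (*set_config)(void *dev);   /* optional */
};

static int dev_set_config(const struct dev_ops *ops, void *dev)
{
        if (ops->set_config)
                return ops->set_config(dev);
        return -95;                      /* core supplies the error itself */
}

int main(void)
{
        struct dev_ops ops = { .set_config = NULL };

        printf("%d\n", dev_set_config(&ops, NULL));  /* -95 */
        return 0;
}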
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069eabc4f8..c77fa4a69844 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
1210 1210
1211 SET_NETDEV_DEV(netdev, &pdev->dev); 1211 SET_NETDEV_DEV(netdev, &pdev->dev);
1212 1212
1213 SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops); 1213 netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1214 netdev->netdev_ops = &ftgmac100_netdev_ops; 1214 netdev->netdev_ops = &ftgmac100_netdev_ops;
1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; 1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
1216 1216
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40c0a12..4ff1adc6bfca 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1085 } 1085 }
1086 1086
1087 SET_NETDEV_DEV(netdev, &pdev->dev); 1087 SET_NETDEV_DEV(netdev, &pdev->dev);
1088 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops); 1088 netdev->ethtool_ops = &ftmac100_ethtool_ops;
1089 netdev->netdev_ops = &ftmac100_netdev_ops; 1089 netdev->netdev_ops = &ftmac100_netdev_ops;
1090 1090
1091 platform_set_drvdata(pdev, netdev); 1091 platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8604ee..270308315d43 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
67 tristate "Freescale XGMAC MDIO" 67 tristate "Freescale XGMAC MDIO"
68 depends on FSL_SOC 68 depends on FSL_SOC
69 select PHYLIB 69 select PHYLIB
70 select OF_MDIO
70 ---help--- 71 ---help---
71 This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
72 73
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..671d080105a7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
221#define BD_ENET_TX_RCMASK ((ushort)0x003c) 221#define BD_ENET_TX_RCMASK ((ushort)0x003c)
222#define BD_ENET_TX_UN ((ushort)0x0002) 222#define BD_ENET_TX_UN ((ushort)0x0002)
223#define BD_ENET_TX_CSL ((ushort)0x0001) 223#define BD_ENET_TX_CSL ((ushort)0x0001)
224#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ 224#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
225 225
226/*enhanced buffer descriptor control/status used by Ethernet transmit*/ 226/*enhanced buffer descriptor control/status used by Ethernet transmit*/
227#define BD_ENET_TX_INT 0x40000000 227#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) 246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
247#define FEC_ENET_TX_FRSIZE 2048 247#define FEC_ENET_TX_FRSIZE 2048
248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) 248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
249#define TX_RING_SIZE 16 /* Must be power of two */ 249#define TX_RING_SIZE 512 /* Must be power of two */
250#define TX_RING_MOD_MASK 15 /* for this to work */ 250#define TX_RING_MOD_MASK 511 /* for this to work */
251 251
252#define BD_ENET_RX_INT 0x00800000 252#define BD_ENET_RX_INT 0x00800000
253#define BD_ENET_RX_PTP ((ushort)0x0400) 253#define BD_ENET_RX_PTP ((ushort)0x0400)
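
TX_RING_SIZE grows to 512 with TX_RING_MOD_MASK kept at size minus one; holding the size to a power of two keeps index wrap-around a single AND rather than a modulo. A short sketch:

/* Why TX_RING_SIZE must stay a power of two: wrap-around is then a
 * mask. Values mirror the new fec.h constants. */
#include <stdio.h>

#define TX_RING_SIZE     512
#define TX_RING_MOD_MASK 511   /* TX_RING_SIZE - 1 */

int main(void)
{
        unsigned int idx = 510;
        int i;

        for (i = 0; i < 4; i++) {
                printf("%u\n", idx);          /* 510 511 0 1 */
                idx = (idx + 1) & TX_RING_MOD_MASK;
        }
        return 0;
}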
@@ -296,8 +296,15 @@ struct fec_enet_private {
296 /* The ring entries to be free()ed */ 296 /* The ring entries to be free()ed */
297 struct bufdesc *dirty_tx; 297 struct bufdesc *dirty_tx;
298 298
299 unsigned short bufdesc_size;
299 unsigned short tx_ring_size; 300 unsigned short tx_ring_size;
300 unsigned short rx_ring_size; 301 unsigned short rx_ring_size;
302 unsigned short tx_stop_threshold;
303 unsigned short tx_wake_threshold;
304
305 /* Software TSO */
306 char *tso_hdrs;
307 dma_addr_t tso_hdrs_dma;
301 308
302 struct platform_device *pdev; 309 struct platform_device *pdev;
303 310
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e439f0c5..38d9d276ab8b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/ip.h> 37#include <linux/ip.h>
38#include <net/ip.h> 38#include <net/ip.h>
39#include <net/tso.h>
39#include <linux/tcp.h> 40#include <linux/tcp.h>
40#include <linux/udp.h> 41#include <linux/udp.h>
41#include <linux/icmp.h> 42#include <linux/icmp.h>
@@ -54,6 +55,7 @@
54#include <linux/of_net.h> 55#include <linux/of_net.h>
55#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
56#include <linux/if_vlan.h> 57#include <linux/if_vlan.h>
58#include <linux/pinctrl/consumer.h>
57 59
58#include <asm/cacheflush.h> 60#include <asm/cacheflush.h>
59 61
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
172#endif 174#endif
173#endif /* CONFIG_M5272 */ 175#endif /* CONFIG_M5272 */
174 176
175#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
176#error "FEC: descriptor ring size constants too large"
177#endif
178
179/* Interrupt events/masks. */ 177/* Interrupt events/masks. */
180#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ 178#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
181#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ 179#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
231#define FEC_PAUSE_FLAG_AUTONEG 0x1 229#define FEC_PAUSE_FLAG_AUTONEG 0x1
232#define FEC_PAUSE_FLAG_ENABLE 0x2 230#define FEC_PAUSE_FLAG_ENABLE 0x2
233 231
232#define TSO_HEADER_SIZE 128
233/* Max number of allowed TCP segments for software TSO */
234#define FEC_MAX_TSO_SEGS 100
235#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
236
237#define IS_TSO_HEADER(txq, addr) \
238 ((addr >= txq->tso_hdrs_dma) && \
239 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
240
234static int mii_cnt; 241static int mii_cnt;
235 242
236static inline 243static inline
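
IS_TSO_HEADER() tests whether a DMA address falls inside the preallocated TSO header pool, where each of the tx_ring_size slots owns TSO_HEADER_SIZE bytes, so completion handling knows not to unmap header slots like ordinary buffers. A sketch with made-up addresses:

/* Sketch of the IS_TSO_HEADER range test above; the DMA base address
 * is a made-up example value. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128
#define TX_RING_SIZE    512

static bool is_tso_header(uint64_t addr, uint64_t tso_hdrs_dma)
{
        return addr >= tso_hdrs_dma &&
               addr < tso_hdrs_dma + (uint64_t)TX_RING_SIZE * TSO_HEADER_SIZE;
}

int main(void)
{
        uint64_t base = 0x10000000;   /* assumed DMA base of the header pool */

        printf("%d %d\n",
               is_tso_header(base + 5 * TSO_HEADER_SIZE, base),  /* 1 */
               is_tso_header(base - 4, base));                   /* 0 */
        return 0;
}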
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
286 return (new_bd < base) ? (new_bd + ring_size) : new_bd; 293 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
287} 294}
288 295
296static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
297 struct fec_enet_private *fep)
298{
299 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
300}
301
302static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
303{
304 int entries;
305
306 entries = ((const char *)fep->dirty_tx -
307 (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
308
309 return entries > 0 ? entries : entries + fep->tx_ring_size;
310}
311
289static void *swap_buffer(void *bufaddr, int len) 312static void *swap_buffer(void *bufaddr, int len)
290{ 313{
291 int i; 314 int i;
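
fec_enet_get_free_txdesc_num(), added above, turns the byte distance between dirty_tx and cur_tx into a descriptor count, reserving one slot so the ring never appears completely full; a non-positive distance just means cur_tx has wrapped past dirty_tx. A userspace model using indices instead of pointers:

/* Userspace model of fec_enet_get_free_txdesc_num(): descriptor
 * distance, minus one reserved slot, wrapped by the ring size. */
#include <stdio.h>

#define RING_SIZE 512

static int free_txdesc(int dirty_tx, int cur_tx)
{
        int entries = dirty_tx - cur_tx - 1;

        return entries > 0 ? entries : entries + RING_SIZE;
}

int main(void)
{
        printf("%d\n", free_txdesc(100, 40));  /* 59 free */
        printf("%d\n", free_txdesc(10, 500));  /* wrapped: 21 free */
        printf("%d\n", free_txdesc(40, 39));   /* ring full but one slot: 0 */
        return 0;
}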
@@ -307,33 +330,133 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
307 if (unlikely(skb_cow_head(skb, 0))) 330 if (unlikely(skb_cow_head(skb, 0)))
308 return -1; 331 return -1;
309 332
333 ip_hdr(skb)->check = 0;
310 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 334 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
311 335
312 return 0; 336 return 0;
313} 337}
314 338
315static netdev_tx_t 339static void
316fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 340fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
341{
342 const struct platform_device_id *id_entry =
343 platform_get_device_id(fep->pdev);
344 struct bufdesc *bdp_pre;
345
346 bdp_pre = fec_enet_get_prevdesc(bdp, fep);
347 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
348 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
349 fep->delay_work.trig_tx = true;
350 schedule_delayed_work(&(fep->delay_work.delay_work),
351 msecs_to_jiffies(1));
352 }
353}
354
355static int
356fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
317{ 357{
318 struct fec_enet_private *fep = netdev_priv(ndev); 358 struct fec_enet_private *fep = netdev_priv(ndev);
319 const struct platform_device_id *id_entry = 359 const struct platform_device_id *id_entry =
320 platform_get_device_id(fep->pdev); 360 platform_get_device_id(fep->pdev);
321 struct bufdesc *bdp, *bdp_pre; 361 struct bufdesc *bdp = fep->cur_tx;
322 void *bufaddr; 362 struct bufdesc_ex *ebdp;
323 unsigned short status; 363 int nr_frags = skb_shinfo(skb)->nr_frags;
364 int frag, frag_len;
365 unsigned short status;
366 unsigned int estatus = 0;
367 skb_frag_t *this_frag;
324 unsigned int index; 368 unsigned int index;
369 void *bufaddr;
370 int i;
325 371
326 /* Fill in a Tx ring entry */ 372 for (frag = 0; frag < nr_frags; frag++) {
373 this_frag = &skb_shinfo(skb)->frags[frag];
374 bdp = fec_enet_get_nextdesc(bdp, fep);
375 ebdp = (struct bufdesc_ex *)bdp;
376
377 status = bdp->cbd_sc;
378 status &= ~BD_ENET_TX_STATS;
379 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
380 frag_len = skb_shinfo(skb)->frags[frag].size;
381
382 /* Handle the last BD specially */
383 if (frag == nr_frags - 1) {
384 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
385 if (fep->bufdesc_ex) {
386 estatus |= BD_ENET_TX_INT;
387 if (unlikely(skb_shinfo(skb)->tx_flags &
388 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
389 estatus |= BD_ENET_TX_TS;
390 }
391 }
392
393 if (fep->bufdesc_ex) {
394 if (skb->ip_summed == CHECKSUM_PARTIAL)
395 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
396 ebdp->cbd_bdu = 0;
397 ebdp->cbd_esc = estatus;
398 }
399
400 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
401
402 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
403 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
404 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
405 memcpy(fep->tx_bounce[index], bufaddr, frag_len);
406 bufaddr = fep->tx_bounce[index];
407
408 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
409 swap_buffer(bufaddr, frag_len);
410 }
411
412 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
413 frag_len, DMA_TO_DEVICE);
414 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
415 dev_kfree_skb_any(skb);
416 if (net_ratelimit())
417 netdev_err(ndev, "Tx DMA memory map failed\n");
418 goto dma_mapping_error;
419 }
420
421 bdp->cbd_datlen = frag_len;
422 bdp->cbd_sc = status;
423 }
424
425 fep->cur_tx = bdp;
426
427 return 0;
428
429dma_mapping_error:
327 bdp = fep->cur_tx; 430 bdp = fep->cur_tx;
431 for (i = 0; i < frag; i++) {
432 bdp = fec_enet_get_nextdesc(bdp, fep);
433 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
434 bdp->cbd_datlen, DMA_TO_DEVICE);
435 }
436 return NETDEV_TX_OK;
437}
328 438
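
When dma_map_single() fails partway through the fragment loop above, the error path walks forward from cur_tx and unmaps exactly the fragments that were already mapped before dropping the skb. A sketch of that unwind pattern; map_frag() and unmap_frag() are illustrative stand-ins:

/* Sketch of the unwind-on-error pattern above: map fragments one by
 * one and, on failure, unmap only the ones already mapped. */
#include <stdbool.h>
#include <stdio.h>

static bool map_frag(int i)   { return i != 3; }  /* pretend frag 3 fails */
static void unmap_frag(int i) { printf("unmap frag %d\n", i); }

static int submit_frags(int nr_frags)
{
        int frag, i;

        for (frag = 0; frag < nr_frags; frag++)
                if (!map_frag(frag))
                        goto dma_mapping_error;
        return 0;

dma_mapping_error:
        for (i = 0; i < frag; i++)   /* only the frags mapped so far */
                unmap_frag(i);
        return -1;
}

int main(void)
{
        return submit_frags(6) ? 1 : 0;   /* unmaps frags 0..2 */
}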
329 status = bdp->cbd_sc; 439static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
440{
441 struct fec_enet_private *fep = netdev_priv(ndev);
442 const struct platform_device_id *id_entry =
443 platform_get_device_id(fep->pdev);
444 int nr_frags = skb_shinfo(skb)->nr_frags;
445 struct bufdesc *bdp, *last_bdp;
446 void *bufaddr;
447 unsigned short status;
448 unsigned short buflen;
449 unsigned int estatus = 0;
450 unsigned int index;
451 int entries_free;
452 int ret;
330 453
331 if (status & BD_ENET_TX_READY) { 454 entries_free = fec_enet_get_free_txdesc_num(fep);
332 /* Ooops. All transmit buffers are full. Bail out. 455 if (entries_free < MAX_SKB_FRAGS + 1) {
333 * This should not happen, since ndev->tbusy should be set. 456 dev_kfree_skb_any(skb);
334 */ 457 if (net_ratelimit())
335 netdev_err(ndev, "tx queue full!\n"); 458 netdev_err(ndev, "NOT enough BD for SG!\n");
336 return NETDEV_TX_BUSY; 459 return NETDEV_TX_OK;
337 } 460 }
338 461
339 /* Protocol checksum off-load for TCP and UDP. */ 462 /* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +465,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
342 return NETDEV_TX_OK; 465 return NETDEV_TX_OK;
343 } 466 }
344 467
345 /* Clear all of the status flags */ 468 /* Fill in a Tx ring entry */
469 bdp = fep->cur_tx;
470 status = bdp->cbd_sc;
346 status &= ~BD_ENET_TX_STATS; 471 status &= ~BD_ENET_TX_STATS;
347 472
348 /* Set buffer length and buffer pointer */ 473 /* Set buffer length and buffer pointer */
349 bufaddr = skb->data; 474 bufaddr = skb->data;
350 bdp->cbd_datlen = skb->len; 475 buflen = skb_headlen(skb);
351
352 /*
353 * On some FEC implementations data must be aligned on
354 * 4-byte boundaries. Use bounce buffers to copy data
355 * and get it aligned. Ugh.
356 */
357 if (fep->bufdesc_ex)
358 index = (struct bufdesc_ex *)bdp -
359 (struct bufdesc_ex *)fep->tx_bd_base;
360 else
361 index = bdp - fep->tx_bd_base;
362 476
363 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 477 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
364 memcpy(fep->tx_bounce[index], skb->data, skb->len); 478 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
479 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
480 memcpy(fep->tx_bounce[index], skb->data, buflen);
365 bufaddr = fep->tx_bounce[index]; 481 bufaddr = fep->tx_bounce[index];
366 }
367 482
368 /* 483 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
369 * Some design made an incorrect assumption on endian mode of 484 swap_buffer(bufaddr, buflen);
370 * the system that it's running on. As the result, driver has to 485 }
371 * swap every frame going to and coming from the controller.
372 */
373 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
374 swap_buffer(bufaddr, skb->len);
375
376 /* Save skb pointer */
377 fep->tx_skbuff[index] = skb;
378 486
379 /* Push the data cache so the CPM does not get stale memory 487 /* Push the data cache so the CPM does not get stale memory
380 * data. 488 * data.
381 */ 489 */
382 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 490 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
383 skb->len, DMA_TO_DEVICE); 491 buflen, DMA_TO_DEVICE);
384 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 492 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
385 bdp->cbd_bufaddr = 0;
386 fep->tx_skbuff[index] = NULL;
387 dev_kfree_skb_any(skb); 493 dev_kfree_skb_any(skb);
388 if (net_ratelimit()) 494 if (net_ratelimit())
389 netdev_err(ndev, "Tx DMA memory map failed\n"); 495 netdev_err(ndev, "Tx DMA memory map failed\n");
390 return NETDEV_TX_OK; 496 return NETDEV_TX_OK;
391 } 497 }
392 498
499 if (nr_frags) {
500 ret = fec_enet_txq_submit_frag_skb(skb, ndev);
501 if (ret)
502 return ret;
503 } else {
504 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
505 if (fep->bufdesc_ex) {
506 estatus = BD_ENET_TX_INT;
507 if (unlikely(skb_shinfo(skb)->tx_flags &
508 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
509 estatus |= BD_ENET_TX_TS;
510 }
511 }
512
393 if (fep->bufdesc_ex) { 513 if (fep->bufdesc_ex) {
394 514
395 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 515 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
396 ebdp->cbd_bdu = 0; 516
397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 517 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
398 fep->hwts_tx_en)) { 518 fep->hwts_tx_en))
399 ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
400 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 519 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
401 } else {
402 ebdp->cbd_esc = BD_ENET_TX_INT;
403 520
404 /* Enable protocol checksum flags 521 if (skb->ip_summed == CHECKSUM_PARTIAL)
405 * We do not bother with the IP Checksum bits as they 522 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
406 * are done by the kernel 523
407 */ 524 ebdp->cbd_bdu = 0;
408 if (skb->ip_summed == CHECKSUM_PARTIAL) 525 ebdp->cbd_esc = estatus;
409 ebdp->cbd_esc |= BD_ENET_TX_PINS;
410 }
411 } 526 }
412 527
528 last_bdp = fep->cur_tx;
529 index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
530 /* Save skb pointer */
531 fep->tx_skbuff[index] = skb;
532
533 bdp->cbd_datlen = buflen;
534
413 /* Send it on its way. Tell FEC it's ready, interrupt when done, 535 /* Send it on its way. Tell FEC it's ready, interrupt when done,
414 * it's the last BD of the frame, and to put the CRC on the end. 536 * it's the last BD of the frame, and to put the CRC on the end.
415 */ 537 */
416 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 538 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
417 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
418 bdp->cbd_sc = status; 539 bdp->cbd_sc = status;
419 540
420 bdp_pre = fec_enet_get_prevdesc(bdp, fep); 541 fec_enet_submit_work(bdp, fep);
421 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
422 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
423 fep->delay_work.trig_tx = true;
424 schedule_delayed_work(&(fep->delay_work.delay_work),
425 msecs_to_jiffies(1));
426 }
427 542
428 /* If this was the last BD in the ring, start at the beginning again. */ 543 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 544 bdp = fec_enet_get_nextdesc(last_bdp, fep);
430 545
431 skb_tx_timestamp(skb); 546 skb_tx_timestamp(skb);
432 547
433 fep->cur_tx = bdp; 548 fep->cur_tx = bdp;
434 549
435 if (fep->cur_tx == fep->dirty_tx) 550 /* Trigger transmission start */
436 netif_stop_queue(ndev); 551 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
552
553 return 0;
554}
555
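A note on the bounce-buffer path above: it handles two quirks at once, FEC revisions whose DMA engine needs 4-byte-aligned buffers, and FEC_QUIRK_SWAP_FRAME parts that need the frame byte-swapped before the controller sees it. A minimal sketch of the idea, with illustrative names ('bounce', 'align_mask' are not driver fields):

#include <linux/string.h>
#include <linux/types.h>

/* Sketch only: pick a DMA-safe buffer for 'data'.  When the payload is
 * not aligned for the DMA engine (or must be byte-swapped in place, as
 * on FEC_QUIRK_SWAP_FRAME parts), copy it into a preallocated bounce
 * buffer and map that instead of the skb data.
 */
static void *dma_safe_buf(void *data, size_t len, void *bounce,
			  unsigned long align_mask)
{
	if ((unsigned long)data & align_mask) {
		memcpy(bounce, data, len);
		return bounce;		/* map the aligned copy */
	}
	return data;			/* safe to map in place */
}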
556static int
557fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
558 struct bufdesc *bdp, int index, char *data,
559 int size, bool last_tcp, bool is_last)
560{
561 struct fec_enet_private *fep = netdev_priv(ndev);
562 const struct platform_device_id *id_entry =
563 platform_get_device_id(fep->pdev);
564 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
565 unsigned short status;
566 unsigned int estatus = 0;
567
568 status = bdp->cbd_sc;
569 status &= ~BD_ENET_TX_STATS;
570
571 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
572 bdp->cbd_datlen = size;
573
574 if (((unsigned long) data) & FEC_ALIGNMENT ||
575 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
576 memcpy(fep->tx_bounce[index], data, size);
577 data = fep->tx_bounce[index];
578
579 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
580 swap_buffer(data, size);
581 }
582
583 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
584 size, DMA_TO_DEVICE);
585 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
586 dev_kfree_skb_any(skb);
587 if (net_ratelimit())
588 netdev_err(ndev, "Tx DMA memory map failed\n");
589 return NETDEV_TX_BUSY;
590 }
591
592 if (fep->bufdesc_ex) {
593 if (skb->ip_summed == CHECKSUM_PARTIAL)
594 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
595 ebdp->cbd_bdu = 0;
596 ebdp->cbd_esc = estatus;
597 }
598
599 /* Handle the last BD specially */
600 if (last_tcp)
601 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
602 if (is_last) {
603 status |= BD_ENET_TX_INTR;
604 if (fep->bufdesc_ex)
605 ebdp->cbd_esc |= BD_ENET_TX_INT;
606 }
607
608 bdp->cbd_sc = status;
609
610 return 0;
611}
612
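Two different notions of "last" are threaded through fec_enet_txq_put_data_tso() above, and keeping them apart is what limits interrupt load; roughly, judging from the call sites below:

/* Per buffer descriptor in the TSO data path:
 *   last_tcp (size == data_left): final buffer of one TCP segment,
 *            so BD_ENET_TX_LAST | BD_ENET_TX_TC close that frame;
 *   is_last  (total_len == 0):    final buffer of the whole skb,
 *            so BD_ENET_TX_INTR / BD_ENET_TX_INT request a single
 *            completion interrupt for the entire TSO burst.
 */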
613static int
614fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
615 struct bufdesc *bdp, int index)
616{
617 struct fec_enet_private *fep = netdev_priv(ndev);
618 const struct platform_device_id *id_entry =
619 platform_get_device_id(fep->pdev);
620 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
621 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
622 void *bufaddr;
623 unsigned long dmabuf;
624 unsigned short status;
625 unsigned int estatus = 0;
626
627 status = bdp->cbd_sc;
628 status &= ~BD_ENET_TX_STATS;
629 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
630
631 bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
632 dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
633 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
634 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
635 memcpy(fep->tx_bounce[index], skb->data, hdr_len);
636 bufaddr = fep->tx_bounce[index];
637
638 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
639 swap_buffer(bufaddr, hdr_len);
640
641 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
642 hdr_len, DMA_TO_DEVICE);
643 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
644 dev_kfree_skb_any(skb);
645 if (net_ratelimit())
646 netdev_err(ndev, "Tx DMA memory map failed\n");
647 return NETDEV_TX_BUSY;
648 }
649 }
650
651 bdp->cbd_bufaddr = dmabuf;
652 bdp->cbd_datlen = hdr_len;
653
654 if (fep->bufdesc_ex) {
655 if (skb->ip_summed == CHECKSUM_PARTIAL)
656 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
657 ebdp->cbd_bdu = 0;
658 ebdp->cbd_esc = estatus;
659 }
660
661 bdp->cbd_sc = status;
662
663 return 0;
664}
665
666static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
667{
668 struct fec_enet_private *fep = netdev_priv(ndev);
669 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
670 int total_len, data_left;
671 struct bufdesc *bdp = fep->cur_tx;
672 struct tso_t tso;
673 unsigned int index = 0;
674 int ret;
675
676 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
677 dev_kfree_skb_any(skb);
678 if (net_ratelimit())
679 netdev_err(ndev, "NOT enough BD for TSO!\n");
680 return NETDEV_TX_OK;
681 }
682
683 /* Protocol checksum off-load for TCP and UDP. */
684 if (fec_enet_clear_csum(skb, ndev)) {
685 dev_kfree_skb_any(skb);
686 return NETDEV_TX_OK;
687 }
688
689 /* Initialize the TSO handler, and prepare the first payload */
690 tso_start(skb, &tso);
691
692 total_len = skb->len - hdr_len;
693 while (total_len > 0) {
694 char *hdr;
695
696 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
697 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
698 total_len -= data_left;
699
700 /* prepare packet headers: MAC + IP + TCP */
701 hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
702 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
703 ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
704 if (ret)
705 goto err_release;
706
707 while (data_left > 0) {
708 int size;
709
710 size = min_t(int, tso.size, data_left);
711 bdp = fec_enet_get_nextdesc(bdp, fep);
712 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
713 ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
714 size, size == data_left,
715 total_len == 0);
716 if (ret)
717 goto err_release;
718
719 data_left -= size;
720 tso_build_data(skb, &tso, size);
721 }
722
723 bdp = fec_enet_get_nextdesc(bdp, fep);
724 }
725
726 /* Save skb pointer */
727 fep->tx_skbuff[index] = skb;
728
729 fec_enet_submit_work(bdp, fep);
730
731 skb_tx_timestamp(skb);
732 fep->cur_tx = bdp;
437 733
438 /* Trigger transmission start */ 734 /* Trigger transmission start */
439 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 735 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
440 736
737 return 0;
738
739err_release:
740 /* TODO: Release all used data descriptors for TSO */
741 return ret;
742}
743
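The loop above is the generic software-TSO walk from <net/tso.h>: tso_start() primes the cursor, tso_build_hdr() emits a patched copy of the MAC/IP/TCP headers for each segment, and tso_build_data() advances through the payload. Stripped of the descriptor bookkeeping, the shape is as below (post_hdr/post_data are hypothetical stand-ins for the driver's BD-filling helpers):

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void sw_tso_walk(struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total_len > 0) {
		char hdr[128];		/* one per-segment header slot */
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;
		/* rebuild MAC+IP+TCP for this segment */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		/* post_hdr(hdr, hdr_len); -- hypothetical */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* post_data(tso.data, size); -- hypothetical */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}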
744static netdev_tx_t
745fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
746{
747 struct fec_enet_private *fep = netdev_priv(ndev);
748 int entries_free;
749 int ret;
750
751 if (skb_is_gso(skb))
752 ret = fec_enet_txq_submit_tso(skb, ndev);
753 else
754 ret = fec_enet_txq_submit_skb(skb, ndev);
755 if (ret)
756 return ret;
757
758 entries_free = fec_enet_get_free_txdesc_num(fep);
759 if (entries_free <= fep->tx_stop_threshold)
760 netif_stop_queue(ndev);
761
441 return NETDEV_TX_OK; 762 return NETDEV_TX_OK;
442} 763}
443 764
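Worth noting in the reworked start_xmit: queue flow control now uses two watermarks instead of the old cur_tx == dirty_tx test. Transmit stops once free descriptors fall to tx_stop_threshold (room for one maximally fragmented skb), and fec_enet_tx() below wakes the queue only after tx_wake_threshold entries are free again, so the queue does not thrash at the boundary. In sketch form, with illustrative names:

#include <linux/netdevice.h>

/* Stop/wake hysteresis: wake_th should sit above stop_th so a single
 * reclaimed descriptor cannot toggle the queue on every packet.
 */
static void tx_maybe_stop(struct net_device *ndev, int free, int stop_th)
{
	if (free <= stop_th)
		netif_stop_queue(ndev);
}

static void tx_maybe_wake(struct net_device *ndev, int free, int wake_th)
{
	if (netif_queue_stopped(ndev) && free >= wake_th)
		netif_wake_queue(ndev);
}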
@@ -756,6 +1077,7 @@ fec_enet_tx(struct net_device *ndev)
756 unsigned short status; 1077 unsigned short status;
757 struct sk_buff *skb; 1078 struct sk_buff *skb;
758 int index = 0; 1079 int index = 0;
1080 int entries_free;
759 1081
760 fep = netdev_priv(ndev); 1082 fep = netdev_priv(ndev);
761 bdp = fep->dirty_tx; 1083 bdp = fep->dirty_tx;
@@ -769,16 +1091,17 @@ fec_enet_tx(struct net_device *ndev)
769 if (bdp == fep->cur_tx) 1091 if (bdp == fep->cur_tx)
770 break; 1092 break;
771 1093
772 if (fep->bufdesc_ex) 1094 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
773 index = (struct bufdesc_ex *)bdp -
774 (struct bufdesc_ex *)fep->tx_bd_base;
775 else
776 index = bdp - fep->tx_bd_base;
777 1095
778 skb = fep->tx_skbuff[index]; 1096 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, 1097 if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
780 DMA_TO_DEVICE); 1098 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1099 bdp->cbd_datlen, DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0; 1100 bdp->cbd_bufaddr = 0;
1101 if (!skb) {
1102 bdp = fec_enet_get_nextdesc(bdp, fep);
1103 continue;
1104 }
782 1105
783 /* Check for errors. */ 1106 /* Check for errors. */
784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1107 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1120,7 @@ fec_enet_tx(struct net_device *ndev)
797 ndev->stats.tx_carrier_errors++; 1120 ndev->stats.tx_carrier_errors++;
798 } else { 1121 } else {
799 ndev->stats.tx_packets++; 1122 ndev->stats.tx_packets++;
800 ndev->stats.tx_bytes += bdp->cbd_datlen; 1123 ndev->stats.tx_bytes += skb->len;
801 } 1124 }
802 1125
803 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1126 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1157,15 @@ fec_enet_tx(struct net_device *ndev)
834 1157
835 /* Since we have freed up a buffer, the ring is no longer full 1158 /* Since we have freed up a buffer, the ring is no longer full
836 */ 1159 */
837 if (fep->dirty_tx != fep->cur_tx) { 1160 if (netif_queue_stopped(ndev)) {
838 if (netif_queue_stopped(ndev)) 1161 entries_free = fec_enet_get_free_txdesc_num(fep);
1162 if (entries_free >= fep->tx_wake_threshold)
839 netif_wake_queue(ndev); 1163 netif_wake_queue(ndev);
840 } 1164 }
841 } 1165 }
842 return; 1166 return;
843} 1167}
844 1168
845
846/* During a receive, the cur_rx points to the current incoming buffer. 1169/* During a receive, the cur_rx points to the current incoming buffer.
847 * When we update through the ring, if the next incoming buffer has 1170 * When we update through the ring, if the next incoming buffer has
848 * not been given to the system, we just set the empty indicator, 1171 * not been given to the system, we just set the empty indicator,
@@ -920,11 +1243,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
920 pkt_len = bdp->cbd_datlen; 1243 pkt_len = bdp->cbd_datlen;
921 ndev->stats.rx_bytes += pkt_len; 1244 ndev->stats.rx_bytes += pkt_len;
922 1245
923 if (fep->bufdesc_ex) 1246 index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
924 index = (struct bufdesc_ex *)bdp -
925 (struct bufdesc_ex *)fep->rx_bd_base;
926 else
927 index = bdp - fep->rx_bd_base;
928 data = fep->rx_skbuff[index]->data; 1247 data = fep->rx_skbuff[index]->data;
929 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1248 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
930 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1249 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1574,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1255 return 0; 1574 return 0;
1256} 1575}
1257 1576
1577static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1578{
1579 struct fec_enet_private *fep = netdev_priv(ndev);
1580 int ret;
1581
1582 if (enable) {
1583 ret = clk_prepare_enable(fep->clk_ahb);
1584 if (ret)
1585 return ret;
1586 ret = clk_prepare_enable(fep->clk_ipg);
1587 if (ret)
1588 goto failed_clk_ipg;
1589 if (fep->clk_enet_out) {
1590 ret = clk_prepare_enable(fep->clk_enet_out);
1591 if (ret)
1592 goto failed_clk_enet_out;
1593 }
1594 if (fep->clk_ptp) {
1595 ret = clk_prepare_enable(fep->clk_ptp);
1596 if (ret)
1597 goto failed_clk_ptp;
1598 }
1599 } else {
1600 clk_disable_unprepare(fep->clk_ahb);
1601 clk_disable_unprepare(fep->clk_ipg);
1602 if (fep->clk_enet_out)
1603 clk_disable_unprepare(fep->clk_enet_out);
1604 if (fep->clk_ptp)
1605 clk_disable_unprepare(fep->clk_ptp);
1606 }
1607
1608 return 0;
1609failed_clk_ptp:
1610 if (fep->clk_enet_out)
1611 clk_disable_unprepare(fep->clk_enet_out);
1612failed_clk_enet_out:
1613 clk_disable_unprepare(fep->clk_ipg);
1614failed_clk_ipg:
1615 clk_disable_unprepare(fep->clk_ahb);
1616
1617 return ret;
1618}
1619
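fec_enet_clk_enable() folds what used to be four copies of this clock dance (probe, remove, suspend, resume, as the later hunks show) into one helper, with the usual goto-unwind ladder on failure: each label releases everything enabled before the failing step, in reverse order. The idiom, reduced to two clocks ('a'/'b' are illustrative):

#include <linux/clk.h>

static int enable_pair(struct clk *a, struct clk *b)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;

	ret = clk_prepare_enable(b);
	if (ret)
		goto undo_a;		/* release in reverse order */

	return 0;

undo_a:
	clk_disable_unprepare(a);
	return ret;
}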
1258static int fec_enet_mii_probe(struct net_device *ndev) 1620static int fec_enet_mii_probe(struct net_device *ndev)
1259{ 1621{
1260 struct fec_enet_private *fep = netdev_priv(ndev); 1622 struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1364 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1726 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1365 * document. 1727 * document.
1366 */ 1728 */
1367 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000); 1729 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1368 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) 1730 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1369 fep->phy_speed--; 1731 fep->phy_speed--;
1370 fep->phy_speed <<= 1; 1732 fep->phy_speed <<= 1;
@@ -1773,6 +2135,11 @@ fec_enet_open(struct net_device *ndev)
1773 struct fec_enet_private *fep = netdev_priv(ndev); 2135 struct fec_enet_private *fep = netdev_priv(ndev);
1774 int ret; 2136 int ret;
1775 2137
2138 pinctrl_pm_select_default_state(&fep->pdev->dev);
2139 ret = fec_enet_clk_enable(ndev, true);
2140 if (ret)
2141 return ret;
2142
1776 /* I should reset the ring buffers here, but I don't yet know 2143 /* I should reset the ring buffers here, but I don't yet know
1777 * a simple way to do that. 2144 * a simple way to do that.
1778 */ 2145 */
@@ -1811,6 +2178,8 @@ fec_enet_close(struct net_device *ndev)
1811 phy_disconnect(fep->phy_dev); 2178 phy_disconnect(fep->phy_dev);
1812 } 2179 }
1813 2180
2181 fec_enet_clk_enable(ndev, false);
2182 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
1814 fec_enet_free_buffers(ndev); 2183 fec_enet_free_buffers(ndev);
1815 2184
1816 return 0; 2185 return 0;
@@ -1988,13 +2357,35 @@ static int fec_enet_init(struct net_device *ndev)
1988 const struct platform_device_id *id_entry = 2357 const struct platform_device_id *id_entry =
1989 platform_get_device_id(fep->pdev); 2358 platform_get_device_id(fep->pdev);
1990 struct bufdesc *cbd_base; 2359 struct bufdesc *cbd_base;
2360 int bd_size;
2361
2362 /* init the tx & rx ring size */
2363 fep->tx_ring_size = TX_RING_SIZE;
2364 fep->rx_ring_size = RX_RING_SIZE;
2365
2366 fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2367 fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
2368
2369 if (fep->bufdesc_ex)
2370 fep->bufdesc_size = sizeof(struct bufdesc_ex);
2371 else
2372 fep->bufdesc_size = sizeof(struct bufdesc);
2373 bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
2374 fep->bufdesc_size;
1991 2375
1992 /* Allocate memory for buffer descriptors. */ 2376 /* Allocate memory for buffer descriptors. */
1993 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 2377 cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
1994 GFP_KERNEL); 2378 GFP_KERNEL);
1995 if (!cbd_base) 2379 if (!cbd_base)
1996 return -ENOMEM; 2380 return -ENOMEM;
1997 2381
2382 fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
2383 &fep->tso_hdrs_dma, GFP_KERNEL);
2384 if (!fep->tso_hdrs) {
2385 dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
2386 return -ENOMEM;
2387 }
2388
1998 memset(cbd_base, 0, PAGE_SIZE); 2389 memset(cbd_base, 0, PAGE_SIZE);
1999 2390
2000 fep->netdev = ndev; 2391 fep->netdev = ndev;
@@ -2004,10 +2395,6 @@ static int fec_enet_init(struct net_device *ndev)
2004 /* make sure MAC we just acquired is programmed into the hw */ 2395 /* make sure MAC we just acquired is programmed into the hw */
2005 fec_set_mac_address(ndev, NULL); 2396 fec_set_mac_address(ndev, NULL);
2006 2397
2007 /* init the tx & rx ring size */
2008 fep->tx_ring_size = TX_RING_SIZE;
2009 fep->rx_ring_size = RX_RING_SIZE;
2010
2011 /* Set receive and transmit descriptor base. */ 2398 /* Set receive and transmit descriptor base. */
2012 fep->rx_bd_base = cbd_base; 2399 fep->rx_bd_base = cbd_base;
2013 if (fep->bufdesc_ex) 2400 if (fep->bufdesc_ex)
@@ -2024,21 +2411,21 @@ static int fec_enet_init(struct net_device *ndev)
2024 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 2411 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2025 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 2412 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2026 2413
2027 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { 2414 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
2028 /* enable hw VLAN support */ 2415 /* enable hw VLAN support */
2029 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 2416 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2030 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2031 }
2032 2417
2033 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { 2418 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2419 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
2420
2034 /* enable hw accelerator */ 2421 /* enable hw accelerator */
2035 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2422 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2036 | NETIF_F_RXCSUM); 2423 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
2037 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2038 | NETIF_F_RXCSUM);
2039 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 2424 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2040 } 2425 }
2041 2426
2427 ndev->hw_features = ndev->features;
2428
2042 fec_restart(ndev, 0); 2429 fec_restart(ndev, 0);
2043 2430
2044 return 0; 2431 return 0;
@@ -2114,6 +2501,9 @@ fec_probe(struct platform_device *pdev)
2114 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 2501 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2115#endif 2502#endif
2116 2503
2504 /* Select default pin state */
2505 pinctrl_pm_select_default_state(&pdev->dev);
2506
2117 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2507 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2118 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 2508 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2119 if (IS_ERR(fep->hwp)) { 2509 if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2554,10 @@ fec_probe(struct platform_device *pdev)
2164 fep->bufdesc_ex = 0; 2554 fep->bufdesc_ex = 0;
2165 } 2555 }
2166 2556
2167 ret = clk_prepare_enable(fep->clk_ahb); 2557 ret = fec_enet_clk_enable(ndev, true);
2168 if (ret) 2558 if (ret)
2169 goto failed_clk; 2559 goto failed_clk;
2170 2560
2171 ret = clk_prepare_enable(fep->clk_ipg);
2172 if (ret)
2173 goto failed_clk_ipg;
2174
2175 if (fep->clk_enet_out) {
2176 ret = clk_prepare_enable(fep->clk_enet_out);
2177 if (ret)
2178 goto failed_clk_enet_out;
2179 }
2180
2181 if (fep->clk_ptp) {
2182 ret = clk_prepare_enable(fep->clk_ptp);
2183 if (ret)
2184 goto failed_clk_ptp;
2185 }
2186
2187 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 2561 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2188 if (!IS_ERR(fep->reg_phy)) { 2562 if (!IS_ERR(fep->reg_phy)) {
2189 ret = regulator_enable(fep->reg_phy); 2563 ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2599,8 @@ fec_probe(struct platform_device *pdev)
2225 2599
2226 /* Carrier starts down, phylib will bring it up */ 2600 /* Carrier starts down, phylib will bring it up */
2227 netif_carrier_off(ndev); 2601 netif_carrier_off(ndev);
2602 fec_enet_clk_enable(ndev, false);
2603 pinctrl_pm_select_sleep_state(&pdev->dev);
2228 2604
2229 ret = register_netdev(ndev); 2605 ret = register_netdev(ndev);
2230 if (ret) 2606 if (ret)
@@ -2244,15 +2620,7 @@ failed_init:
2244 if (fep->reg_phy) 2620 if (fep->reg_phy)
2245 regulator_disable(fep->reg_phy); 2621 regulator_disable(fep->reg_phy);
2246failed_regulator: 2622failed_regulator:
2247 if (fep->clk_ptp) 2623 fec_enet_clk_enable(ndev, false);
2248 clk_disable_unprepare(fep->clk_ptp);
2249failed_clk_ptp:
2250 if (fep->clk_enet_out)
2251 clk_disable_unprepare(fep->clk_enet_out);
2252failed_clk_enet_out:
2253 clk_disable_unprepare(fep->clk_ipg);
2254failed_clk_ipg:
2255 clk_disable_unprepare(fep->clk_ahb);
2256failed_clk: 2624failed_clk:
2257failed_ioremap: 2625failed_ioremap:
2258 free_netdev(ndev); 2626 free_netdev(ndev);
@@ -2272,14 +2640,9 @@ fec_drv_remove(struct platform_device *pdev)
2272 del_timer_sync(&fep->time_keep); 2640 del_timer_sync(&fep->time_keep);
2273 if (fep->reg_phy) 2641 if (fep->reg_phy)
2274 regulator_disable(fep->reg_phy); 2642 regulator_disable(fep->reg_phy);
2275 if (fep->clk_ptp)
2276 clk_disable_unprepare(fep->clk_ptp);
2277 if (fep->ptp_clock) 2643 if (fep->ptp_clock)
2278 ptp_clock_unregister(fep->ptp_clock); 2644 ptp_clock_unregister(fep->ptp_clock);
2279 if (fep->clk_enet_out) 2645 fec_enet_clk_enable(ndev, false);
2280 clk_disable_unprepare(fep->clk_enet_out);
2281 clk_disable_unprepare(fep->clk_ipg);
2282 clk_disable_unprepare(fep->clk_ahb);
2283 free_netdev(ndev); 2646 free_netdev(ndev);
2284 2647
2285 return 0; 2648 return 0;
@@ -2296,12 +2659,8 @@ fec_suspend(struct device *dev)
2296 fec_stop(ndev); 2659 fec_stop(ndev);
2297 netif_device_detach(ndev); 2660 netif_device_detach(ndev);
2298 } 2661 }
2299 if (fep->clk_ptp) 2662 fec_enet_clk_enable(ndev, false);
2300 clk_disable_unprepare(fep->clk_ptp); 2663 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2301 if (fep->clk_enet_out)
2302 clk_disable_unprepare(fep->clk_enet_out);
2303 clk_disable_unprepare(fep->clk_ipg);
2304 clk_disable_unprepare(fep->clk_ahb);
2305 2664
2306 if (fep->reg_phy) 2665 if (fep->reg_phy)
2307 regulator_disable(fep->reg_phy); 2666 regulator_disable(fep->reg_phy);
@@ -2322,25 +2681,10 @@ fec_resume(struct device *dev)
2322 return ret; 2681 return ret;
2323 } 2682 }
2324 2683
2325 ret = clk_prepare_enable(fep->clk_ahb); 2684 pinctrl_pm_select_default_state(&fep->pdev->dev);
2685 ret = fec_enet_clk_enable(ndev, true);
2326 if (ret) 2686 if (ret)
2327 goto failed_clk_ahb; 2687 goto failed_clk;
2328
2329 ret = clk_prepare_enable(fep->clk_ipg);
2330 if (ret)
2331 goto failed_clk_ipg;
2332
2333 if (fep->clk_enet_out) {
2334 ret = clk_prepare_enable(fep->clk_enet_out);
2335 if (ret)
2336 goto failed_clk_enet_out;
2337 }
2338
2339 if (fep->clk_ptp) {
2340 ret = clk_prepare_enable(fep->clk_ptp);
2341 if (ret)
2342 goto failed_clk_ptp;
2343 }
2344 2688
2345 if (netif_running(ndev)) { 2689 if (netif_running(ndev)) {
2346 fec_restart(ndev, fep->full_duplex); 2690 fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2693,7 @@ fec_resume(struct device *dev)
2349 2693
2350 return 0; 2694 return 0;
2351 2695
2352failed_clk_ptp: 2696failed_clk:
2353 if (fep->clk_enet_out)
2354 clk_disable_unprepare(fep->clk_enet_out);
2355failed_clk_enet_out:
2356 clk_disable_unprepare(fep->clk_ipg);
2357failed_clk_ipg:
2358 clk_disable_unprepare(fep->clk_ahb);
2359failed_clk_ahb:
2360 if (fep->reg_phy) 2697 if (fep->reg_phy)
2361 regulator_disable(fep->reg_phy); 2698 regulator_disable(fep->reg_phy);
2362 return ret; 2699 return ret;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db41d6b3..cfaf17b70f3f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, 792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
793 iface); 793 iface);
794 if (!phydev) { 794 if (!phydev) {
795 phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
796 iface);
797 }
798 if (!phydev) {
799 dev_err(&dev->dev, "Could not attach to PHY\n"); 795 dev_err(&dev->dev, "Could not attach to PHY\n");
800 return -ENODEV; 796 return -ENODEV;
801 } 797 }
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
1029 fpi->use_napi = 1; 1025 fpi->use_napi = 1;
1030 fpi->napi_weight = 17; 1026 fpi->napi_weight = 17;
1031 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 1027 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1032 if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", 1028 if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
1033 NULL))) 1029 err = of_phy_register_fixed_link(ofdev->dev.of_node);
1034 goto out_free_fpi; 1030 if (err)
1031 goto out_free_fpi;
1032
1033 /* In the case of a fixed PHY, the DT node associated
 1034 * with the PHY is the Ethernet MAC DT node.

1035 */
1036 fpi->phy_node = ofdev->dev.of_node;
1037 }
1035 1038
1036 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { 1039 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
1037 phy_connection_type = of_get_property(ofdev->dev.of_node, 1040 phy_connection_type = of_get_property(ofdev->dev.of_node,
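This hunk, and the matching ones in gianfar and ucc_geth below, converts the driver from the now-removed of_phy_connect_fixed_link() helper to the fixed-link OF API: if there is no phy-handle but the node carries a fixed-link description, register a fixed PHY and let the MAC node itself stand in as the PHY node. The shared shape, as a sketch (pick_phy_node is a hypothetical name):

#include <linux/of.h>
#include <linux/of_mdio.h>

/* Sketch of the fixed-link probe flow ('np' is the MAC's DT node). */
static struct device_node *pick_phy_node(struct device_node *np, int *err)
{
	struct device_node *phy_node;

	*err = 0;
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		*err = of_phy_register_fixed_link(np);
		if (*err)
			return NULL;
		/* With a fixed link, the MAC node doubles as the PHY node. */
		phy_node = np;
	}
	return phy_node;
}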
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd4f252..a6cf40e62f3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
889 889
890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
891 891
892 /* In the case of a fixed PHY, the DT node associated
 893 * with the PHY is the Ethernet MAC DT node.
894 */
895 if (of_phy_is_fixed_link(np)) {
896 err = of_phy_register_fixed_link(np);
897 if (err)
898 goto err_grp_init;
899
900 priv->phy_node = np;
901 }
902
892 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 903 /* Find the TBI PHY. If it's not there, we don't support SGMII */
893 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 904 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
894 905
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
1231 gfar_write_isrg(priv); 1242 gfar_write_isrg(priv);
1232} 1243}
1233 1244
1234static void __init gfar_init_addr_hash_table(struct gfar_private *priv) 1245static void gfar_init_addr_hash_table(struct gfar_private *priv)
1235{ 1246{
1236 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1247 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1237 1248
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
1373 1384
1374 gfar_hw_init(priv); 1385 gfar_hw_init(priv);
1375 1386
1387 /* Carrier starts down, phylib will bring it up */
1388 netif_carrier_off(dev);
1389
1376 err = register_netdev(dev); 1390 err = register_netdev(dev);
1377 1391
1378 if (err) { 1392 if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
1380 goto register_fail; 1394 goto register_fail;
1381 } 1395 }
1382 1396
1383 /* Carrier starts down, phylib will bring it up */
1384 netif_carrier_off(dev);
1385
1386 device_init_wakeup(&dev->dev, 1397 device_init_wakeup(&dev->dev,
1387 priv->device_flags & 1398 priv->device_flags &
1388 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1399 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
1660 1671
1661 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1672 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1662 interface); 1673 interface);
1663 if (!priv->phydev)
1664 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1665 interface);
1666 if (!priv->phydev) { 1674 if (!priv->phydev) {
1667 dev_err(&dev->dev, "could not attach to PHY\n"); 1675 dev_err(&dev->dev, "could not attach to PHY\n");
1668 return -ENODEV; 1676 return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c31b21f..fab39e295441 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
1728 1728
1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
1730 priv->phy_interface); 1730 priv->phy_interface);
1731 if (!phydev)
1732 phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1733 priv->phy_interface);
1734 if (!phydev) { 1731 if (!phydev) {
1735 dev_err(&dev->dev, "Could not attach to PHY\n"); 1732 dev_err(&dev->dev, "Could not attach to PHY\n");
1736 return -ENODEV; 1733 return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3790 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3787 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3791 3788
3792 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); 3789 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
3790 if (!ug_info->phy_node) {
3791 /* In the case of a fixed PHY, the DT node associated
 3792 * with the PHY is the Ethernet MAC DT node.
3793 */
3794 if (of_phy_is_fixed_link(np)) {
3795 err = of_phy_register_fixed_link(np);
3796 if (err)
3797 return err;
3798 }
3799 ug_info->phy_node = np;
3800 }
3793 3801
3794 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3802 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3795 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3803 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329eff2ff..cc83350d56ba 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
417 417
418void uec_set_ethtool_ops(struct net_device *netdev) 418void uec_set_ethtool_ops(struct net_device *netdev)
419{ 419{
420 SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); 420 netdev->ethtool_ops = &uec_ethtool_ops;
421} 421}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb90199..0c9d55c862ae 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
162 162
163 /* Return all Fs if nothing was there */ 163 /* Return all Fs if nothing was there */
164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { 164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
165 dev_err(&bus->dev, "MDIO read error\n"); 165 dev_err(&bus->dev,
166 "Error while reading PHY%d reg at %d.%d\n",
167 phy_id, dev_addr, regnum);
166 return 0xffff; 168 return 0xffff;
167 } 169 }
168 170
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1aa3e4..cfe7a7431730 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
256 dev->netdev_ops = &fjn_netdev_ops; 256 dev->netdev_ops = &fjn_netdev_ops;
257 dev->watchdog_timeo = TX_TIMEOUT; 257 dev->watchdog_timeo = TX_TIMEOUT;
258 258
259 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 259 dev->ethtool_ops = &netdev_ethtool_ops;
260 260
261 return fmvj18x_config(link); 261 return fmvj18x_config(link);
262} /* fmvj18x_attach */ 262} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000000..e9421731b05e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
1#
2# HISILICON device configuration
3#
4
5config NET_VENDOR_HISILICON
6 bool "Hisilicon devices"
7 default y
8 depends on ARM
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about Hisilicon devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_HISILICON
20
21config HIX5HD2_GMAC
22 tristate "Hisilicon HIX5HD2 Family Network Device Support"
23 select PHYLIB
24 help
 25 This selects support for the Hisilicon hix5hd2 family GMAC network device.
26
27endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000000..9175e84622d4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the HISILICON network device drivers.
3#
4
5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000000..0ffdcd381fdd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
1/* Copyright (c) 2014 Linaro Ltd.
2 * Copyright (c) 2014 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <linux/of_net.h>
15#include <linux/of_mdio.h>
16#include <linux/clk.h>
17#include <linux/circ_buf.h>
18
19#define STATION_ADDR_LOW 0x0000
20#define STATION_ADDR_HIGH 0x0004
21#define MAC_DUPLEX_HALF_CTRL 0x0008
22#define MAX_FRM_SIZE 0x003c
23#define PORT_MODE 0x0040
24#define PORT_EN 0x0044
25#define BITS_TX_EN BIT(2)
26#define BITS_RX_EN BIT(1)
27#define REC_FILT_CONTROL 0x0064
28#define BIT_CRC_ERR_PASS BIT(5)
29#define BIT_PAUSE_FRM_PASS BIT(4)
30#define BIT_VLAN_DROP_EN BIT(3)
31#define BIT_BC_DROP_EN BIT(2)
32#define BIT_MC_MATCH_EN BIT(1)
33#define BIT_UC_MATCH_EN BIT(0)
34#define PORT_MC_ADDR_LOW 0x0068
35#define PORT_MC_ADDR_HIGH 0x006C
36#define CF_CRC_STRIP 0x01b0
37#define MODE_CHANGE_EN 0x01b4
38#define BIT_MODE_CHANGE_EN BIT(0)
39#define COL_SLOT_TIME 0x01c0
40#define RECV_CONTROL 0x01e0
41#define BIT_STRIP_PAD_EN BIT(3)
42#define BIT_RUNT_PKT_EN BIT(4)
43#define CONTROL_WORD 0x0214
44#define MDIO_SINGLE_CMD 0x03c0
45#define MDIO_SINGLE_DATA 0x03c4
46#define MDIO_CTRL 0x03cc
47#define MDIO_RDATA_STATUS 0x03d0
48
49#define MDIO_START BIT(20)
50#define MDIO_R_VALID BIT(0)
51#define MDIO_READ (BIT(17) | MDIO_START)
52#define MDIO_WRITE (BIT(16) | MDIO_START)
53
54#define RX_FQ_START_ADDR 0x0500
55#define RX_FQ_DEPTH 0x0504
56#define RX_FQ_WR_ADDR 0x0508
57#define RX_FQ_RD_ADDR 0x050c
58#define RX_FQ_VLDDESC_CNT 0x0510
59#define RX_FQ_ALEMPTY_TH 0x0514
60#define RX_FQ_REG_EN 0x0518
61#define BITS_RX_FQ_START_ADDR_EN BIT(2)
62#define BITS_RX_FQ_DEPTH_EN BIT(1)
63#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
64#define RX_FQ_ALFULL_TH 0x051c
65#define RX_BQ_START_ADDR 0x0520
66#define RX_BQ_DEPTH 0x0524
67#define RX_BQ_WR_ADDR 0x0528
68#define RX_BQ_RD_ADDR 0x052c
69#define RX_BQ_FREE_DESC_CNT 0x0530
70#define RX_BQ_ALEMPTY_TH 0x0534
71#define RX_BQ_REG_EN 0x0538
72#define BITS_RX_BQ_START_ADDR_EN BIT(2)
73#define BITS_RX_BQ_DEPTH_EN BIT(1)
74#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
75#define RX_BQ_ALFULL_TH 0x053c
76#define TX_BQ_START_ADDR 0x0580
77#define TX_BQ_DEPTH 0x0584
78#define TX_BQ_WR_ADDR 0x0588
79#define TX_BQ_RD_ADDR 0x058c
80#define TX_BQ_VLDDESC_CNT 0x0590
81#define TX_BQ_ALEMPTY_TH 0x0594
82#define TX_BQ_REG_EN 0x0598
83#define BITS_TX_BQ_START_ADDR_EN BIT(2)
84#define BITS_TX_BQ_DEPTH_EN BIT(1)
85#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
86#define TX_BQ_ALFULL_TH 0x059c
87#define TX_RQ_START_ADDR 0x05a0
88#define TX_RQ_DEPTH 0x05a4
89#define TX_RQ_WR_ADDR 0x05a8
90#define TX_RQ_RD_ADDR 0x05ac
91#define TX_RQ_FREE_DESC_CNT 0x05b0
92#define TX_RQ_ALEMPTY_TH 0x05b4
93#define TX_RQ_REG_EN 0x05b8
94#define BITS_TX_RQ_START_ADDR_EN BIT(2)
95#define BITS_TX_RQ_DEPTH_EN BIT(1)
96#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
97#define TX_RQ_ALFULL_TH 0x05bc
98#define RAW_PMU_INT 0x05c0
99#define ENA_PMU_INT 0x05c4
100#define STATUS_PMU_INT 0x05c8
101#define MAC_FIFO_ERR_IN BIT(30)
102#define TX_RQ_IN_TIMEOUT_INT BIT(29)
103#define RX_BQ_IN_TIMEOUT_INT BIT(28)
104#define TXOUTCFF_FULL_INT BIT(27)
105#define TXOUTCFF_EMPTY_INT BIT(26)
106#define TXCFF_FULL_INT BIT(25)
107#define TXCFF_EMPTY_INT BIT(24)
108#define RXOUTCFF_FULL_INT BIT(23)
109#define RXOUTCFF_EMPTY_INT BIT(22)
110#define RXCFF_FULL_INT BIT(21)
111#define RXCFF_EMPTY_INT BIT(20)
112#define TX_RQ_IN_INT BIT(19)
113#define TX_BQ_OUT_INT BIT(18)
114#define RX_BQ_IN_INT BIT(17)
115#define RX_FQ_OUT_INT BIT(16)
116#define TX_RQ_EMPTY_INT BIT(15)
117#define TX_RQ_FULL_INT BIT(14)
118#define TX_RQ_ALEMPTY_INT BIT(13)
119#define TX_RQ_ALFULL_INT BIT(12)
120#define TX_BQ_EMPTY_INT BIT(11)
121#define TX_BQ_FULL_INT BIT(10)
122#define TX_BQ_ALEMPTY_INT BIT(9)
123#define TX_BQ_ALFULL_INT BIT(8)
124#define RX_BQ_EMPTY_INT BIT(7)
125#define RX_BQ_FULL_INT BIT(6)
126#define RX_BQ_ALEMPTY_INT BIT(5)
127#define RX_BQ_ALFULL_INT BIT(4)
128#define RX_FQ_EMPTY_INT BIT(3)
129#define RX_FQ_FULL_INT BIT(2)
130#define RX_FQ_ALEMPTY_INT BIT(1)
131#define RX_FQ_ALFULL_INT BIT(0)
132
133#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
134 TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
135
136#define DESC_WR_RD_ENA 0x05cc
137#define IN_QUEUE_TH 0x05d8
138#define OUT_QUEUE_TH 0x05dc
139#define QUEUE_TX_BQ_SHIFT 16
140#define RX_BQ_IN_TIMEOUT_TH 0x05e0
141#define TX_RQ_IN_TIMEOUT_TH 0x05e4
142#define STOP_CMD 0x05e8
143#define BITS_TX_STOP BIT(1)
144#define BITS_RX_STOP BIT(0)
145#define FLUSH_CMD 0x05ec
146#define BITS_TX_FLUSH_CMD BIT(5)
147#define BITS_RX_FLUSH_CMD BIT(4)
148#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
149#define BITS_TX_FLUSH_FLAG_UP BIT(2)
150#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
151#define BITS_RX_FLUSH_FLAG_UP BIT(0)
152#define RX_CFF_NUM_REG 0x05f0
153#define PMU_FSM_REG 0x05f8
154#define RX_FIFO_PKT_IN_NUM 0x05fc
155#define RX_FIFO_PKT_OUT_NUM 0x0600
156
157#define RGMII_SPEED_1000 0x2c
158#define RGMII_SPEED_100 0x2f
159#define RGMII_SPEED_10 0x2d
160#define MII_SPEED_100 0x0f
161#define MII_SPEED_10 0x0d
162#define GMAC_SPEED_1000 0x05
163#define GMAC_SPEED_100 0x01
164#define GMAC_SPEED_10 0x00
165#define GMAC_FULL_DUPLEX BIT(4)
166
167#define RX_BQ_INT_THRESHOLD 0x01
168#define TX_RQ_INT_THRESHOLD 0x01
169#define RX_BQ_IN_TIMEOUT 0x10000
170#define TX_RQ_IN_TIMEOUT 0x50000
171
172#define MAC_MAX_FRAME_SIZE 1600
173#define DESC_SIZE 32
174#define RX_DESC_NUM 1024
175#define TX_DESC_NUM 1024
176
177#define DESC_VLD_FREE 0
178#define DESC_VLD_BUSY 0x80000000
179#define DESC_FL_MID 0
180#define DESC_FL_LAST 0x20000000
181#define DESC_FL_FIRST 0x40000000
182#define DESC_FL_FULL 0x60000000
183#define DESC_DATA_LEN_OFF 16
184#define DESC_BUFF_LEN_OFF 0
185#define DESC_DATA_MASK 0x7ff
186
187/* DMA descriptor ring helpers */
188#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
189#define dma_cnt(n) ((n) >> 5)
190#define dma_byte(n) ((n) << 5)
191
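These three helpers encode the driver's ring arithmetic: descriptors are 32 bytes (hence the 5-bit shifts), the hardware queue registers hold byte offsets while software tracks indices, and the ring sizes are powers of two so wraparound is a mask. A worked example, assuming RX_DESC_NUM == 1024:

/* dma_byte(10)              == 10 << 5     == 0x140 (index -> bytes)
 * dma_cnt(0x140)            == 0x140 >> 5  == 10    (bytes -> index)
 * dma_ring_incr(1023, 1024) == 1024 & 1023 == 0     (wrap via mask)
 * Every queue-pointer register access in the rx/tx paths below goes
 * through dma_cnt() on read and dma_byte() on write.
 */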
192struct hix5hd2_desc {
193 __le32 buff_addr;
194 __le32 cmd;
195} __aligned(32);
196
197struct hix5hd2_desc_sw {
198 struct hix5hd2_desc *desc;
199 dma_addr_t phys_addr;
200 unsigned int count;
201 unsigned int size;
202};
203
204#define QUEUE_NUMS 4
205struct hix5hd2_priv {
206 struct hix5hd2_desc_sw pool[QUEUE_NUMS];
207#define rx_fq pool[0]
208#define rx_bq pool[1]
209#define tx_bq pool[2]
210#define tx_rq pool[3]
211
212 void __iomem *base;
213 void __iomem *ctrl_base;
214
215 struct sk_buff *tx_skb[TX_DESC_NUM];
216 struct sk_buff *rx_skb[RX_DESC_NUM];
217
218 struct device *dev;
219 struct net_device *netdev;
220
221 struct phy_device *phy;
222 struct device_node *phy_node;
223 phy_interface_t phy_mode;
224
225 unsigned int speed;
226 unsigned int duplex;
227
228 struct clk *clk;
229 struct mii_bus *bus;
230 struct napi_struct napi;
231 struct work_struct tx_timeout_task;
232};
233
234static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
235{
236 struct hix5hd2_priv *priv = netdev_priv(dev);
237 u32 val;
238
239 priv->speed = speed;
240 priv->duplex = duplex;
241
242 switch (priv->phy_mode) {
243 case PHY_INTERFACE_MODE_RGMII:
244 if (speed == SPEED_1000)
245 val = RGMII_SPEED_1000;
246 else if (speed == SPEED_100)
247 val = RGMII_SPEED_100;
248 else
249 val = RGMII_SPEED_10;
250 break;
251 case PHY_INTERFACE_MODE_MII:
252 if (speed == SPEED_100)
253 val = MII_SPEED_100;
254 else
255 val = MII_SPEED_10;
256 break;
257 default:
258 netdev_warn(dev, "not supported mode\n");
259 val = MII_SPEED_10;
260 break;
261 }
262
263 if (duplex)
264 val |= GMAC_FULL_DUPLEX;
265 writel_relaxed(val, priv->ctrl_base);
266
267 writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
268 if (speed == SPEED_1000)
269 val = GMAC_SPEED_1000;
270 else if (speed == SPEED_100)
271 val = GMAC_SPEED_100;
272 else
273 val = GMAC_SPEED_10;
274 writel_relaxed(val, priv->base + PORT_MODE);
275 writel_relaxed(0, priv->base + MODE_CHANGE_EN);
276 writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
277}
278
279static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
280{
281 writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
282 writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
283 writel_relaxed(0, priv->base + RX_FQ_REG_EN);
284
285 writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
286 writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
287 writel_relaxed(0, priv->base + RX_BQ_REG_EN);
288
289 writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
290 writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
291 writel_relaxed(0, priv->base + TX_BQ_REG_EN);
292
293 writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
294 writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
295 writel_relaxed(0, priv->base + TX_RQ_REG_EN);
296}
297
298static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
299{
300 writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
301 writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
302 writel_relaxed(0, priv->base + RX_FQ_REG_EN);
303}
304
305static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
306{
307 writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
308 writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
309 writel_relaxed(0, priv->base + RX_BQ_REG_EN);
310}
311
312static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
313{
314 writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
315 writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
316 writel_relaxed(0, priv->base + TX_BQ_REG_EN);
317}
318
319static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
320{
321 writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
322 writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
323 writel_relaxed(0, priv->base + TX_RQ_REG_EN);
324}
325
326static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
327{
328 hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
329 hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
330 hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
331 hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
332}
333
334static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
335{
336 u32 val;
337
338 /* disable and clear all interrupts */
339 writel_relaxed(0, priv->base + ENA_PMU_INT);
340 writel_relaxed(~0, priv->base + RAW_PMU_INT);
341
342 writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
343 writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
344 writel_relaxed(0, priv->base + COL_SLOT_TIME);
345
346 val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
347 writel_relaxed(val, priv->base + IN_QUEUE_TH);
348
349 writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
350 writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
351
352 hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
353 hix5hd2_set_desc_addr(priv);
354}
355
356static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
357{
358 writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
359}
360
361static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
362{
363 writel_relaxed(0, priv->base + ENA_PMU_INT);
364}
365
366static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
367{
368 writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
369 writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
370}
371
372static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
373{
374 writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
375 writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
376}
377
378static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
379{
380 struct hix5hd2_priv *priv = netdev_priv(dev);
381 unsigned char *mac = dev->dev_addr;
382 u32 val;
383
384 val = mac[1] | (mac[0] << 8);
385 writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
386
387 val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
388 writel_relaxed(val, priv->base + STATION_ADDR_LOW);
389}
390
391static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
392{
393 int ret;
394
395 ret = eth_mac_addr(dev, p);
396 if (!ret)
397 hix5hd2_hw_set_mac_addr(dev);
398
399 return ret;
400}
401
402static void hix5hd2_adjust_link(struct net_device *dev)
403{
404 struct hix5hd2_priv *priv = netdev_priv(dev);
405 struct phy_device *phy = priv->phy;
406
407 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
408 hix5hd2_config_port(dev, phy->speed, phy->duplex);
409 phy_print_status(phy);
410 }
411}
412
413static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
414{
415 struct hix5hd2_desc *desc;
416 struct sk_buff *skb;
417 u32 start, end, num, pos, i;
418 u32 len = MAC_MAX_FRAME_SIZE;
419 dma_addr_t addr;
420
421 /* software write pointer */
422 start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
423 /* logic read pointer */
424 end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
425 num = CIRC_SPACE(start, end, RX_DESC_NUM);
426
427 for (i = 0, pos = start; i < num; i++) {
428 if (priv->rx_skb[pos]) {
429 break;
430 } else {
431 skb = netdev_alloc_skb_ip_align(priv->netdev, len);
432 if (unlikely(skb == NULL))
433 break;
434 }
435
436 addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
437 if (dma_mapping_error(priv->dev, addr)) {
438 dev_kfree_skb_any(skb);
439 break;
440 }
441
442 desc = priv->rx_fq.desc + pos;
443 desc->buff_addr = cpu_to_le32(addr);
444 priv->rx_skb[pos] = skb;
445 desc->cmd = cpu_to_le32(DESC_VLD_FREE |
446 (len - 1) << DESC_BUFF_LEN_OFF);
447 pos = dma_ring_incr(pos, RX_DESC_NUM);
448 }
449
 450 /* make sure descriptors are written before the pointer moves */
451 wmb();
452
453 if (pos != start)
454 writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
455}
456
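The refill path above is a textbook <linux/circ_buf.h> producer: CIRC_SPACE(write, read, size) bounds how many free-queue slots may be filled, the descriptors are written first, and only after a wmb() does the hardware write pointer advance. Reduced to its ordering contract (fill/kick are hypothetical callbacks):

#include <linux/circ_buf.h>
#include <asm/barrier.h>

/* Producer ordering sketch: publish descriptors, then the pointer. */
static unsigned int ring_refill(unsigned int head, unsigned int tail,
				unsigned int size,
				void (*fill)(unsigned int pos),
				void (*kick)(unsigned int pos))
{
	unsigned int i, num = CIRC_SPACE(head, tail, size);
	unsigned int pos = head;

	for (i = 0; i < num; i++) {
		fill(pos);			/* write one descriptor */
		pos = (pos + 1) & (size - 1);	/* power-of-two wrap */
	}
	wmb();		/* descriptors must be visible before the kick */
	if (pos != head)
		kick(pos);			/* advance HW write pointer */
	return pos;
}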
457static int hix5hd2_rx(struct net_device *dev, int limit)
458{
459 struct hix5hd2_priv *priv = netdev_priv(dev);
460 struct sk_buff *skb;
461 struct hix5hd2_desc *desc;
462 dma_addr_t addr;
463 u32 start, end, num, pos, i, len;
464
465 /* software read pointer */
466 start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
467 /* logic write pointer */
468 end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
469 num = CIRC_CNT(end, start, RX_DESC_NUM);
470 if (num > limit)
471 num = limit;
472
 473 /* make sure we see the updated descriptors */
474 rmb();
475 for (i = 0, pos = start; i < num; i++) {
476 skb = priv->rx_skb[pos];
477 if (unlikely(!skb)) {
478 netdev_err(dev, "inconsistent rx_skb\n");
479 break;
480 }
481 priv->rx_skb[pos] = NULL;
482
483 desc = priv->rx_bq.desc + pos;
484 len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
485 DESC_DATA_MASK;
486 addr = le32_to_cpu(desc->buff_addr);
487 dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
488 DMA_FROM_DEVICE);
489
490 skb_put(skb, len);
491 if (skb->len > MAC_MAX_FRAME_SIZE) {
492 netdev_err(dev, "rcv len err, len = %d\n", skb->len);
493 dev->stats.rx_errors++;
494 dev->stats.rx_length_errors++;
495 dev_kfree_skb_any(skb);
496 goto next;
497 }
498
499 skb->protocol = eth_type_trans(skb, dev);
500 napi_gro_receive(&priv->napi, skb);
501 dev->stats.rx_packets++;
502 dev->stats.rx_bytes += skb->len;
503 dev->last_rx = jiffies;
504next:
505 pos = dma_ring_incr(pos, RX_DESC_NUM);
506 }
507
508 if (pos != start)
509 writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
510
511 hix5hd2_rx_refill(priv);
512
513 return num;
514}
515
516static void hix5hd2_xmit_reclaim(struct net_device *dev)
517{
518 struct sk_buff *skb;
519 struct hix5hd2_desc *desc;
520 struct hix5hd2_priv *priv = netdev_priv(dev);
521 unsigned int bytes_compl = 0, pkts_compl = 0;
522 u32 start, end, num, pos, i;
523 dma_addr_t addr;
524
525 netif_tx_lock(dev);
526
527 /* software read */
528 start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
529 /* logic write */
530 end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
531 num = CIRC_CNT(end, start, TX_DESC_NUM);
532
533 for (i = 0, pos = start; i < num; i++) {
534 skb = priv->tx_skb[pos];
535 if (unlikely(!skb)) {
536 netdev_err(dev, "inconsistent tx_skb\n");
537 break;
538 }
539
540 pkts_compl++;
541 bytes_compl += skb->len;
542 desc = priv->tx_rq.desc + pos;
543 addr = le32_to_cpu(desc->buff_addr);
544 dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
545 priv->tx_skb[pos] = NULL;
546 dev_consume_skb_any(skb);
547 pos = dma_ring_incr(pos, TX_DESC_NUM);
548 }
549
550 if (pos != start)
551 writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
552
553 netif_tx_unlock(dev);
554
555 if (pkts_compl || bytes_compl)
556 netdev_completed_queue(dev, pkts_compl, bytes_compl);
557
558 if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
559 netif_wake_queue(priv->netdev);
560}
561
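Note the byte-queue-limits pairing: hix5hd2_net_xmit() below reports every queued frame with netdev_sent_queue(), the reclaim path above reports completions with netdev_completed_queue(), and hix5hd2_net_open() resets the accounting with netdev_reset_queue(). The contract in miniature:

#include <linux/netdevice.h>

/* BQL sketch: bytes reported sent must later be reported completed
 * (and reset on open), or the stack throttles the queue.
 */
static void bql_tx(struct net_device *dev, unsigned int len)
{
	netdev_sent_queue(dev, len);		/* at enqueue time */
}

static void bql_reclaim(struct net_device *dev,
			unsigned int pkts, unsigned int bytes)
{
	if (pkts || bytes)
		netdev_completed_queue(dev, pkts, bytes);
}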
562static int hix5hd2_poll(struct napi_struct *napi, int budget)
563{
564 struct hix5hd2_priv *priv = container_of(napi,
565 struct hix5hd2_priv, napi);
566 struct net_device *dev = priv->netdev;
567 int work_done = 0, task = budget;
568 int ints, num;
569
570 do {
571 hix5hd2_xmit_reclaim(dev);
572 num = hix5hd2_rx(dev, task);
573 work_done += num;
574 task -= num;
575 if ((work_done >= budget) || (num == 0))
576 break;
577
578 ints = readl_relaxed(priv->base + RAW_PMU_INT);
579 writel_relaxed(ints, priv->base + RAW_PMU_INT);
580 } while (ints & DEF_INT_MASK);
581
582 if (work_done < budget) {
583 napi_complete(napi);
584 hix5hd2_irq_enable(priv);
585 }
586
587 return work_done;
588}
589
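hix5hd2_poll() follows the canonical NAPI contract: the hard IRQ handler (next) masks the device's interrupts and schedules the poller; poll() consumes at most 'budget' packets and, only when it finishes under budget, calls napi_complete() and unmasks. In outline (rx_consume/dev_irq_unmask are hypothetical helpers, declared only for the sketch):

#include <linux/netdevice.h>

int rx_consume(struct napi_struct *napi, int budget);	/* hypothetical */
void dev_irq_unmask(struct napi_struct *napi);		/* hypothetical */

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int done = rx_consume(napi, budget);	/* <= budget packets */

	if (done < budget) {
		napi_complete(napi);		/* stop polling */
		dev_irq_unmask(napi);		/* hand back to the IRQ */
	}
	return done;
}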
590static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
591{
592 struct net_device *dev = (struct net_device *)dev_id;
593 struct hix5hd2_priv *priv = netdev_priv(dev);
594 int ints = readl_relaxed(priv->base + RAW_PMU_INT);
595
596 writel_relaxed(ints, priv->base + RAW_PMU_INT);
597 if (likely(ints & DEF_INT_MASK)) {
598 hix5hd2_irq_disable(priv);
599 napi_schedule(&priv->napi);
600 }
601
602 return IRQ_HANDLED;
603}
604
605static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
606{
607 struct hix5hd2_priv *priv = netdev_priv(dev);
608 struct hix5hd2_desc *desc;
609 dma_addr_t addr;
610 u32 pos;
611
612 /* software write pointer */
613 pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
614 if (unlikely(priv->tx_skb[pos])) {
615 dev->stats.tx_dropped++;
616 dev->stats.tx_fifo_errors++;
617 netif_stop_queue(dev);
618 return NETDEV_TX_BUSY;
619 }
620
621 addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
622 if (dma_mapping_error(priv->dev, addr)) {
623 dev_kfree_skb_any(skb);
624 return NETDEV_TX_OK;
625 }
626
627 desc = priv->tx_bq.desc + pos;
628 desc->buff_addr = cpu_to_le32(addr);
629 priv->tx_skb[pos] = skb;
630 desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
631 (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
632 (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
633
634 /* ensure desc updated */
635 wmb();
636
637 pos = dma_ring_incr(pos, TX_DESC_NUM);
638 writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
639
640 dev->trans_start = jiffies;
641 dev->stats.tx_packets++;
642 dev->stats.tx_bytes += skb->len;
643 netdev_sent_queue(dev, skb->len);
644
645 return NETDEV_TX_OK;
646}
647
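Each transmit descriptor's cmd word packs ownership, fragment placement, and two lengths into one little-endian 32-bit field; since this driver always sends a frame as a single buffer, the fragment bits are DESC_FL_FULL (first and last in one). A worked example for a 60-byte (0x3c) frame:

/* DESC_VLD_BUSY  bit 31      descriptor owned by hardware
 * DESC_FL_FULL   bits 30..29 first and last fragment in one buffer
 * data length    bits 26..16 0x3c << DESC_DATA_LEN_OFF
 * buffer length  bits 10..0  0x3c << DESC_BUFF_LEN_OFF
 * => 0x80000000 | 0x60000000 | 0x003c0000 | 0x3c == 0xe03c003c
 */
static const u32 example_cmd = DESC_VLD_BUSY | DESC_FL_FULL |
			       (60 & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
			       (60 & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF;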
648static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
649{
650 struct hix5hd2_desc *desc;
651 dma_addr_t addr;
652 int i;
653
654 for (i = 0; i < RX_DESC_NUM; i++) {
655 struct sk_buff *skb = priv->rx_skb[i];
656 if (skb == NULL)
657 continue;
658
659 desc = priv->rx_fq.desc + i;
660 addr = le32_to_cpu(desc->buff_addr);
661 dma_unmap_single(priv->dev, addr,
662 MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
663 dev_kfree_skb_any(skb);
664 priv->rx_skb[i] = NULL;
665 }
666
667 for (i = 0; i < TX_DESC_NUM; i++) {
668 struct sk_buff *skb = priv->tx_skb[i];
669 if (skb == NULL)
670 continue;
671
672 desc = priv->tx_rq.desc + i;
673 addr = le32_to_cpu(desc->buff_addr);
674 dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
675 dev_kfree_skb_any(skb);
676 priv->tx_skb[i] = NULL;
677 }
678}
679
680static int hix5hd2_net_open(struct net_device *dev)
681{
682 struct hix5hd2_priv *priv = netdev_priv(dev);
683 int ret;
684
685 ret = clk_prepare_enable(priv->clk);
686 if (ret < 0) {
687 netdev_err(dev, "failed to enable clk %d\n", ret);
688 return ret;
689 }
690
691 priv->phy = of_phy_connect(dev, priv->phy_node,
692 &hix5hd2_adjust_link, 0, priv->phy_mode);
693 if (!priv->phy)
694 return -ENODEV;
695
696 phy_start(priv->phy);
697 hix5hd2_hw_init(priv);
698 hix5hd2_rx_refill(priv);
699
700 netdev_reset_queue(dev);
701 netif_start_queue(dev);
702 napi_enable(&priv->napi);
703
704 hix5hd2_port_enable(priv);
705 hix5hd2_irq_enable(priv);
706
707 return 0;
708}
709
710static int hix5hd2_net_close(struct net_device *dev)
711{
712 struct hix5hd2_priv *priv = netdev_priv(dev);
713
714 hix5hd2_port_disable(priv);
715 hix5hd2_irq_disable(priv);
716 napi_disable(&priv->napi);
717 netif_stop_queue(dev);
718 hix5hd2_free_dma_desc_rings(priv);
719
720 if (priv->phy) {
721 phy_stop(priv->phy);
722 phy_disconnect(priv->phy);
723 }
724
725 clk_disable_unprepare(priv->clk);
726
727 return 0;
728}
729
730static void hix5hd2_tx_timeout_task(struct work_struct *work)
731{
732 struct hix5hd2_priv *priv;
733
734 priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
735 hix5hd2_net_close(priv->netdev);
736 hix5hd2_net_open(priv->netdev);
737}
738
739static void hix5hd2_net_timeout(struct net_device *dev)
740{
741 struct hix5hd2_priv *priv = netdev_priv(dev);
742
743 schedule_work(&priv->tx_timeout_task);
744}
745
746static const struct net_device_ops hix5hd2_netdev_ops = {
747 .ndo_open = hix5hd2_net_open,
748 .ndo_stop = hix5hd2_net_close,
749 .ndo_start_xmit = hix5hd2_net_xmit,
750 .ndo_tx_timeout = hix5hd2_net_timeout,
751 .ndo_set_mac_address = hix5hd2_net_set_mac_address,
752};
753
754static int hix5hd2_get_settings(struct net_device *net_dev,
755 struct ethtool_cmd *cmd)
756{
757 struct hix5hd2_priv *priv = netdev_priv(net_dev);
758
759 if (!priv->phy)
760 return -ENODEV;
761
762 return phy_ethtool_gset(priv->phy, cmd);
763}
764
765static int hix5hd2_set_settings(struct net_device *net_dev,
766 struct ethtool_cmd *cmd)
767{
768 struct hix5hd2_priv *priv = netdev_priv(net_dev);
769
770 if (!priv->phy)
771 return -ENODEV;
772
773 return phy_ethtool_sset(priv->phy, cmd);
774}
775
776static const struct ethtool_ops hix5hd2_ethtools_ops = {
777 .get_link = ethtool_op_get_link,
778 .get_settings = hix5hd2_get_settings,
779 .set_settings = hix5hd2_set_settings,
780};
781
782static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
783{
784 struct hix5hd2_priv *priv = bus->priv;
785 void __iomem *base = priv->base;
786 int i, timeout = 10000;
787
788 for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
789 if (i == timeout)
790 return -ETIMEDOUT;
791 usleep_range(10, 20);
792 }
793
794 return 0;
795}
796
797static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
798{
799 struct hix5hd2_priv *priv = bus->priv;
800 void __iomem *base = priv->base;
801 int val, ret;
802
803 ret = hix5hd2_mdio_wait_ready(bus);
804 if (ret < 0)
805 goto out;
806
807 writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
808 ret = hix5hd2_mdio_wait_ready(bus);
809 if (ret < 0)
810 goto out;
811
812 val = readl_relaxed(base + MDIO_RDATA_STATUS);
813 if (val & MDIO_R_VALID) {
814 dev_err(bus->parent, "SMI bus read not valid\n");
815 ret = -ENODEV;
816 goto out;
817 }
818
819 val = readl_relaxed(base + MDIO_SINGLE_DATA);
820 ret = (val >> 16) & 0xFFFF;
821out:
822 return ret;
823}
824
825static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
826{
827 struct hix5hd2_priv *priv = bus->priv;
828 void __iomem *base = priv->base;
829 int ret;
830
831 ret = hix5hd2_mdio_wait_ready(bus);
832 if (ret < 0)
833 goto out;
834
835 writel_relaxed(val, base + MDIO_SINGLE_DATA);
836 writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
837 ret = hix5hd2_mdio_wait_ready(bus);
838out:
839 return ret;
840}
841
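Together, hix5hd2_mdio_read() and hix5hd2_mdio_write() implement the controller's one-outstanding-command MDIO protocol: poll until MDIO_START clears, issue the command word, poll again, and (for reads) fetch the data register. A hypothetical caller through the registered mii_bus, using the standard MII definitions from <linux/mii.h>:

	/* Hypothetical helper: 1 if the PHY reports link, 0 if not, -errno on error. */
	static int hix5hd2_phy_link_up(struct mii_bus *bus, int phy_addr)
	{
		int bmsr = hix5hd2_mdio_read(bus, phy_addr, MII_BMSR);

		if (bmsr < 0)
			return bmsr;	/* -ETIMEDOUT or -ENODEV from the accessor */
		return !!(bmsr & BMSR_LSTATUS);
	}
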
842static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
843{
844 int i;
845
846 for (i = 0; i < QUEUE_NUMS; i++) {
847 if (priv->pool[i].desc) {
848 dma_free_coherent(priv->dev, priv->pool[i].size,
849 priv->pool[i].desc,
850 priv->pool[i].phys_addr);
851 priv->pool[i].desc = NULL;
852 }
853 }
854}
855
856static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
857{
858 struct device *dev = priv->dev;
859 struct hix5hd2_desc *virt_addr;
860 dma_addr_t phys_addr;
861 int size, i;
862
863 priv->rx_fq.count = RX_DESC_NUM;
864 priv->rx_bq.count = RX_DESC_NUM;
865 priv->tx_bq.count = TX_DESC_NUM;
866 priv->tx_rq.count = TX_DESC_NUM;
867
868 for (i = 0; i < QUEUE_NUMS; i++) {
869 size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
870 virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
871 GFP_KERNEL);
872 if (virt_addr == NULL)
873 goto error_free_pool;
874
875 memset(virt_addr, 0, size);
876 priv->pool[i].size = size;
877 priv->pool[i].desc = virt_addr;
878 priv->pool[i].phys_addr = phys_addr;
879 }
880 return 0;
881
882error_free_pool:
883 hix5hd2_destroy_hw_desc_queue(priv);
884
885 return -ENOMEM;
886}
887
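Since the pool is zeroed immediately after allocation, the dma_alloc_coherent() + memset() pair above could be collapsed into one call, assuming dma_zalloc_coherent() is available in this tree:

	/* Equivalent to the alloc-then-memset sequence above. */
	virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
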
888static int hix5hd2_dev_probe(struct platform_device *pdev)
889{
890 struct device *dev = &pdev->dev;
891 struct device_node *node = dev->of_node;
892 struct net_device *ndev;
893 struct hix5hd2_priv *priv;
894 struct resource *res;
895 struct mii_bus *bus;
896 const char *mac_addr;
897 int ret;
898
899 ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
900 if (!ndev)
901 return -ENOMEM;
902
903 platform_set_drvdata(pdev, ndev);
904
905 priv = netdev_priv(ndev);
906 priv->dev = dev;
907 priv->netdev = ndev;
908
909 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
910 priv->base = devm_ioremap_resource(dev, res);
911 if (IS_ERR(priv->base)) {
912 ret = PTR_ERR(priv->base);
913 goto out_free_netdev;
914 }
915
916 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
917 priv->ctrl_base = devm_ioremap_resource(dev, res);
918 if (IS_ERR(priv->ctrl_base)) {
919 ret = PTR_ERR(priv->ctrl_base);
920 goto out_free_netdev;
921 }
922
923 priv->clk = devm_clk_get(&pdev->dev, NULL);
924 if (IS_ERR(priv->clk)) {
925 netdev_err(ndev, "failed to get clk\n");
926 ret = PTR_ERR(priv->clk);
927 goto out_free_netdev;
928 }
929
930 ret = clk_prepare_enable(priv->clk);
931 if (ret < 0) {
932 netdev_err(ndev, "failed to enable clk %d\n", ret);
933 goto out_free_netdev;
934 }
935
936 bus = mdiobus_alloc();
937 if (bus == NULL) {
938 ret = -ENOMEM;
939 goto out_free_netdev;
940 }
941
942 bus->priv = priv;
943 bus->name = "hix5hd2_mii_bus";
944 bus->read = hix5hd2_mdio_read;
945 bus->write = hix5hd2_mdio_write;
946 bus->parent = &pdev->dev;
947 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
948 priv->bus = bus;
949
950 ret = of_mdiobus_register(bus, node);
951 if (ret)
952 goto err_free_mdio;
953
954 priv->phy_mode = of_get_phy_mode(node);
955 if (priv->phy_mode < 0) {
956 netdev_err(ndev, "failed to get phy-mode\n");
957 ret = -EINVAL;
958 goto err_mdiobus;
959 }
960
961 priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
962 if (!priv->phy_node) {
963 netdev_err(ndev, "failed to get phy-handle\n");
964 ret = -EINVAL;
965 goto err_mdiobus;
966 }
967
968 ndev->irq = platform_get_irq(pdev, 0);
969 if (ndev->irq <= 0) {
970 netdev_err(ndev, "No irq resource\n");
971 ret = -EINVAL;
972 goto out_phy_node;
973 }
974
975 ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
976 0, pdev->name, ndev);
977 if (ret) {
978 netdev_err(ndev, "devm_request_irq failed\n");
979 goto out_phy_node;
980 }
981
982 mac_addr = of_get_mac_address(node);
983 if (mac_addr)
984 ether_addr_copy(ndev->dev_addr, mac_addr);
985 if (!is_valid_ether_addr(ndev->dev_addr)) {
986 eth_hw_addr_random(ndev);
987 netdev_warn(ndev, "using random MAC address %pM\n",
988 ndev->dev_addr);
989 }
990
991 INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
992 ndev->watchdog_timeo = 6 * HZ;
993 ndev->priv_flags |= IFF_UNICAST_FLT;
994 ndev->netdev_ops = &hix5hd2_netdev_ops;
995 ndev->ethtool_ops = &hix5hd2_ethtools_ops;
996 SET_NETDEV_DEV(ndev, dev);
997
998 ret = hix5hd2_init_hw_desc_queue(priv);
999 if (ret)
1000 goto out_phy_node;
1001
1002 netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
1003 ret = register_netdev(priv->netdev);
1004 if (ret) {
1005 netdev_err(ndev, "register_netdev failed\n");
1006 goto out_destroy_queue;
1007 }
1008
1009 clk_disable_unprepare(priv->clk);
1010
1011 return ret;
1012
1013out_destroy_queue:
1014 netif_napi_del(&priv->napi);
1015 hix5hd2_destroy_hw_desc_queue(priv);
1016out_phy_node:
1017 of_node_put(priv->phy_node);
1018err_mdiobus:
1019 mdiobus_unregister(bus);
1020err_free_mdio:
1021 mdiobus_free(bus);
1022out_free_netdev:
1023 free_netdev(ndev);
1024
1025 return ret;
1026}
1027
1028static int hix5hd2_dev_remove(struct platform_device *pdev)
1029{
1030 struct net_device *ndev = platform_get_drvdata(pdev);
1031 struct hix5hd2_priv *priv = netdev_priv(ndev);
1032
1033 netif_napi_del(&priv->napi);
1034 unregister_netdev(ndev);
1035 mdiobus_unregister(priv->bus);
1036 mdiobus_free(priv->bus);
1037
1038 hix5hd2_destroy_hw_desc_queue(priv);
1039 of_node_put(priv->phy_node);
1040 cancel_work_sync(&priv->tx_timeout_task);
1041 free_netdev(ndev);
1042
1043 return 0;
1044}
1045
1046static const struct of_device_id hix5hd2_of_match[] = {
1047 {.compatible = "hisilicon,hix5hd2-gmac",},
1048 {},
1049};
1050
1051MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
1052
1053static struct platform_driver hix5hd2_dev_driver = {
1054 .driver = {
1055 .name = "hix5hd2-gmac",
1056 .of_match_table = hix5hd2_of_match,
1057 },
1058 .probe = hix5hd2_dev_probe,
1059 .remove = hix5hd2_dev_remove,
1060};
1061
1062module_platform_driver(hix5hd2_dev_driver);
1063
1064MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
1065MODULE_LICENSE("GPL v2");
1066MODULE_ALIAS("platform:hix5hd2-gmac");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b99a464..85a3866459cf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
63 cmd->duplex = port->full_duplex == 1 ? 63 cmd->duplex = port->full_duplex == 1 ?
64 DUPLEX_FULL : DUPLEX_HALF; 64 DUPLEX_FULL : DUPLEX_HALF;
65 } else { 65 } else {
66 speed = ~0; 66 speed = SPEED_UNKNOWN;
67 cmd->duplex = -1; 67 cmd->duplex = DUPLEX_UNKNOWN;
68 } 68 }
69 ethtool_cmd_speed_set(cmd, speed); 69 ethtool_cmd_speed_set(cmd, speed);
70 70
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
278 278
279void ehea_set_ethtool_ops(struct net_device *netdev) 279void ehea_set_ethtool_ops(struct net_device *netdev)
280{ 280{
281 SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops); 281 netdev->ethtool_ops = &ehea_ethtool_ops;
282} 282}
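This conversion drops the SET_ETHTOOL_OPS() wrapper in favor of a plain assignment; the macro being removed was a trivial one-liner along these lines:

	/* From older <linux/netdevice.h>; the assignment is now written out. */
	#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
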
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903bf13bc..a0b418e007a0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/device.h>
31#include <linux/in.h> 32#include <linux/in.h>
32#include <linux/ip.h> 33#include <linux/ip.h>
33#include <linux/tcp.h> 34#include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
3273 return -EINVAL; 3274 return -EINVAL;
3274 } 3275 }
3275 3276
3276 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3277 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
3277 if (!adapter) { 3278 if (!adapter) {
3278 ret = -ENOMEM; 3279 ret = -ENOMEM;
3279 dev_err(&dev->dev, "no mem for ehea_adapter\n"); 3280 dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
3359 3360
3360out_free_ad: 3361out_free_ad:
3361 list_del(&adapter->list); 3362 list_del(&adapter->list);
3362 kfree(adapter);
3363 3363
3364out: 3364out:
3365 ehea_update_firmware_handles(); 3365 ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
3386 ehea_destroy_eq(adapter->neq); 3386 ehea_destroy_eq(adapter->neq);
3387 ehea_remove_adapter_mr(adapter); 3387 ehea_remove_adapter_mr(adapter);
3388 list_del(&adapter->list); 3388 list_del(&adapter->list);
3389 kfree(adapter);
3390 3389
3391 ehea_update_firmware_handles(); 3390 ehea_update_firmware_handles();
3392 3391
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033bb557..a0820f72b25c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
103 103
104static void hw_queue_dtor(struct hw_queue *queue) 104static void hw_queue_dtor(struct hw_queue *queue)
105{ 105{
106 int pages_per_kpage = PAGE_SIZE / queue->pagesize; 106 int pages_per_kpage;
107 int i, nr_pages; 107 int i, nr_pages;
108 108
109 if (!queue || !queue->queue_pages) 109 if (!queue || !queue->queue_pages)
110 return; 110 return;
111 111
112 pages_per_kpage = PAGE_SIZE / queue->pagesize;
113
112 nr_pages = queue->queue_length / queue->pagesize; 114 nr_pages = queue->queue_length / queue->pagesize;
113 115
114 for (i = 0; i < nr_pages; i += pages_per_kpage) 116 for (i = 0; i < nr_pages; i += pages_per_kpage)
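The hw_queue_dtor() change fixes a dereference-before-NULL-check: computing pages_per_kpage in the declaration reads queue->pagesize before the !queue guard runs. A reduced illustration of the broken pattern being removed:

	void dtor(struct hw_queue *queue)
	{
		int pages = PAGE_SIZE / queue->pagesize;	/* oops if queue is NULL */

		if (!queue)	/* this check runs too late to help */
			return;
	}
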
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fdb42c8..87bd953cc2ee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
2879 dev->commac.ops = &emac_commac_sg_ops; 2879 dev->commac.ops = &emac_commac_sg_ops;
2880 } else 2880 } else
2881 ndev->netdev_ops = &emac_netdev_ops; 2881 ndev->netdev_ops = &emac_netdev_ops;
2882 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2882 ndev->ethtool_ops = &emac_ethtool_ops;
2883 2883
2884 netif_carrier_off(ndev); 2884 netif_carrier_off(ndev);
2885 2885
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae07171..5727779a7df2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2245 */ 2245 */
2246 dev->netdev_ops = &ipg_netdev_ops; 2246 dev->netdev_ops = &ipg_netdev_ops;
2247 SET_NETDEV_DEV(dev, &pdev->dev); 2247 SET_NETDEV_DEV(dev, &pdev->dev);
2248 SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); 2248 dev->ethtool_ops = &ipg_ethtool_ops;
2249 2249
2250 rc = pci_request_regions(pdev, DRV_NAME); 2250 rc = pci_request_regions(pdev, DRV_NAME);
2251 if (rc) 2251 if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461ce674c..9d979d7debef 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2854 netdev->hw_features |= NETIF_F_RXALL; 2854 netdev->hw_features |= NETIF_F_RXALL;
2855 2855
2856 netdev->netdev_ops = &e100_netdev_ops; 2856 netdev->netdev_ops = &e100_netdev_ops;
2857 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); 2857 netdev->ethtool_ops = &e100_ethtool_ops;
2858 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; 2858 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2859 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 2859 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2860 2860
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92a..d50f78afb56d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
168 else 168 else
169 ecmd->duplex = DUPLEX_HALF; 169 ecmd->duplex = DUPLEX_HALF;
170 } else { 170 } else {
171 ethtool_cmd_speed_set(ecmd, -1); 171 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
172 ecmd->duplex = -1; 172 ecmd->duplex = DUPLEX_UNKNOWN;
173 } 173 }
174 174
175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || 175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1460 * enough time to complete the receives, if it's 1460 * enough time to complete the receives, if it's
1461 * exceeded, break and error off 1461 * exceeded, break and error off
1462 */ 1462 */
1463 } while (good_cnt < 64 && jiffies < (time + 20)); 1463 } while (good_cnt < 64 && time_after(time + 20, jiffies));
1464
1464 if (good_cnt != 64) { 1465 if (good_cnt != 64) {
1465 ret_val = 13; /* ret_val is the same as mis-compare */ 1466 ret_val = 13; /* ret_val is the same as mis-compare */
1466 break; 1467 break;
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1905 1906
1906void e1000_set_ethtool_ops(struct net_device *netdev) 1907void e1000_set_ethtool_ops(struct net_device *netdev)
1907{ 1908{
1908 SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); 1909 netdev->ethtool_ops = &e1000_ethtool_ops;
1909} 1910}
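Switching the raw jiffies comparison to time_after() matters because jiffies wraps: near the wrap point, "jiffies < (time + 20)" can stay false for the whole window and the loop never times out. The kernel macro reduces to signed-delta arithmetic, roughly:

	/* Sketch of the idea behind time_after(a, b): true iff a is after b. */
	#define my_time_after(a, b)	((long)((b) - (a)) < 0)
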
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb296a0..e9b07ccc0eba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
4877 * since the test for a multicast frame will test positive on 4877 * since the test for a multicast frame will test positive on
4878 * a broadcast frame. 4878 * a broadcast frame.
4879 */ 4879 */
4880 if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff)) 4880 if (is_broadcast_ether_addr(mac_addr))
4881 /* Broadcast packet */ 4881 /* Broadcast packet */
4882 stats->bprc++; 4882 stats->bprc++;
4883 else if (*mac_addr & 0x01) 4883 else if (is_multicast_ether_addr(mac_addr))
4884 /* Multicast packet */ 4884 /* Multicast packet */
4885 stats->mprc++; 4885 stats->mprc++;
4886 4886
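The open-coded byte tests become the <linux/etherdevice.h> helpers, whose logic is roughly:

	/* Sketch of the helpers used above. */
	static inline bool my_is_multicast(const u8 *a)
	{
		return a[0] & 0x01;	/* I/G bit of the first octet */
	}

	static inline bool my_is_broadcast(const u8 *a)
	{
		return (a[0] & a[1] & a[2] & a[3] & a[4] & a[5]) == 0xff;
	}

The broadcast test checks all six octets instead of the first two, and since every broadcast address also passes the multicast test, the broadcast branch must stay first.
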
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058dfe418b..660971f304b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3105 */ 3105 */
3106 tx_ring = adapter->tx_ring; 3106 tx_ring = adapter->tx_ring;
3107 3107
3108 if (unlikely(skb->len <= 0)) {
3109 dev_kfree_skb_any(skb);
3110 return NETDEV_TX_OK;
3111 }
3112
3113 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3108 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3114 * packets may get corrupted during padding by HW. 3109 * packets may get corrupted during padding by HW.
3115 * To WA this issue, pad all small packets manually. 3110 * To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11d6992..08f22f348800 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
1365 .setup_led = e1000e_setup_led_generic, 1365 .setup_led = e1000e_setup_led_generic,
1366 .config_collision_dist = e1000e_config_collision_dist_generic, 1366 .config_collision_dist = e1000e_config_collision_dist_generic,
1367 .rar_set = e1000e_rar_set_generic, 1367 .rar_set = e1000e_rar_set_generic,
1368 .rar_get_count = e1000e_rar_get_count_generic,
1368}; 1369};
1369 1370
1370static const struct e1000_phy_operations es2_phy_ops = { 1371static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1efb08..218481e509f9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
1896 .config_collision_dist = e1000e_config_collision_dist_generic, 1896 .config_collision_dist = e1000e_config_collision_dist_generic,
1897 .read_mac_addr = e1000_read_mac_addr_82571, 1897 .read_mac_addr = e1000_read_mac_addr_82571,
1898 .rar_set = e1000e_rar_set_generic, 1898 .rar_set = e1000e_rar_set_generic,
1899 .rar_get_count = e1000e_rar_get_count_generic,
1899}; 1900};
1900 1901
1901static const struct e1000_phy_operations e82_phy_ops_igp = { 1902static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..7785240a0da1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
265 u32 tx_hwtstamp_timeouts; 265 u32 tx_hwtstamp_timeouts;
266 266
267 /* Rx */ 267 /* Rx */
268 bool (*clean_rx) (struct e1000_ring *ring, int *work_done, 268 bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
269 int work_to_do) ____cacheline_aligned_in_smp; 269 int work_to_do) ____cacheline_aligned_in_smp;
270 void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, 270 void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
271 gfp_t gfp); 271 gfp_t gfp);
272 struct e1000_ring *rx_ring; 272 struct e1000_ring *rx_ring;
273 273
274 u32 rx_int_delay; 274 u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
391 * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours 391 * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
392 */ 392 */
393#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) 393#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
394#define E1000_MAX_82574_SYSTIM_REREADS 50
395#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
394 396
395/* hardware capability, feature, and workaround flags */ 397/* hardware capability, feature, and workaround flags */
396#define FLAG_HAS_AMT (1 << 0) 398#define FLAG_HAS_AMT (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
573 575
574#define er32(reg) __er32(hw, E1000_##reg) 576#define er32(reg) __er32(hw, E1000_##reg)
575 577
576/** 578s32 __ew32_prepare(struct e1000_hw *hw);
577 * __ew32_prepare - prepare to write to MAC CSR register on certain parts 579void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
578 * @hw: pointer to the HW structure
579 *
580 * When updating the MAC CSR registers, the Manageability Engine (ME) could
581 * be accessing the registers at the same time. Normally, this is handled in
582 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
583 * accesses later than it should which could result in the register to have
584 * an incorrect value. Workaround this by checking the FWSM register which
585 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
586 * and try again a number of times.
587 **/
588static inline s32 __ew32_prepare(struct e1000_hw *hw)
589{
590 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
591
592 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
593 udelay(50);
594
595 return i;
596}
597
598static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
599{
600 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
601 __ew32_prepare(hw);
602
603 writel(val, hw->hw_addr + reg);
604}
605 580
606#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) 581#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
607 582
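The overflow comment above checks out: 2^46 ≈ 7.04 × 10^13 ticks at 10^9 ticks per second is about 70,368 s ≈ 19.55 hours, so rewinding the clock every four hours (E1000_SYSTIM_OVERFLOW_PERIOD) leaves a wide safety margin.
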
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..815e26c6d34b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
159 ecmd->transceiver = XCVR_EXTERNAL; 159 ecmd->transceiver = XCVR_EXTERNAL;
160 } 160 }
161 161
162 speed = -1; 162 speed = SPEED_UNKNOWN;
163 ecmd->duplex = -1; 163 ecmd->duplex = DUPLEX_UNKNOWN;
164 164
165 if (netif_running(netdev)) { 165 if (netif_running(netdev)) {
166 if (netif_carrier_ok(netdev)) { 166 if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
169 } 169 }
170 } else if (!pm_runtime_suspended(netdev->dev.parent)) { 170 } else if (!pm_runtime_suspended(netdev->dev.parent)) {
171 u32 status = er32(STATUS); 171 u32 status = er32(STATUS);
172
172 if (status & E1000_STATUS_LU) { 173 if (status & E1000_STATUS_LU) {
173 if (status & E1000_STATUS_SPEED_1000) 174 if (status & E1000_STATUS_SPEED_1000)
174 speed = SPEED_1000; 175 speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
783 reg + (offset << 2), val, 784 reg + (offset << 2), val,
784 (test[pat] & write & mask)); 785 (test[pat] & write & mask));
785 *data = reg; 786 *data = reg;
786 return 1; 787 return true;
787 } 788 }
788 } 789 }
789 return 0; 790 return false;
790} 791}
791 792
792static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, 793static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
793 int reg, u32 mask, u32 write) 794 int reg, u32 mask, u32 write)
794{ 795{
795 u32 val; 796 u32 val;
797
796 __ew32(&adapter->hw, reg, write & mask); 798 __ew32(&adapter->hw, reg, write & mask);
797 val = __er32(&adapter->hw, reg); 799 val = __er32(&adapter->hw, reg);
798 if ((write & mask) != (val & mask)) { 800 if ((write & mask) != (val & mask)) {
799 e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", 801 e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
800 reg, (val & mask), (write & mask)); 802 reg, (val & mask), (write & mask));
801 *data = reg; 803 *data = reg;
802 return 1; 804 return true;
803 } 805 }
804 return 0; 806 return false;
805} 807}
806 808
807#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ 809#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1717 *data = 0; 1719 *data = 0;
1718 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1720 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1719 int i = 0; 1721 int i = 0;
1722
1720 hw->mac.serdes_has_link = false; 1723 hw->mac.serdes_has_link = false;
1721 1724
1722 /* On some blade server designs, link establishment 1725 /* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
2315 2318
2316void e1000e_set_ethtool_ops(struct net_device *netdev) 2319void e1000e_set_ethtool_ops(struct net_device *netdev)
2317{ 2320{
2318 SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); 2321 netdev->ethtool_ops = &e1000_ethtool_ops;
2319} 2322}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f39a97..72f5475c4b90 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
469 s32 (*setup_led)(struct e1000_hw *); 469 s32 (*setup_led)(struct e1000_hw *);
470 void (*write_vfta)(struct e1000_hw *, u32, u32); 470 void (*write_vfta)(struct e1000_hw *, u32, u32);
471 void (*config_collision_dist)(struct e1000_hw *); 471 void (*config_collision_dist)(struct e1000_hw *);
472 void (*rar_set)(struct e1000_hw *, u8 *, u32); 472 int (*rar_set)(struct e1000_hw *, u8 *, u32);
473 s32 (*read_mac_addr)(struct e1000_hw *); 473 s32 (*read_mac_addr)(struct e1000_hw *);
474 u32 (*rar_get_count)(struct e1000_hw *);
474}; 475};
475 476
476/* When to use various PHY register access functions: 477/* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..8894ab8ed6bd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
139static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 139static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
140static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 140static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
141static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 141static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
142static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); 142static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
143static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); 143static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
144static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
144static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 145static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
145static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 146static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
146static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); 147static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
704 mac->ops.rar_set = e1000_rar_set_pch_lpt; 705 mac->ops.rar_set = e1000_rar_set_pch_lpt;
705 mac->ops.setup_physical_interface = 706 mac->ops.setup_physical_interface =
706 e1000_setup_copper_link_pch_lpt; 707 e1000_setup_copper_link_pch_lpt;
708 mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
707 } 709 }
708 710
709 /* Enable PCS Lock-loss workaround for ICH8 */ 711 /* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1334 if (((hw->mac.type == e1000_pch2lan) || 1336 if (((hw->mac.type == e1000_pch2lan) ||
1335 (hw->mac.type == e1000_pch_lpt)) && link) { 1337 (hw->mac.type == e1000_pch_lpt)) && link) {
1336 u32 reg; 1338 u32 reg;
1339
1337 reg = er32(STATUS); 1340 reg = er32(STATUS);
1338 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1341 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1339 u16 emi_addr; 1342 u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1634 u32 fwsm; 1637 u32 fwsm;
1635 1638
1636 fwsm = er32(FWSM); 1639 fwsm = er32(FWSM);
1637 return ((fwsm & E1000_ICH_FWSM_FW_VALID) && 1640 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1638 ((fwsm & E1000_FWSM_MODE_MASK) == 1641 ((fwsm & E1000_FWSM_MODE_MASK) ==
1639 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); 1642 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1640} 1643}
1641 1644
1642/** 1645/**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1667 * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 1670 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1668 * Use SHRA[0-3] in place of those reserved for ME. 1671 * Use SHRA[0-3] in place of those reserved for ME.
1669 **/ 1672 **/
1670static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) 1673static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1671{ 1674{
1672 u32 rar_low, rar_high; 1675 u32 rar_low, rar_high;
1673 1676
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1689 e1e_flush(); 1692 e1e_flush();
1690 ew32(RAH(index), rar_high); 1693 ew32(RAH(index), rar_high);
1691 e1e_flush(); 1694 e1e_flush();
1692 return; 1695 return 0;
1693 } 1696 }
1694 1697
1695 /* RAR[1-6] are owned by manageability. Skip those and program the 1698 /* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1712 /* verify the register updates */ 1715 /* verify the register updates */
1713 if ((er32(SHRAL(index - 1)) == rar_low) && 1716 if ((er32(SHRAL(index - 1)) == rar_low) &&
1714 (er32(SHRAH(index - 1)) == rar_high)) 1717 (er32(SHRAH(index - 1)) == rar_high))
1715 return; 1718 return 0;
1716 1719
1717 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", 1720 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1718 (index - 1), er32(FWSM)); 1721 (index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1720 1723
1721out: 1724out:
1722 e_dbg("Failed to write receive address at index %d\n", index); 1725 e_dbg("Failed to write receive address at index %d\n", index);
1726 return -E1000_ERR_CONFIG;
1727}
1728
1729/**
1730 * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1731 * @hw: pointer to the HW structure
1732 *
1733 * Get the number of available receive registers that the Host can
1734 * program. SHRA[0-10] are the shared receive address registers
1735 * that are shared between the Host and manageability engine (ME).
1736 * ME can reserve any number of addresses and the host needs to be
1737 * able to tell how many available registers it has access to.
1738 **/
1739static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1740{
1741 u32 wlock_mac;
1742 u32 num_entries;
1743
1744 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1745 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1746
1747 switch (wlock_mac) {
1748 case 0:
1749 /* All SHRA[0..10] and RAR[0] available */
1750 num_entries = hw->mac.rar_entry_count;
1751 break;
1752 case 1:
1753 /* Only RAR[0] available */
1754 num_entries = 1;
1755 break;
1756 default:
1757 /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
1758 num_entries = wlock_mac + 1;
1759 break;
1760 }
1761
1762 return num_entries;
1723} 1763}
1724 1764
1725/** 1765/**
@@ -1733,7 +1773,7 @@ out:
1733 * contain the MAC address. SHRA[0-10] are the shared receive address 1773 * contain the MAC address. SHRA[0-10] are the shared receive address
1734 * registers that are shared between the Host and manageability engine (ME). 1774 * registers that are shared between the Host and manageability engine (ME).
1735 **/ 1775 **/
1736static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) 1776static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1737{ 1777{
1738 u32 rar_low, rar_high; 1778 u32 rar_low, rar_high;
1739 u32 wlock_mac; 1779 u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1755 e1e_flush(); 1795 e1e_flush();
1756 ew32(RAH(index), rar_high); 1796 ew32(RAH(index), rar_high);
1757 e1e_flush(); 1797 e1e_flush();
1758 return; 1798 return 0;
1759 } 1799 }
1760 1800
1761 /* The manageability engine (ME) can lock certain SHRAR registers that 1801 /* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1787 /* verify the register updates */ 1827 /* verify the register updates */
1788 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && 1828 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1789 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) 1829 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1790 return; 1830 return 0;
1791 } 1831 }
1792 } 1832 }
1793 1833
1794out: 1834out:
1795 e_dbg("Failed to write receive address at index %d\n", index); 1835 e_dbg("Failed to write receive address at index %d\n", index);
1836 return -E1000_ERR_CONFIG;
1796} 1837}
1797 1838
1798/** 1839/**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
4976 /* id_led_init dependent on mac type */ 5017 /* id_led_init dependent on mac type */
4977 .config_collision_dist = e1000e_config_collision_dist_generic, 5018 .config_collision_dist = e1000e_config_collision_dist_generic,
4978 .rar_set = e1000e_rar_set_generic, 5019 .rar_set = e1000e_rar_set_generic,
5020 .rar_get_count = e1000e_rar_get_count_generic,
4979}; 5021};
4980 5022
4981static const struct e1000_phy_operations ich8_phy_ops = { 5023static const struct e1000_phy_operations ich8_phy_ops = {
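A worked example of the wlock_mac decode in e1000_rar_get_count_pch_lpt(): wlock_mac == 0 means ME locked nothing, so the full rar_entry_count is usable; wlock_mac == 1 leaves only RAR[0]; and, say, wlock_mac == 3 means SHRA[0..2] remain host-programmable, for 3 + 1 = 4 entries counting RAR[0].
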
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a466d1d0..8c386f3a15eb 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
211 return 0; 211 return 0;
212} 212}
213 213
214u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
215{
216 return hw->mac.rar_entry_count;
217}
218
214/** 219/**
215 * e1000e_rar_set_generic - Set receive address register 220 * e1000e_rar_set_generic - Set receive address register
216 * @hw: pointer to the HW structure 221 * @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
220 * Sets the receive address array register at index to the address passed 225 * Sets the receive address array register at index to the address passed
221 * in by addr. 226 * in by addr.
222 **/ 227 **/
223void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) 228int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
224{ 229{
225 u32 rar_low, rar_high; 230 u32 rar_low, rar_high;
226 231
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
244 e1e_flush(); 249 e1e_flush();
245 ew32(RAH(index), rar_high); 250 ew32(RAH(index), rar_high);
246 e1e_flush(); 251 e1e_flush();
252
253 return 0;
247} 254}
248 255
249/** 256/**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c2825b7a..0513d90cdeea 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
61void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); 61void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
62 62
63void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); 63void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
64void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); 64u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
65int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
65void e1000e_config_collision_dist_generic(struct e1000_hw *hw); 66void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
66 67
67#endif 68#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..201cc93f3625 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
124}; 124};
125 125
126/** 126/**
127 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
128 * @hw: pointer to the HW structure
129 *
130 * When updating the MAC CSR registers, the Manageability Engine (ME) could
131 * be accessing the registers at the same time. Normally, this is handled in
132 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
133 * accesses later than it should which could result in the register to have
134 * an incorrect value. Workaround this by checking the FWSM register which
135 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
136 * and try again a number of times.
137 **/
138s32 __ew32_prepare(struct e1000_hw *hw)
139{
140 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
141
142 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
143 udelay(50);
144
145 return i;
146}
147
148void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
149{
150 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
151 __ew32_prepare(hw);
152
153 writel(val, hw->hw_addr + reg);
154}
155
156/**
127 * e1000_regdump - register printout routine 157 * e1000_regdump - register printout routine
128 * @hw: pointer to the HW structure 158 * @hw: pointer to the HW structure
129 * @reginfo: pointer to the register info table 159 * @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
599 629
600 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { 630 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
601 u32 rctl = er32(RCTL); 631 u32 rctl = er32(RCTL);
632
602 ew32(RCTL, rctl & ~E1000_RCTL_EN); 633 ew32(RCTL, rctl & ~E1000_RCTL_EN);
603 e_err("ME firmware caused invalid RDT - resetting\n"); 634 e_err("ME firmware caused invalid RDT - resetting\n");
604 schedule_work(&adapter->reset_task); 635 schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
615 646
616 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { 647 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
617 u32 tctl = er32(TCTL); 648 u32 tctl = er32(TCTL);
649
618 ew32(TCTL, tctl & ~E1000_TCTL_EN); 650 ew32(TCTL, tctl & ~E1000_TCTL_EN);
619 e_err("ME firmware caused invalid TDT - resetting\n"); 651 e_err("ME firmware caused invalid TDT - resetting\n");
620 schedule_work(&adapter->reset_task); 652 schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1198 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1230 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1199 (count < tx_ring->count)) { 1231 (count < tx_ring->count)) {
1200 bool cleaned = false; 1232 bool cleaned = false;
1233
1201 rmb(); /* read buffer_info after eop_desc */ 1234 rmb(); /* read buffer_info after eop_desc */
1202 for (; !cleaned; count++) { 1235 for (; !cleaned; count++) {
1203 tx_desc = E1000_TX_DESC(*tx_ring, i); 1236 tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1753 adapter->flags & FLAG_RX_NEEDS_RESTART) { 1786 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1754 /* disable receives */ 1787 /* disable receives */
1755 u32 rctl = er32(RCTL); 1788 u32 rctl = er32(RCTL);
1789
1756 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1790 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1757 adapter->flags |= FLAG_RESTART_NOW; 1791 adapter->flags |= FLAG_RESTART_NOW;
1758 } 1792 }
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1960 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1994 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1961 if (hw->mac.type == e1000_82574) { 1995 if (hw->mac.type == e1000_82574) {
1962 u32 rfctl = er32(RFCTL); 1996 u32 rfctl = er32(RFCTL);
1997
1963 rfctl |= E1000_RFCTL_ACK_DIS; 1998 rfctl |= E1000_RFCTL_ACK_DIS;
1964 ew32(RFCTL, rfctl); 1999 ew32(RFCTL, rfctl);
1965 } 2000 }
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
2204 2239
2205 if (adapter->msix_entries) { 2240 if (adapter->msix_entries) {
2206 int i; 2241 int i;
2242
2207 for (i = 0; i < adapter->num_vectors; i++) 2243 for (i = 0; i < adapter->num_vectors; i++)
2208 synchronize_irq(adapter->msix_entries[i].vector); 2244 synchronize_irq(adapter->msix_entries[i].vector);
2209 } else { 2245 } else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2921 2957
2922 if (adapter->flags2 & FLAG2_DMA_BURST) { 2958 if (adapter->flags2 & FLAG2_DMA_BURST) {
2923 u32 txdctl = er32(TXDCTL(0)); 2959 u32 txdctl = er32(TXDCTL(0));
2960
2924 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2961 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2925 E1000_TXDCTL_WTHRESH); 2962 E1000_TXDCTL_WTHRESH);
2926 /* set up some performance related parameters to encourage the 2963 /* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3239 3276
3240 if (adapter->flags & FLAG_IS_ICH) { 3277 if (adapter->flags & FLAG_IS_ICH) {
3241 u32 rxdctl = er32(RXDCTL(0)); 3278 u32 rxdctl = er32(RXDCTL(0));
3279
3242 ew32(RXDCTL(0), rxdctl | 0x3); 3280 ew32(RXDCTL(0), rxdctl | 0x3);
3243 } 3281 }
3244 3282
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3303{ 3341{
3304 struct e1000_adapter *adapter = netdev_priv(netdev); 3342 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw; 3343 struct e1000_hw *hw = &adapter->hw;
3306 unsigned int rar_entries = hw->mac.rar_entry_count; 3344 unsigned int rar_entries;
3307 int count = 0; 3345 int count = 0;
3308 3346
3347 rar_entries = hw->mac.ops.rar_get_count(hw);
3348
3309 /* save a rar entry for our hardware address */ 3349 /* save a rar entry for our hardware address */
3310 rar_entries--; 3350 rar_entries--;
3311 3351
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3324 * combining 3364 * combining
3325 */ 3365 */
3326 netdev_for_each_uc_addr(ha, netdev) { 3366 netdev_for_each_uc_addr(ha, netdev) {
3367 int rval;
3368
3327 if (!rar_entries) 3369 if (!rar_entries)
3328 break; 3370 break;
3329 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3371 rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3372 if (rval < 0)
3373 return -ENOMEM;
3330 count++; 3374 count++;
3331 } 3375 }
3332 } 3376 }
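With rar_set() now returning int, callers can detect ME-locked registers instead of silently losing a filter. A hedged sketch of the new contract as e1000e_write_uc_addr_list() uses it (the helper and its names below are illustrative, not from the patch):

	/* Program up to n unicast addresses; RAR[0] stays reserved for dev_addr. */
	static int program_uc_addrs(struct e1000_hw *hw, u8 (*addrs)[ETH_ALEN], int n)
	{
		u32 avail = hw->mac.ops.rar_get_count(hw) - 1;
		int i;

		for (i = 0; i < n && avail; i++)
			if (hw->mac.ops.rar_set(hw, addrs[i], avail--) < 0)
				return -ENOMEM;	/* mirrors the hunk above */
		return i;	/* number of addresses written */
	}
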
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4085 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4129 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4086 cc); 4130 cc);
4087 struct e1000_hw *hw = &adapter->hw; 4131 struct e1000_hw *hw = &adapter->hw;
4088 cycle_t systim; 4132 cycle_t systim, systim_next;
4089 4133
4090 /* latch SYSTIMH on read of SYSTIML */ 4134 /* latch SYSTIMH on read of SYSTIML */
4091 systim = (cycle_t)er32(SYSTIML); 4135 systim = (cycle_t)er32(SYSTIML);
4092 systim |= (cycle_t)er32(SYSTIMH) << 32; 4136 systim |= (cycle_t)er32(SYSTIMH) << 32;
4093 4137
4138 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
4139 u64 incvalue, time_delta, rem, temp;
4140 int i;
4141
4142 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
4143 * check to see that the time is incrementing at a reasonable
4144 * rate and is a multiple of incvalue
4145 */
4146 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4147 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4148 /* latch SYSTIMH on read of SYSTIML */
4149 systim_next = (cycle_t)er32(SYSTIML);
4150 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4151
4152 time_delta = systim_next - systim;
4153 temp = time_delta;
4154 rem = do_div(temp, incvalue);
4155
4156 systim = systim_next;
4157
4158 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4159 (rem == 0))
4160 break;
4161 }
4162 }
4094 return systim; 4163 return systim;
4095} 4164}
4096 4165
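The reread loop leans on kernel do_div() semantics: the macro divides its u64 first argument in place and returns the remainder, which is why time_delta is copied into temp before the call. In effect:

	temp = time_delta;
	rem = do_div(temp, incvalue);	/* temp /= incvalue; rem = old temp % incvalue */

A SYSTIM sample is accepted once the delta is both below E1000_82574_SYSTIM_EPSILON and an exact multiple of incvalue, i.e. consistent with a counter ticking normally.
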
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
4491 e1000_get_phy_info(hw); 4560 e1000_get_phy_info(hw);
4492 4561
4493 /* Enable EEE on 82579 after link up */ 4562 /* Enable EEE on 82579 after link up */
4494 if (hw->phy.type == e1000_phy_82579) 4563 if (hw->phy.type >= e1000_phy_82579)
4495 e1000_set_eee_pchlan(hw); 4564 e1000_set_eee_pchlan(hw);
4496} 4565}
4497 4566
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4695 /* Correctable ECC Errors */ 4764 /* Correctable ECC Errors */
4696 if (hw->mac.type == e1000_pch_lpt) { 4765 if (hw->mac.type == e1000_pch_lpt) {
4697 u32 pbeccsts = er32(PBECCSTS); 4766 u32 pbeccsts = er32(PBECCSTS);
4767
4698 adapter->corr_errors += 4768 adapter->corr_errors +=
4699 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 4769 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4700 adapter->uncorr_errors += 4770 adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
4808 (adapter->flags & FLAG_RESTART_NOW)) { 4878 (adapter->flags & FLAG_RESTART_NOW)) {
4809 struct e1000_hw *hw = &adapter->hw; 4879 struct e1000_hw *hw = &adapter->hw;
4810 u32 rctl = er32(RCTL); 4880 u32 rctl = er32(RCTL);
4881
4811 ew32(RCTL, rctl | E1000_RCTL_EN); 4882 ew32(RCTL, rctl | E1000_RCTL_EN);
4812 adapter->flags &= ~FLAG_RESTART_NOW; 4883 adapter->flags &= ~FLAG_RESTART_NOW;
4813 } 4884 }
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4930 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 5001 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4931 !txb2b) { 5002 !txb2b) {
4932 u32 tarc0; 5003 u32 tarc0;
5004
4933 tarc0 = er32(TARC(0)); 5005 tarc0 = er32(TARC(0));
4934 tarc0 &= ~SPEED_MODE_BIT; 5006 tarc0 &= ~SPEED_MODE_BIT;
4935 ew32(TARC(0), tarc0); 5007 ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5170 __be16 protocol; 5242 __be16 protocol;
5171 5243
5172 if (skb->ip_summed != CHECKSUM_PARTIAL) 5244 if (skb->ip_summed != CHECKSUM_PARTIAL)
5173 return 0; 5245 return false;
5174 5246
5175 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 5247 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5176 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 5248 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5215 i = 0; 5287 i = 0;
5216 tx_ring->next_to_use = i; 5288 tx_ring->next_to_use = i;
5217 5289
5218 return 1; 5290 return true;
5219} 5291}
5220 5292
5221static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 5293static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6209 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6281 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6210 } else { 6282 } else {
6211 u32 wus = er32(WUS); 6283 u32 wus = er32(WUS);
6284
6212 if (wus) { 6285 if (wus) {
6213 e_info("MAC Wakeup cause - %s\n", 6286 e_info("MAC Wakeup cause - %s\n",
6214 wus & E1000_WUS_EX ? "Unicast Packet" : 6287 wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
7027 .resume = e1000_io_resume, 7100 .resume = e1000_io_resume,
7028}; 7101};
7029 7102
7030static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { 7103static const struct pci_device_id e1000_pci_tbl[] = {
7031 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 7104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7032 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 7105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7033 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 7106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
7144static int __init e1000_init_module(void) 7217static int __init e1000_init_module(void)
7145{ 7218{
7146 int ret; 7219 int ret;
7220
7147 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 7221 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7148 e1000e_driver_version); 7222 e1000e_driver_version);
7149 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); 7223 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
398 /* Loop to allow for up to whole page write of eeprom */ 398 /* Loop to allow for up to whole page write of eeprom */
399 while (widx < words) { 399 while (widx < words) {
400 u16 word_out = data[widx]; 400 u16 word_out = data[widx];
401
401 word_out = (word_out >> 8) | (word_out << 8); 402 word_out = (word_out >> 8) | (word_out << 8);
402 e1000_shift_out_eec_bits(hw, word_out, 16); 403 e1000_shift_out_eec_bits(hw, word_out, 16);
403 widx++; 404 widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
436 436
437 if (num_IntMode > bd) { 437 if (num_IntMode > bd) {
438 unsigned int int_mode = IntMode[bd]; 438 unsigned int int_mode = IntMode[bd];
439
439 e1000_validate_option(&int_mode, &opt, adapter); 440 e1000_validate_option(&int_mode, &opt, adapter);
440 adapter->int_mode = int_mode; 441 adapter->int_mode = int_mode;
441 } else { 442 } else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
457 458
458 if (num_SmartPowerDownEnable > bd) { 459 if (num_SmartPowerDownEnable > bd) {
459 unsigned int spd = SmartPowerDownEnable[bd]; 460 unsigned int spd = SmartPowerDownEnable[bd];
461
460 e1000_validate_option(&spd, &opt, adapter); 462 e1000_validate_option(&spd, &opt, adapter);
461 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) 463 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
462 adapter->flags |= FLAG_SMART_POWER_DOWN; 464 adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
473 475
474 if (num_CrcStripping > bd) { 476 if (num_CrcStripping > bd) {
475 unsigned int crc_stripping = CrcStripping[bd]; 477 unsigned int crc_stripping = CrcStripping[bd];
478
476 e1000_validate_option(&crc_stripping, &opt, adapter); 479 e1000_validate_option(&crc_stripping, &opt, adapter);
477 if (crc_stripping == OPTION_ENABLED) { 480 if (crc_stripping == OPTION_ENABLED) {
478 adapter->flags2 |= FLAG2_CRC_STRIPPING; 481 adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
495 498
496 if (num_KumeranLockLoss > bd) { 499 if (num_KumeranLockLoss > bd) {
497 unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; 500 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
501
498 e1000_validate_option(&kmrn_lock_loss, &opt, adapter); 502 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
499 enabled = kmrn_lock_loss; 503 enabled = kmrn_lock_loss;
500 } 504 }
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2896 (hw->phy.addr == 2) && 2896 (hw->phy.addr == 2) &&
2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { 2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2898 u16 data2 = 0x7EFF; 2898 u16 data2 = 0x7EFF;
2899
2899 ret_val = e1000_access_phy_debug_regs_hv(hw, 2900 ret_val = e1000_access_phy_debug_regs_hv(hw,
2900 (1 << 6) | 0x3, 2901 (1 << 6) | 0x3,
2901 &data2, false); 2902 &data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..65985846345d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
72#define I40E_MIN_NUM_DESCRIPTORS 64 72#define I40E_MIN_NUM_DESCRIPTORS 64
73#define I40E_MIN_MSIX 2 73#define I40E_MIN_MSIX 2
74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ 74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
75#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
75#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ 76#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
76#define I40E_DEFAULT_QUEUES_PER_VF 4 77#define I40E_DEFAULT_QUEUES_PER_VF 4
77#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ 78#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
97#define STRINGIFY(foo) #foo 98#define STRINGIFY(foo) #foo
98#define XSTRINGIFY(bar) STRINGIFY(bar) 99#define XSTRINGIFY(bar) STRINGIFY(bar)
99 100
100#ifndef ARCH_HAS_PREFETCH
101#define prefetch(X)
102#endif
103
104#define I40E_RX_DESC(R, i) \ 101#define I40E_RX_DESC(R, i) \
105 ((ring_is_16byte_desc_enabled(R)) \ 102 ((ring_is_16byte_desc_enabled(R)) \
106 ? (union i40e_32byte_rx_desc *) \ 103 ? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
157#define I40E_FDIR_BUFFER_FULL_MARGIN 10 154#define I40E_FDIR_BUFFER_FULL_MARGIN 10
158#define I40E_FDIR_BUFFER_HEAD_ROOM 200 155#define I40E_FDIR_BUFFER_HEAD_ROOM 200
159 156
157enum i40e_fd_stat_idx {
158 I40E_FD_STAT_ATR,
159 I40E_FD_STAT_SB,
160 I40E_FD_STAT_PF_COUNT
161};
162#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
163#define I40E_FD_ATR_STAT_IDX(pf_id) \
164 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
165#define I40E_FD_SB_STAT_IDX(pf_id) \
166 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
167
160struct i40e_fdir_filter { 168struct i40e_fdir_filter {
161 struct hlist_node fdir_node; 169 struct hlist_node fdir_node;
162 /* filter input set */ 170 /* filter input set */
163 u8 flow_type; 171 u8 flow_type;
164 u8 ip4_proto; 172 u8 ip4_proto;
173 /* TX packet view of src and dst */
165 __be32 dst_ip[4]; 174 __be32 dst_ip[4];
166 __be32 src_ip[4]; 175 __be32 src_ip[4];
167 __be16 src_port; 176 __be16 src_port;
@@ -205,7 +214,6 @@ struct i40e_pf {
 	unsigned long state;
 	unsigned long link_check_timeout;
 	struct msix_entry *msix_entries;
-	u16 num_msix_entries;
 	bool fc_autoneg_status;
 
 	u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
 	u16 rss_size;              /* num queues in the RSS array */
 	u16 rss_size_max;          /* HW defined max RSS queues */
 	u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
+	u16 num_alloc_vsi;         /* num VSIs this driver supports */
 	u8 atr_sample_rate;
 	bool wol_en;
 
 	struct hlist_head fdir_filter_list;
 	u16 fdir_pf_active_filters;
+	u16 fd_sb_cnt_idx;
+	u16 fd_atr_cnt_idx;
 
 #ifdef CONFIG_I40E_VXLAN
 	__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
 #ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
 #endif
+#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
 
 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
 	u16 pf_seid;
 	u16 main_vsi_seid;
 	u16 mac_seid;
-	struct i40e_aqc_get_switch_config_data *sw_config;
 	struct kobject *switch_kobj;
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
 	struct sk_buff *ptp_tx_skb;
-	struct work_struct ptp_tx_work;
 	struct hwtstamp_config tstamp_config;
-	unsigned long ptp_tx_start;
 	unsigned long last_rx_ptp_check;
 	spinlock_t tmreg_lock; /* Used to protect the device time registers. */
 	u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
 	struct i40e_q_vector **q_vectors;
 	int num_q_vectors;
 	int base_vector;
+	bool irqs_ready;
 
 	u16 seid;	/* HW index of this VSI (absolute index) */
 	u16 id;		/* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
 	       (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
 }
 
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+	return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..7a027499fc57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
 static void i40e_resume_aq(struct i40e_hw *hw);
 
 /**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+	       (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
  * i40e_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
  *
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
  *
  *  Configure base address and length registers for the transmit queue
  **/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 {
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
 		wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+		reg = rd32(hw, I40E_VF_ATQBAL1);
 	} else {
 		/* configure the transmit queue */
 		wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+		reg = rd32(hw, I40E_PF_ATQBAL);
 	}
+
+	/* Check one register to verify that config was applied */
+	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
 }
 
 /**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
  *
  * Configure base address and length registers for the receive (event queue)
  **/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 {
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
 		wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+		reg = rd32(hw, I40E_VF_ARQBAL1);
 	} else {
 		/* configure the receive queue */
 		wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+		reg = rd32(hw, I40E_PF_ARQBAL);
 	}
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+	/* Check one register to verify that config was applied */
+	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
 }
 
 /**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
-	i40e_config_asq_regs(hw);
+	ret_code = i40e_config_asq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
 
 	/* success! */
 	goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
-	i40e_config_arq_regs(hw);
+	ret_code = i40e_config_arq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
 
 	/* success! */
 	goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 
-	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
-	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
 		goto init_adminq_free_arq;
 	}
 
 	/* pre-emptive resource lock release */
 	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+	hw->aq.nvm_busy = false;
 
 	ret_code = i40e_aq_set_hmc_resource_profile(hw,
 						    I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 		goto asq_send_command_exit;
 	}
 
+	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+		status = I40E_ERR_NVM;
+		goto asq_send_command_exit;
+	}
+
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
 	if (cmd_details) {
 		*details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
+	if (i40e_is_nvm_update_op(desc))
+		hw->aq.nvm_busy = true;
+
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 			e->msg_size);
 	}
 
+	if (i40e_is_nvm_update_op(&e->desc))
+		hw->aq.nvm_busy = false;
+
 	/* Restore the original datalen and buffer address in the desc,
 	 * FW updates datalen to indicate the event message
 	 * size
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
 	u16 fw_min_ver;                 /* firmware minor version */
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
+	bool nvm_busy;
 
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..15f289f2917f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0001
+#define I40E_FW_API_VERSION_MINOR	0x0002
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_version	= 0x0001,
 	i40e_aqc_opc_driver_version	= 0x0002,
 	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
 
 	/* resource ownership */
 	i40e_aqc_opc_request_resource	= 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	i40e_aqc_opc_set_storm_control_config	= 0x0280,
-	i40e_aqc_opc_get_storm_control_config	= 0x0281,
-
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_query_switching_comp_bw_config	= 0x041A,
 	i40e_aqc_opc_suspend_port_tx			= 0x041B,
 	i40e_aqc_opc_resume_port_tx			= 0x041C,
+	i40e_aqc_opc_configure_partition_bw		= 0x041D,
 
 	/* hmc */
 	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_partner_advt	= 0x0616,
 	i40e_aqc_opc_set_lb_modes	= 0x0618,
 	i40e_aqc_opc_get_phy_wol_caps	= 0x0621,
-	i40e_aqc_opc_set_phy_reset	= 0x0622,
+	i40e_aqc_opc_set_phy_debug	= 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm	= 0x0625,
 
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read		= 0x0701,
 	i40e_aqc_opc_nvm_erase		= 0x0702,
 	i40e_aqc_opc_nvm_update		= 0x0703,
+	i40e_aqc_opc_nvm_config_read	= 0x0704,
+	i40e_aqc_opc_nvm_config_write	= 0x0705,
 
 	/* virtualization commands */
 	i40e_aqc_opc_send_msg_to_pf	= 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_debug_set_mode		= 0xFF01,
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
-	i40e_aqc_opc_debug_read_reg_sg		= 0xFF05,
-	i40e_aqc_opc_debug_write_reg_sg		= 0xFF06,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
 	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
 	i40e_aqc_opc_debug_modify_internals	= 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
 
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+	u8 pf_id;
+	u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
-	__le32 broadcast_threshold;
-	__le32 multicast_threshold;
-	__le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW		0x01
-#define I40E_AQC_STORM_CONTROL_MDICW		0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW		0x04
-#define I40E_AQC_STORM_CONTROL_BDICW		0x08
-#define I40E_AQC_STORM_CONTROL_BIDU		0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT	8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK	(0x3FF << \
-					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
-	u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
 /* DCB 0x03xx*/
 
 /* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
 struct i40e_aqc_configure_switching_comp_ets_data {
 	u8 reserved[4];
 	u8 tc_valid_bits;
-	u8 reserved1;
+	u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
 	u8 tc_strict_priority_flags;
-	u8 reserved2[17];
+	u8 reserved1[17];
 	u8 tc_bw_share_credits[8];
-	u8 reserved3[96];
+	u8 reserved2[96];
 };
 
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
 
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+	__le16 pf_valid_bits;
+	u8     min_bw[16];      /* guaranteed bandwidth */
+	u8     max_bw[16];      /* bandwidth limit */
+};
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_XLPPI			= 0x9,
 	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
 	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
+	I40E_PHY_TYPE_10GBASE_AOC		= 0xC,
+	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,
 	I40E_PHY_TYPE_100BASE_TX		= 0x11,
 	I40E_PHY_TYPE_1000BASE_T		= 0x12,
 	I40E_PHY_TYPE_10GBASE_T			= 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
 	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
 	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
-	I40E_PHY_TYPE_20GBASE_KR2		= 0x1B,
+	I40E_PHY_TYPE_1000BASE_SX		= 0x1B,
+	I40E_PHY_TYPE_1000BASE_LX		= 0x1C,
+	I40E_PHY_TYPE_1000BASE_T_OPTICAL	= 0x1D,
+	I40E_PHY_TYPE_20GBASE_KR2		= 0x1E,
 	I40E_PHY_TYPE_MAX
 };
 
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
 #define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
 #define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT	3
-#define I40E_AQ_PHY_FLAG_AN_MASK	(0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF		0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN	0x01
-#define I40E_AQ_PHY_FLAG_AN_ON		0x02
+#define I40E_AQ_PHY_LINK_ENABLED	0x08
+#define I40E_AQ_PHY_AN_ENABLED		0x10
 #define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
 	__le16 eee_capability;
 #define I40E_AQ_EEE_100BASE_TX		0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_LINK_TX_ACTIVE		0x00
 #define I40E_AQ_LINK_TX_DRAINED		0x01
 #define I40E_AQ_LINK_TX_FLUSHED		0x03
+#define I40E_AQ_LINK_FORCED_40G		0x10
 	u8 loopback;         /* use defines from i40e_aqc_set_lb_mode */
 	__le16 max_frame_size;
 	u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
-	u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST	0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+	u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
+					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
 	u8 reserved[15];
 };
 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 
 enum i40e_aq_phy_reg_type {
 	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+	__le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
+#define ANVM_READ_SINGLE_FEATURE		0
+#define ANVM_READ_MULTIPLE_FEATURES		1
+	__le16 element_count;
+	__le16 element_id;	/* Feature/field ID */
+	u8 reserved[2];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+	__le16 cmd_flags;
+	__le16 element_count;
+	u8 reserved[4];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+	__le16 feature_id;
+	__le16 instance_id;
+	__le16 feature_options;
+	__le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK	0x2
+	__le16 field_id;
+	__le16 instance_id;
+	__le16 field_options;
+	__le16 field_value;
+};
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..6e65f19dd6e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
 		switch (hw->device_id) {
 		case I40E_DEV_ID_SFP_XL710:
-		case I40E_DEV_ID_SFP_X710:
 		case I40E_DEV_ID_QEMU:
 		case I40E_DEV_ID_KX_A:
 		case I40E_DEV_ID_KX_B:
 		case I40E_DEV_ID_KX_C:
-		case I40E_DEV_ID_KX_D:
 		case I40E_DEV_ID_QSFP_A:
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
  **/
 bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
-	return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+	if (hw->aq.asq.len)
+		return !!(rd32(hw, hw->aq.asq.len) &
+			  I40E_PF_ATQLEN_ATQENABLE_MASK);
+	else
+		return false;
 }
 
 /**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 }
 
 /**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+	u32 reg_block = 0;
+	u32 reg_val;
+
+	if (abs_queue_idx >= 128)
+		reg_block = abs_queue_idx / 128;
+
+	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+	if (enable)
+		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+	else
+		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
+
+/**
  * i40e_get_media_type - Gets media type
  * @hw: pointer to the hardware structure
  **/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 }
 
 #define I40E_PF_RESET_WAIT_COUNT_A0	200
-#define I40E_PF_RESET_WAIT_COUNT	10
+#define I40E_PF_RESET_WAIT_COUNT	100
 /**
  * i40e_pf_reset - Reset the PF
  * @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
 {
 	u32 reg;
 
+	if (i40e_check_asq_alive(hw))
+		i40e_aq_clear_pxe_mode(hw, NULL);
+
 	/* Clear single descriptor fetch/write-back mode */
 	reg = rd32(hw, I40E_GLLAN_RCTL_0);
 
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 /* Admin command wrappers */
 
 /**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+				   struct i40e_asq_cmd_details *cmd_details)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_clear_pxe *cmd =
+		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_clear_pxe_mode);
+
+	cmd->rx_cnt = 0x2;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+	return status;
+}
+
+/**
  * i40e_aq_set_link_restart_an
  * @hw: pointer to the hw struct
  * @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 	hw_link_info->an_info = resp->an_info;
 	hw_link_info->ext_info = resp->ext_info;
 	hw_link_info->loopback = resp->loopback;
+	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+		hw_link_info->crc_enable = true;
+	else
+		hw_link_info->crc_enable = false;
 
 	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
 		hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
 
 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				       sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
 
 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				       sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
 
 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				       sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
 	struct i40e_aqc_driver_version *cmd =
 		(struct i40e_aqc_driver_version *)&desc.params.raw;
 	i40e_status status;
+	u16 len;
 
 	if (dv == NULL)
 		return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
 	cmd->driver_minor_ver = dv->minor_version;
 	cmd->driver_build_ver = dv->build_version;
 	cmd->driver_subbuild_ver = dv->subbuild_version;
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	len = 0;
+	while (len < sizeof(dv->driver_string) &&
+	       (dv->driver_string[len] < 0x80) &&
+	       dv->driver_string[len])
+		len++;
+	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+				       len, cmd_details);
 
 	return status;
 }
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 		}
 	}
 
+	/* Software override ensuring FCoE is disabled if npar or mfp
+	 * mode because it is not supported in these modes.
+	 */
+	if (p->npar_enable || p->mfp_mode_1)
+		p->fcoe = false;
+
 	/* additional HW specific goodies that might
 	 * someday be HW version specific
 	 */
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 * @cmd_details: pointer to command details structure or NULL
 **/
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
-				u16 udp_port, u8 header_len,
-				u8 protocol_index, u8 *filter_index,
+				u16 udp_port, u8 protocol_index,
+				u8 *filter_index,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
 }
 
 /**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_credit,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_configure_vsi_bw_limit *cmd =
+		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_configure_vsi_bw_limit);
+
+	cmd->vsi_seid = cpu_to_le16(seid);
+	cmd->credit = cpu_to_le16(credit);
+	cmd->max_credit = max_credit;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
 * @hw: pointer to the hw struct
 * @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
 {
 	u32 fcoe_cntx_size, fcoe_filt_size;
 	u32 pe_cntx_size, pe_filt_size;
-	u32 fcoe_fmax, pe_fmax;
+	u32 fcoe_fmax;
 	u32 val;
 
 	/* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@
 	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
 		return I40E_ERR_INVALID_SIZE;
 
-	/* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
-	val = rd32(hw, I40E_GLHMC_PEXFMAX);
-	pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
-		>> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
-	if (pe_filt_size + pe_cntx_size > pe_fmax)
-		return I40E_ERR_INVALID_SIZE;
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..00bc0cdb3a03 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
 			       struct i40e_ieee_app_priority_table *app)
 {
 	int v, err;
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] && pf->vsi[v]->netdev) {
 			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
 			if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
 	struct net_device *dev = vsi->netdev;
 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
 
-	/* DCB not enabled */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+	/* Not DCB capable */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
 		return;
 
 	/* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
 	if (seid < 0)
 		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
 	else
-		for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		for (i = 0; i < pf->num_alloc_vsi; i++)
 			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
 				return pf->vsi[i];
 
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
 {
 	int i;
 
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i])
 			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
 				 i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
 		 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
 		 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
 	dev_info(&pf->pdev->dev,
-		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
-		 estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+		 estats->rx_broadcast, estats->rx_discards);
 	dev_info(&pf->pdev->dev,
-		 " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
-		 estats->rx_missed, estats->rx_unknown_protocol,
-		 estats->tx_bytes);
+		 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+		 estats->rx_unknown_protocol, estats->tx_bytes);
 	dev_info(&pf->pdev->dev,
 		 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
 		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
 		if (cnt == 0) {
 			int i;
-			for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+			for (i = 0; i < pf->num_alloc_vsi; i++)
 				i40e_vsi_reset_stats(pf->vsi[i]);
 			dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
 		} else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
 	} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
 		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
-	} else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
-	} else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
 	dev_info(&pf->pdev->dev, "  fd-atr off\n");
 	dev_info(&pf->pdev->dev, "  fd-atr on\n");
-	dev_info(&pf->pdev->dev, "  fd-sb off\n");
-	dev_info(&pf->pdev->dev, "  fd-sb on\n");
 	dev_info(&pf->pdev->dev, "  lldp start\n");
 	dev_info(&pf->pdev->dev, "  lldp stop\n");
 	dev_info(&pf->pdev->dev, "  lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380daef8c1..56438bd579e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 
 struct i40e_diag_reg_test_info i40e_reg_list[] = {
 	/* offset               mask         elements   stride */
-	{I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
-	{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
-	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
-	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
-	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
-	{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
-	{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
-	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
-	{I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
-	{I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
-	{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+	{I40E_QTX_CTL(0), 0x0000FFBF, 1,
+		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+	{I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+		I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+	{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+	{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+		I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+	{I40E_QINT_TQCTL(0), 0x000000FF, 1,
+		I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+	{I40E_QINT_RQCTL(0), 0x000000FF, 1,
+		I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+	{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
 	{ 0 }
 };
 
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 	u32 reg, mask;
 	u32 i, j;
 
-	for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+	for (i = 0; i40e_reg_list[i].offset != 0 &&
+	     !ret_code; i++) {
+
+		/* set actual reg range for dynamically allocated resources */
+		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+		    hw->func_caps.num_tx_qp != 0)
+			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+		    hw->func_caps.num_msix_vectors != 0)
+			i40e_reg_list[i].elements =
+				hw->func_caps.num_msix_vectors - 1;
+
+		/* test register access */
 		mask = i40e_reg_list[i].mask;
-		for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
 			reg = i40e_reg_list[i].offset +
 			      (j * i40e_reg_list[i].stride);
 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..4a488ffcd6b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
46 I40E_STAT(struct i40e_pf, _name, _stat) 46 I40E_STAT(struct i40e_pf, _name, _stat)
47#define I40E_VSI_STAT(_name, _stat) \ 47#define I40E_VSI_STAT(_name, _stat) \
48 I40E_STAT(struct i40e_vsi, _name, _stat) 48 I40E_STAT(struct i40e_vsi, _name, _stat)
49#define I40E_VEB_STAT(_name, _stat) \
50 I40E_STAT(struct i40e_veb, _name, _stat)
49 51
50static const struct i40e_stats i40e_gstrings_net_stats[] = { 52static const struct i40e_stats i40e_gstrings_net_stats[] = {
51 I40E_NETDEV_STAT(rx_packets), 53 I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
56 I40E_NETDEV_STAT(tx_errors), 58 I40E_NETDEV_STAT(tx_errors),
57 I40E_NETDEV_STAT(rx_dropped), 59 I40E_NETDEV_STAT(rx_dropped),
58 I40E_NETDEV_STAT(tx_dropped), 60 I40E_NETDEV_STAT(tx_dropped),
59 I40E_NETDEV_STAT(multicast),
60 I40E_NETDEV_STAT(collisions), 61 I40E_NETDEV_STAT(collisions),
61 I40E_NETDEV_STAT(rx_length_errors), 62 I40E_NETDEV_STAT(rx_length_errors),
62 I40E_NETDEV_STAT(rx_crc_errors), 63 I40E_NETDEV_STAT(rx_crc_errors),
63}; 64};
64 65
66static const struct i40e_stats i40e_gstrings_veb_stats[] = {
67 I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
68 I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
69 I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
70 I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
71 I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
72 I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
73 I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
74 I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
75 I40E_VEB_STAT("rx_discards", stats.rx_discards),
76 I40E_VEB_STAT("tx_discards", stats.tx_discards),
77 I40E_VEB_STAT("tx_errors", stats.tx_errors),
78 I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
79};
80
81static const struct i40e_stats i40e_gstrings_misc_stats[] = {
82 I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
83 I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
84 I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
85 I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
86 I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
87 I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
88 I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
89};
90
65static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, 91static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
66 struct ethtool_rxnfc *cmd); 92 struct ethtool_rxnfc *cmd);
67 93
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
78static struct i40e_stats i40e_gstrings_stats[] = { 104static struct i40e_stats i40e_gstrings_stats[] = {
79 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), 105 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
80 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), 106 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
81 I40E_PF_STAT("rx_errors", stats.eth.rx_errors), 107 I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
108 I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
109 I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
110 I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
111 I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
112 I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
82 I40E_PF_STAT("tx_errors", stats.eth.tx_errors), 113 I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
83 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), 114 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
84 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), 115 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 119 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 120 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count), 121 I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 123 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 124 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 125 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
112 I40E_PF_STAT("rx_oversize", stats.rx_oversize), 144 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
113 I40E_PF_STAT("rx_jabber", stats.rx_jabber), 145 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), 146 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 147 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
148 I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
149 I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
150
117 /* LPI stats */ 151 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), 152 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), 153 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
122}; 156};
123 157
124#define I40E_QUEUE_STATS_LEN(n) \ 158#define I40E_QUEUE_STATS_LEN(n) \
125 ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \ 159 (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
126 ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2) 160 * 2 /* Tx and Rx together */ \
161 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
127#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) 162#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
128#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) 163#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
164#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
129#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ 165#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
166 I40E_MISC_STATS_LEN + \
130 I40E_QUEUE_STATS_LEN((n))) 167 I40E_QUEUE_STATS_LEN((n)))
131#define I40E_PFC_STATS_LEN ( \ 168#define I40E_PFC_STATS_LEN ( \
132 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ 169 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
135 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ 172 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
136 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ 173 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
137 / sizeof(u64)) 174 / sizeof(u64))
175#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
138#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ 176#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
139 I40E_PFC_STATS_LEN + \ 177 I40E_PFC_STATS_LEN + \
140 I40E_VSI_STATS_LEN((n))) 178 I40E_VSI_STATS_LEN((n)))
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
620 case ETH_SS_TEST: 658 case ETH_SS_TEST:
621 return I40E_TEST_LEN; 659 return I40E_TEST_LEN;
622 case ETH_SS_STATS: 660 case ETH_SS_STATS:
623 if (vsi == pf->vsi[pf->lan_vsi]) 661 if (vsi == pf->vsi[pf->lan_vsi]) {
624 return I40E_PF_STATS_LEN(netdev); 662 int len = I40E_PF_STATS_LEN(netdev);
625 else 663
664 if (pf->lan_veb != I40E_NO_VEB)
665 len += I40E_VEB_STATS_LEN;
666 return len;
667 } else {
626 return I40E_VSI_STATS_LEN(netdev); 668 return I40E_VSI_STATS_LEN(netdev);
669 }
627 default: 670 default:
628 return -EOPNOTSUPP; 671 return -EOPNOTSUPP;
629 } 672 }
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
633 struct ethtool_stats *stats, u64 *data) 676 struct ethtool_stats *stats, u64 *data)
634{ 677{
635 struct i40e_netdev_priv *np = netdev_priv(netdev); 678 struct i40e_netdev_priv *np = netdev_priv(netdev);
679 struct i40e_ring *tx_ring, *rx_ring;
636 struct i40e_vsi *vsi = np->vsi; 680 struct i40e_vsi *vsi = np->vsi;
637 struct i40e_pf *pf = vsi->back; 681 struct i40e_pf *pf = vsi->back;
638 int i = 0; 682 int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
648 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == 692 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
649 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 693 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
650 } 694 }
695 for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
696 p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
697 data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
698 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
699 }
651 rcu_read_lock(); 700 rcu_read_lock();
652 for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) { 701 for (j = 0; j < vsi->num_queue_pairs; j++) {
653 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); 702 tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
654 struct i40e_ring *rx_ring;
655 703
656 if (!tx_ring) 704 if (!tx_ring)
657 continue; 705 continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
662 data[i] = tx_ring->stats.packets; 710 data[i] = tx_ring->stats.packets;
663 data[i + 1] = tx_ring->stats.bytes; 711 data[i + 1] = tx_ring->stats.bytes;
664 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 712 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
713 i += 2;
665 714
666 /* Rx ring is the 2nd half of the queue pair */ 715 /* Rx ring is the 2nd half of the queue pair */
667 rx_ring = &tx_ring[1]; 716 rx_ring = &tx_ring[1];
668 do { 717 do {
669 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 718 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
670 data[i + 2] = rx_ring->stats.packets; 719 data[i] = rx_ring->stats.packets;
671 data[i + 3] = rx_ring->stats.bytes; 720 data[i + 1] = rx_ring->stats.bytes;
672 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 721 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
722 i += 2;
673 } 723 }
674 rcu_read_unlock(); 724 rcu_read_unlock();
675 if (vsi == pf->vsi[pf->lan_vsi]) { 725 if (vsi != pf->vsi[pf->lan_vsi])
676 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { 726 return;
677 p = (char *)pf + i40e_gstrings_stats[j].stat_offset; 727
678 data[i++] = (i40e_gstrings_stats[j].sizeof_stat == 728 if (pf->lan_veb != I40E_NO_VEB) {
679 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 729 struct i40e_veb *veb = pf->veb[pf->lan_veb];
680 } 730 for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
681 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { 731 p = (char *)veb;
682 data[i++] = pf->stats.priority_xon_tx[j]; 732 p += i40e_gstrings_veb_stats[j].stat_offset;
683 data[i++] = pf->stats.priority_xoff_tx[j]; 733 data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
684 } 734 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
685 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
686 data[i++] = pf->stats.priority_xon_rx[j];
687 data[i++] = pf->stats.priority_xoff_rx[j];
688 } 735 }
689 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
690 data[i++] = pf->stats.priority_xon_2_xoff[j];
691 } 736 }
737 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
738 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
739 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
740 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
741 }
742 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
743 data[i++] = pf->stats.priority_xon_tx[j];
744 data[i++] = pf->stats.priority_xoff_tx[j];
745 }
746 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
747 data[i++] = pf->stats.priority_xon_rx[j];
748 data[i++] = pf->stats.priority_xoff_rx[j];
749 }
750 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
751 data[i++] = pf->stats.priority_xon_2_xoff[j];
692} 752}
693 753
694static void i40e_get_strings(struct net_device *netdev, u32 stringset, 754static void i40e_get_strings(struct net_device *netdev, u32 stringset,
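
The per-queue loop above uses the kernel's u64_stats seqcount idiom: a reader snapshots the packets/bytes pair and retries if a writer raced with it. A rough user-space sketch of the protocol follows; it omits the memory barriers that u64_stats_fetch_begin_irq/u64_stats_fetch_retry_irq provide and is only meant to show the retry shape:

    #include <stdint.h>

    struct ring_stats {
        volatile unsigned int seq;   /* even = stable, odd = write in progress */
        uint64_t packets, bytes;
    };

    /* Reader: retry until a consistent packets/bytes pair is seen. */
    static void read_stats(const struct ring_stats *r,
                           uint64_t *packets, uint64_t *bytes)
    {
        unsigned int start;
        do {
            start = r->seq;          /* begin: sample the sequence count */
            *packets = r->packets;
            *bytes = r->bytes;
        } while ((start & 1) || r->seq != start); /* retry on concurrent write */
    }

    int main(void)
    {
        struct ring_stats r = { .seq = 2, .packets = 10, .bytes = 640 };
        uint64_t p, b;
        read_stats(&r, &p, &b);
        return !(p == 10 && b == 640);
    }
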
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
713 i40e_gstrings_net_stats[i].stat_string); 773 i40e_gstrings_net_stats[i].stat_string);
714 p += ETH_GSTRING_LEN; 774 p += ETH_GSTRING_LEN;
715 } 775 }
776 for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
777 snprintf(p, ETH_GSTRING_LEN, "%s",
778 i40e_gstrings_misc_stats[i].stat_string);
779 p += ETH_GSTRING_LEN;
780 }
716 for (i = 0; i < vsi->num_queue_pairs; i++) { 781 for (i = 0; i < vsi->num_queue_pairs; i++) {
717 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); 782 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
718 p += ETH_GSTRING_LEN; 783 p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
723 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); 788 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
724 p += ETH_GSTRING_LEN; 789 p += ETH_GSTRING_LEN;
725 } 790 }
726 if (vsi == pf->vsi[pf->lan_vsi]) { 791 if (vsi != pf->vsi[pf->lan_vsi])
727 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { 792 return;
728 snprintf(p, ETH_GSTRING_LEN, "port.%s", 793
729 i40e_gstrings_stats[i].stat_string); 794 if (pf->lan_veb != I40E_NO_VEB) {
730 p += ETH_GSTRING_LEN; 795 for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
731 } 796 snprintf(p, ETH_GSTRING_LEN, "veb.%s",
732 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 797 i40e_gstrings_veb_stats[i].stat_string);
733 snprintf(p, ETH_GSTRING_LEN,
734 "port.tx_priority_%u_xon", i);
735 p += ETH_GSTRING_LEN;
736 snprintf(p, ETH_GSTRING_LEN,
737 "port.tx_priority_%u_xoff", i);
738 p += ETH_GSTRING_LEN;
739 }
740 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
741 snprintf(p, ETH_GSTRING_LEN,
742 "port.rx_priority_%u_xon", i);
743 p += ETH_GSTRING_LEN;
744 snprintf(p, ETH_GSTRING_LEN,
745 "port.rx_priority_%u_xoff", i);
746 p += ETH_GSTRING_LEN;
747 }
748 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
749 snprintf(p, ETH_GSTRING_LEN,
750 "port.rx_priority_%u_xon_2_xoff", i);
751 p += ETH_GSTRING_LEN; 798 p += ETH_GSTRING_LEN;
752 } 799 }
753 } 800 }
801 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
802 snprintf(p, ETH_GSTRING_LEN, "port.%s",
803 i40e_gstrings_stats[i].stat_string);
804 p += ETH_GSTRING_LEN;
805 }
806 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
807 snprintf(p, ETH_GSTRING_LEN,
808 "port.tx_priority_%u_xon", i);
809 p += ETH_GSTRING_LEN;
810 snprintf(p, ETH_GSTRING_LEN,
811 "port.tx_priority_%u_xoff", i);
812 p += ETH_GSTRING_LEN;
813 }
814 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
815 snprintf(p, ETH_GSTRING_LEN,
816 "port.rx_priority_%u_xon", i);
817 p += ETH_GSTRING_LEN;
818 snprintf(p, ETH_GSTRING_LEN,
819 "port.rx_priority_%u_xoff", i);
820 p += ETH_GSTRING_LEN;
821 }
822 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
823 snprintf(p, ETH_GSTRING_LEN,
824 "port.rx_priority_%u_xon_2_xoff", i);
825 p += ETH_GSTRING_LEN;
826 }
754 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ 827 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
755 break; 828 break;
756 } 829 }
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
1007 ec->rx_max_coalesced_frames_irq = vsi->work_limit; 1080 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1008 1081
1009 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) 1082 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1010 ec->rx_coalesce_usecs = 1; 1083 ec->use_adaptive_rx_coalesce = 1;
1011 else
1012 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
1013 1084
1014 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) 1085 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1015 ec->tx_coalesce_usecs = 1; 1086 ec->use_adaptive_tx_coalesce = 1;
1016 else 1087
1017 ec->tx_coalesce_usecs = vsi->tx_itr_setting; 1088 ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
1089 ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
1018 1090
1019 return 0; 1091 return 0;
1020} 1092}
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
1033 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) 1105 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1034 vsi->work_limit = ec->tx_max_coalesced_frames_irq; 1106 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1035 1107
1036 switch (ec->rx_coalesce_usecs) { 1108 if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1037 case 0: 1109 (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
1038 vsi->rx_itr_setting = 0;
1039 break;
1040 case 1:
1041 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1042 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1043 break;
1044 default:
1045 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1046 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1047 return -EINVAL;
1048 vsi->rx_itr_setting = ec->rx_coalesce_usecs; 1110 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1049 break; 1111 else
1050 } 1112 return -EINVAL;
1051 1113
1052 switch (ec->tx_coalesce_usecs) { 1114 if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1053 case 0: 1115 (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
1054 vsi->tx_itr_setting = 0;
1055 break;
1056 case 1:
1057 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1058 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1059 break;
1060 default:
1061 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1062 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1063 return -EINVAL;
1064 vsi->tx_itr_setting = ec->tx_coalesce_usecs; 1116 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1065 break; 1117 else
1066 } 1118 return -EINVAL;
1119
1120 if (ec->use_adaptive_rx_coalesce)
1121 vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
1122 else
1123 vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
1124
1125 if (ec->use_adaptive_tx_coalesce)
1126 vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
1127 else
1128 vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
1067 1129
1068 vector = vsi->base_vector; 1130 vector = vsi->base_vector;
1069 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 1131 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
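
After this rework the interval and the adaptive choice share one field: I40E_ITR_DYNAMIC is OR'd into the usecs setting, reported to ethtool via use_adaptive_*_coalesce, and masked off when the interval itself is returned. A small sketch of that encoding (the flag's value below is made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define ITR_DYNAMIC 0x8000u  /* hypothetical flag bit, standing in for I40E_ITR_DYNAMIC */

    int main(void)
    {
        uint16_t setting = 50;           /* 50 usecs, fixed interval */
        setting |= ITR_DYNAMIC;          /* user enabled adaptive coalescing */

        int adaptive = !!(setting & ITR_DYNAMIC);
        uint16_t usecs = setting & ~ITR_DYNAMIC; /* interval without the flag */

        assert(adaptive == 1 && usecs == 50);
        return 0;
    }
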
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1140 int cnt = 0; 1202 int cnt = 0;
1141 1203
1142 /* report total rule count */ 1204 /* report total rule count */
1143 cmd->data = pf->hw.fdir_shared_filter_count + 1205 cmd->data = i40e_get_fd_cnt_all(pf);
1144 pf->fdir_pf_filter_count;
1145 1206
1146 hlist_for_each_entry_safe(rule, node2, 1207 hlist_for_each_entry_safe(rule, node2,
1147 &pf->fdir_filter_list, fdir_node) { 1208 &pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1175 struct i40e_fdir_filter *rule = NULL; 1236 struct i40e_fdir_filter *rule = NULL;
1176 struct hlist_node *node2; 1237 struct hlist_node *node2;
1177 1238
1178 /* report total rule count */
1179 cmd->data = pf->hw.fdir_shared_filter_count +
1180 pf->fdir_pf_filter_count;
1181
1182 hlist_for_each_entry_safe(rule, node2, 1239 hlist_for_each_entry_safe(rule, node2,
1183 &pf->fdir_filter_list, fdir_node) { 1240 &pf->fdir_filter_list, fdir_node) {
1184 if (fsp->location <= rule->fd_id) 1241 if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1189 return -EINVAL; 1246 return -EINVAL;
1190 1247
1191 fsp->flow_type = rule->flow_type; 1248 fsp->flow_type = rule->flow_type;
1192 fsp->h_u.tcp_ip4_spec.psrc = rule->src_port; 1249 if (fsp->flow_type == IP_USER_FLOW) {
1193 fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port; 1250 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1194 fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0]; 1251 fsp->h_u.usr_ip4_spec.proto = 0;
1195 fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0]; 1252 fsp->m_u.usr_ip4_spec.proto = 0;
1196 fsp->ring_cookie = rule->q_index; 1253 }
1254
1255	/* Reverse the src and dest fields, since the HW views them from the
1256	 * Tx perspective, whereas the user expects them from the Rx filter view.
1257 */
1258 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
1259 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
1260 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
1261 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
1262
1263 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
1264 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1265 else
1266 fsp->ring_cookie = rule->q_index;
1197 1267
1198 return 0; 1268 return 0;
1199} 1269}
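
The swap above exists because ethtool rules are expressed from the receive side, while the flow director is programmed from the transmit side; reporting a rule therefore swaps source and destination back. A hedged sketch of the round trip, with a made-up struct:

    #include <assert.h>
    #include <stdint.h>

    struct flow { uint16_t src_port, dst_port; };

    /* Rx filter view -> HW (Tx perspective): swap on the way in... */
    static struct flow to_hw(struct flow rx)
    {
        return (struct flow){ rx.dst_port, rx.src_port };
    }

    /* ...and swap again on the way out, restoring the user's view. */
    static struct flow to_user(struct flow hw)
    {
        return (struct flow){ hw.dst_port, hw.src_port };
    }

    int main(void)
    {
        struct flow rule = { .src_port = 1234, .dst_port = 80 };
        struct flow back = to_user(to_hw(rule));
        assert(back.src_port == 1234 && back.dst_port == 80);
        return 0;
    }
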
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1223 break; 1293 break;
1224 case ETHTOOL_GRXCLSRLCNT: 1294 case ETHTOOL_GRXCLSRLCNT:
1225 cmd->rule_cnt = pf->fdir_pf_active_filters; 1295 cmd->rule_cnt = pf->fdir_pf_active_filters;
1296 /* report total rule count */
1297 cmd->data = i40e_get_fd_cnt_all(pf);
1226 ret = 0; 1298 ret = 0;
1227 break; 1299 break;
1228 case ETHTOOL_GRXCLSRULE: 1300 case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1291 case UDP_V4_FLOW: 1363 case UDP_V4_FLOW:
1292 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1364 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1293 case 0: 1365 case 0:
1294 hena &= 1366 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
1295 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 1367 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1296 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1297 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1298 break; 1368 break;
1299 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 1369 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1300 hena |= 1370 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
1301 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 1371 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1302 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1303 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1304 break; 1372 break;
1305 default: 1373 default:
1306 return -EINVAL; 1374 return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1309 case UDP_V6_FLOW: 1377 case UDP_V6_FLOW:
1310 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1378 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1311 case 0: 1379 case 0:
1312 hena &= 1380 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
1313 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 1381 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1314 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1315 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1316 break; 1382 break;
1317 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 1383 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1318 hena |= 1384 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
1319 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 1385 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1320 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1321 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1322 break; 1386 break;
1323 default: 1387 default:
1324 return -EINVAL; 1388 return -EINVAL;
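
The hashed-ena (hena) value is treated as a 64-bit bitmap with one bit per packet-classifier type, so enabling or disabling a hash option reduces to setting or clearing the corresponding bits, as the simplified cases above do. A sketch with hypothetical bit positions:

    #include <assert.h>
    #include <stdint.h>

    #define PCTYPE_IPV4_UDP  31  /* hypothetical bit index */
    #define PCTYPE_FRAG_IPV4 36  /* hypothetical bit index */

    int main(void)
    {
        uint64_t hena = 0;
        uint64_t udp4 = ((uint64_t)1 << PCTYPE_IPV4_UDP) |
                        ((uint64_t)1 << PCTYPE_FRAG_IPV4);

        hena |= udp4;   /* RXH_L4_B_0_1 | RXH_L4_B_2_3: hash on L4 ports */
        assert(hena & udp4);
        hena &= ~udp4;  /* 0: stop hashing this flow type */
        assert(hena == 0);
        return 0;
    }
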
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1503 return -EINVAL; 1567 return -EINVAL;
1504 } 1568 }
1505 1569
1506 if (fsp->ring_cookie >= vsi->num_queue_pairs) 1570 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
1571 (fsp->ring_cookie >= vsi->num_queue_pairs))
1507 return -EINVAL; 1572 return -EINVAL;
1508 1573
1509 input = kzalloc(sizeof(*input), GFP_KERNEL); 1574 input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1524 input->pctype = 0; 1589 input->pctype = 0;
1525 input->dest_vsi = vsi->id; 1590 input->dest_vsi = vsi->id;
1526 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; 1591 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1527 input->cnt_index = 0; 1592 input->cnt_index = pf->fd_sb_cnt_idx;
1528 input->flow_type = fsp->flow_type; 1593 input->flow_type = fsp->flow_type;
1529 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; 1594 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
1530 input->src_port = fsp->h_u.tcp_ip4_spec.psrc; 1595
1531	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 1596	/* Reverse the src and dest fields, since the HW expects them to be from
1532	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 1597	 * the Tx perspective, whereas the user's input is from the Rx filter view.
1533 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 1598 */
1599 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
1600 input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
1601 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1602 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1534 1603
1535 ret = i40e_add_del_fdir(vsi, input, true); 1604 ret = i40e_add_del_fdir(vsi, input, true);
1536 if (ret) 1605 if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
1692 1761
1693void i40e_set_ethtool_ops(struct net_device *netdev) 1762void i40e_set_ethtool_ops(struct net_device *netdev)
1694{ 1763{
1695 SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops); 1764 netdev->ethtool_ops = &i40e_ethtool_ops;
1696} 1765}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc5b569..9b987ccc9e82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
201 **/ 201 **/
202i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 202i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
203 struct i40e_hmc_info *hmc_info, 203 struct i40e_hmc_info *hmc_info,
204 u32 idx, bool is_pf) 204 u32 idx)
205{ 205{
206 i40e_status ret_code = 0; 206 i40e_status ret_code = 0;
207 struct i40e_hmc_pd_entry *pd_entry; 207 struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
237 pd_addr = (u64 *)pd_table->pd_page_addr.va; 237 pd_addr = (u64 *)pd_table->pd_page_addr.va;
238 pd_addr += rel_pd_idx; 238 pd_addr += rel_pd_idx;
239 memset(pd_addr, 0, sizeof(u64)); 239 memset(pd_addr, 0, sizeof(u64));
240 if (is_pf) 240 I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
241 I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
242 else
243 I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
244 241
245 /* free memory here */ 242 /* free memory here */
246 ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); 243 ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701234f8..b45d8fedc5e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ 163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) 164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
165 165
166#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
167 wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
168 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
169 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
170
171/** 166/**
172 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit 167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
173 * @hmc_info: pointer to the HMC configuration information structure 168 * @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
226 u32 pd_index); 221 u32 pd_index);
227i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 222i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
228 struct i40e_hmc_info *hmc_info, 223 struct i40e_hmc_info *hmc_info,
229 u32 idx, bool is_pf); 224 u32 idx);
230i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, 225i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
231 u32 idx); 226 u32 idx);
232i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, 227i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..870ab1ee072c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
397 /* remove the backing pages from pd_idx1 to i */ 397 /* remove the backing pages from pd_idx1 to i */
398 while (i && (i > pd_idx1)) { 398 while (i && (i > pd_idx1)) {
399 i40e_remove_pd_bp(hw, info->hmc_info, 399 i40e_remove_pd_bp(hw, info->hmc_info,
400 (i - 1), true); 400 (i - 1));
401 i--; 401 i--;
402 } 402 }
403 } 403 }
@@ -433,11 +433,7 @@ exit_sd_error:
433 ((j - 1) * I40E_HMC_MAX_BP_COUNT)); 433 ((j - 1) * I40E_HMC_MAX_BP_COUNT));
434 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); 434 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
435 for (i = pd_idx1; i < pd_lmt1; i++) { 435 for (i = pd_idx1; i < pd_lmt1; i++) {
436 i40e_remove_pd_bp( 436 i40e_remove_pd_bp(hw, info->hmc_info, i);
437 hw,
438 info->hmc_info,
439 i,
440 true);
441 } 437 }
442 i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); 438 i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
443 break; 439 break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
616 pd_table = 612 pd_table =
617 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; 613 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
618 if (pd_table->pd_entry[rel_pd_idx].valid) { 614 if (pd_table->pd_entry[rel_pd_idx].valid) {
619 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, 615 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
620 j, true);
621 if (ret_code) 616 if (ret_code)
622 goto exit; 617 goto exit;
623 } 618 }
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
747 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, 742 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
748 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, 743 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
749 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, 744 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
745 { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
750 { 0 } 746 { 0 }
751}; 747};
752 748
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
56 u8 tphdata_ena; 56 u8 tphdata_ena;
57 u8 tphhead_ena; 57 u8 tphhead_ena;
58 u8 lrxqthresh; 58 u8 lrxqthresh;
59 u8 prefena; /* NOTE: normally must be set to 1 at init */
59}; 60};
60 61
61/* Tx queue context data */ 62/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449f1265..275ca9a1719e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
38#define DRV_KERN "-k" 38#define DRV_KERN "-k"
39 39
40#define DRV_VERSION_MAJOR 0 40#define DRV_VERSION_MAJOR 0
41#define DRV_VERSION_MINOR 3 41#define DRV_VERSION_MINOR 4
42#define DRV_VERSION_BUILD 36 42#define DRV_VERSION_BUILD 10
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
67 */ 67 */
68static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { 68static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, 69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, 70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, 71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, 72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, 73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, 74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, 75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, 76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
356 struct rtnl_link_stats64 *stats) 354 struct rtnl_link_stats64 *stats)
357{ 355{
358 struct i40e_netdev_priv *np = netdev_priv(netdev); 356 struct i40e_netdev_priv *np = netdev_priv(netdev);
357 struct i40e_ring *tx_ring, *rx_ring;
359 struct i40e_vsi *vsi = np->vsi; 358 struct i40e_vsi *vsi = np->vsi;
360 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); 359 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
361 int i; 360 int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
368 367
369 rcu_read_lock(); 368 rcu_read_lock();
370 for (i = 0; i < vsi->num_queue_pairs; i++) { 369 for (i = 0; i < vsi->num_queue_pairs; i++) {
371 struct i40e_ring *tx_ring, *rx_ring;
372 u64 bytes, packets; 370 u64 bytes, packets;
373 unsigned int start; 371 unsigned int start;
374 372
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
397 } 395 }
398 rcu_read_unlock(); 396 rcu_read_unlock();
399 397
400 /* following stats updated by ixgbe_watchdog_task() */ 398 /* following stats updated by i40e_watchdog_subtask() */
401 stats->multicast = vsi_stats->multicast; 399 stats->multicast = vsi_stats->multicast;
402 stats->tx_errors = vsi_stats->tx_errors; 400 stats->tx_errors = vsi_stats->tx_errors;
403 stats->tx_dropped = vsi_stats->tx_dropped; 401 stats->tx_dropped = vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
530 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 528 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
531 vsi->stat_offsets_loaded, 529 vsi->stat_offsets_loaded,
532 &oes->rx_discards, &es->rx_discards); 530 &oes->rx_discards, &es->rx_discards);
531 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
532 vsi->stat_offsets_loaded,
533 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
534 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
535 vsi->stat_offsets_loaded,
536 &oes->tx_errors, &es->tx_errors);
533 537
534 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 538 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
535 I40E_GLV_GORCL(stat_idx), 539 I40E_GLV_GORCL(stat_idx),
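
The i40e_stat_update48/i40e_stat_update32 calls used throughout this file follow the usual offset-and-wrap pattern for free-running hardware counters: the first read after load becomes a baseline, later reads subtract it, and a correction is applied when the 48-bit register wraps. A stand-alone reimplementation of the idea (a sketch, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the 48-bit counter pattern. */
    static void stat_update48(uint64_t reg, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
        if (!offset_loaded)
            *offset = reg;                 /* first read is the baseline */
        if (reg >= *offset)
            *stat = reg - *offset;         /* no wrap since the baseline */
        else
            *stat = (reg + (1ULL << 48)) - *offset; /* counter wrapped once */
        *stat &= (1ULL << 48) - 1;         /* keep the result in 48 bits */
    }

    int main(void)
    {
        uint64_t off = 0, stat = 0;
        stat_update48(100, false, &off, &stat); /* load baseline */
        stat_update48(40, true, &off, &stat);   /* register wrapped past 2^48 */
        return !(stat == ((1ULL << 48) - 60));
    }
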
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
648 return; 652 return;
649 653
650 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ 654 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
651 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 655 for (v = 0; v < pf->num_alloc_vsi; v++) {
652 struct i40e_vsi *vsi = pf->vsi[v]; 656 struct i40e_vsi *vsi = pf->vsi[v];
653 657
654 if (!vsi) 658 if (!vsi || !vsi->tx_rings[0])
655 continue; 659 continue;
656 660
657 for (i = 0; i < vsi->num_queue_pairs; i++) { 661 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
702 } 706 }
703 707
704 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ 708 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
705 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 709 for (v = 0; v < pf->num_alloc_vsi; v++) {
706 struct i40e_vsi *vsi = pf->vsi[v]; 710 struct i40e_vsi *vsi = pf->vsi[v];
707 711
708 if (!vsi) 712 if (!vsi || !vsi->tx_rings[0])
709 continue; 713 continue;
710 714
711 for (i = 0; i < vsi->num_queue_pairs; i++) { 715 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
720} 724}
721 725
722/** 726/**
723 * i40e_update_stats - Update the board statistics counters. 727 * i40e_update_vsi_stats - Update the vsi statistics counters.
724 * @vsi: the VSI to be updated 728 * @vsi: the VSI to be updated
725 * 729 *
726 * There are a few instances where we store the same stat in a 730 * There are a few instances where we store the same stat in a
727 * couple of different structs. This is partly because we have 731 * couple of different structs. This is partly because we have
728 * the netdev stats that need to be filled out, which is slightly 732 * the netdev stats that need to be filled out, which is slightly
729 * different from the "eth_stats" defined by the chip and used in 733 * different from the "eth_stats" defined by the chip and used in
730 * VF communications. We sort it all out here in a central place. 734 * VF communications. We sort it out here.
731 **/ 735 **/
732void i40e_update_stats(struct i40e_vsi *vsi) 736static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
733{ 737{
734 struct i40e_pf *pf = vsi->back; 738 struct i40e_pf *pf = vsi->back;
735 struct i40e_hw *hw = &pf->hw;
736 struct rtnl_link_stats64 *ons; 739 struct rtnl_link_stats64 *ons;
737 struct rtnl_link_stats64 *ns; /* netdev stats */ 740 struct rtnl_link_stats64 *ns; /* netdev stats */
738 struct i40e_eth_stats *oes; 741 struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
741 u32 rx_page, rx_buf; 744 u32 rx_page, rx_buf;
742 u64 rx_p, rx_b; 745 u64 rx_p, rx_b;
743 u64 tx_p, tx_b; 746 u64 tx_p, tx_b;
744 u32 val;
745 int i;
746 u16 q; 747 u16 q;
747 748
748 if (test_bit(__I40E_DOWN, &vsi->state) || 749 if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
804 ns->tx_packets = tx_p; 805 ns->tx_packets = tx_p;
805 ns->tx_bytes = tx_b; 806 ns->tx_bytes = tx_b;
806 807
807 i40e_update_eth_stats(vsi);
808 /* update netdev stats from eth stats */ 808 /* update netdev stats from eth stats */
809 ons->rx_errors = oes->rx_errors; 809 i40e_update_eth_stats(vsi);
810 ns->rx_errors = es->rx_errors;
811 ons->tx_errors = oes->tx_errors; 810 ons->tx_errors = oes->tx_errors;
812 ns->tx_errors = es->tx_errors; 811 ns->tx_errors = es->tx_errors;
813 ons->multicast = oes->rx_multicast; 812 ons->multicast = oes->rx_multicast;
814 ns->multicast = es->rx_multicast; 813 ns->multicast = es->rx_multicast;
814 ons->rx_dropped = oes->rx_discards;
815 ns->rx_dropped = es->rx_discards;
815 ons->tx_dropped = oes->tx_discards; 816 ons->tx_dropped = oes->tx_discards;
816 ns->tx_dropped = es->tx_discards; 817 ns->tx_dropped = es->tx_discards;
817 818
818 /* Get the port data only if this is the main PF VSI */ 819 /* pull in a couple PF stats if this is the main vsi */
819 if (vsi == pf->vsi[pf->lan_vsi]) { 820 if (vsi == pf->vsi[pf->lan_vsi]) {
820 struct i40e_hw_port_stats *nsd = &pf->stats; 821 ns->rx_crc_errors = pf->stats.crc_errors;
821 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 822 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
823 ns->rx_length_errors = pf->stats.rx_length_errors;
824 }
825}
822 826
823 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 827/**
824 I40E_GLPRT_GORCL(hw->port), 828 * i40e_update_pf_stats - Update the pf statistics counters.
825 pf->stat_offsets_loaded, 829 * @pf: the PF to be updated
826 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 830 **/
827 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 831static void i40e_update_pf_stats(struct i40e_pf *pf)
828 I40E_GLPRT_GOTCL(hw->port), 832{
829 pf->stat_offsets_loaded, 833 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
830 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 834 struct i40e_hw_port_stats *nsd = &pf->stats;
831 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 835 struct i40e_hw *hw = &pf->hw;
832 pf->stat_offsets_loaded, 836 u32 val;
833 &osd->eth.rx_discards, 837 int i;
834 &nsd->eth.rx_discards);
835 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
836 pf->stat_offsets_loaded,
837 &osd->eth.tx_discards,
838 &nsd->eth.tx_discards);
839 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
840 I40E_GLPRT_MPRCL(hw->port),
841 pf->stat_offsets_loaded,
842 &osd->eth.rx_multicast,
843 &nsd->eth.rx_multicast);
844 838
845 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 839 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
846 pf->stat_offsets_loaded, 840 I40E_GLPRT_GORCL(hw->port),
847 &osd->tx_dropped_link_down, 841 pf->stat_offsets_loaded,
848 &nsd->tx_dropped_link_down); 842 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
843 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
844 I40E_GLPRT_GOTCL(hw->port),
845 pf->stat_offsets_loaded,
846 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
847 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
848 pf->stat_offsets_loaded,
849 &osd->eth.rx_discards,
850 &nsd->eth.rx_discards);
851 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
852 pf->stat_offsets_loaded,
853 &osd->eth.tx_discards,
854 &nsd->eth.tx_discards);
849 855
850 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 856 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
851 pf->stat_offsets_loaded, 857 I40E_GLPRT_UPRCL(hw->port),
852 &osd->crc_errors, &nsd->crc_errors); 858 pf->stat_offsets_loaded,
853 ns->rx_crc_errors = nsd->crc_errors; 859 &osd->eth.rx_unicast,
860 &nsd->eth.rx_unicast);
861 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
862 I40E_GLPRT_MPRCL(hw->port),
863 pf->stat_offsets_loaded,
864 &osd->eth.rx_multicast,
865 &nsd->eth.rx_multicast);
866 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
867 I40E_GLPRT_BPRCL(hw->port),
868 pf->stat_offsets_loaded,
869 &osd->eth.rx_broadcast,
870 &nsd->eth.rx_broadcast);
871 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
872 I40E_GLPRT_UPTCL(hw->port),
873 pf->stat_offsets_loaded,
874 &osd->eth.tx_unicast,
875 &nsd->eth.tx_unicast);
876 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
877 I40E_GLPRT_MPTCL(hw->port),
878 pf->stat_offsets_loaded,
879 &osd->eth.tx_multicast,
880 &nsd->eth.tx_multicast);
881 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
882 I40E_GLPRT_BPTCL(hw->port),
883 pf->stat_offsets_loaded,
884 &osd->eth.tx_broadcast,
885 &nsd->eth.tx_broadcast);
854 886
855 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 887 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
856 pf->stat_offsets_loaded, 888 pf->stat_offsets_loaded,
857 &osd->illegal_bytes, &nsd->illegal_bytes); 889 &osd->tx_dropped_link_down,
858 ns->rx_errors = nsd->crc_errors 890 &nsd->tx_dropped_link_down);
859 + nsd->illegal_bytes;
860 891
861 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 892 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
862 pf->stat_offsets_loaded, 893 pf->stat_offsets_loaded,
863 &osd->mac_local_faults, 894 &osd->crc_errors, &nsd->crc_errors);
864 &nsd->mac_local_faults);
865 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
866 pf->stat_offsets_loaded,
867 &osd->mac_remote_faults,
868 &nsd->mac_remote_faults);
869 895
870 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 896 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
871 pf->stat_offsets_loaded, 897 pf->stat_offsets_loaded,
872 &osd->rx_length_errors, 898 &osd->illegal_bytes, &nsd->illegal_bytes);
873 &nsd->rx_length_errors);
874 ns->rx_length_errors = nsd->rx_length_errors;
875 899
876 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 900 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
877 pf->stat_offsets_loaded, 901 pf->stat_offsets_loaded,
878 &osd->link_xon_rx, &nsd->link_xon_rx); 902 &osd->mac_local_faults,
879 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 903 &nsd->mac_local_faults);
880 pf->stat_offsets_loaded, 904 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
881 &osd->link_xon_tx, &nsd->link_xon_tx); 905 pf->stat_offsets_loaded,
882 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ 906 &osd->mac_remote_faults,
883 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 907 &nsd->mac_remote_faults);
884 pf->stat_offsets_loaded,
885 &osd->link_xoff_tx, &nsd->link_xoff_tx);
886
887 for (i = 0; i < 8; i++) {
888 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
889 pf->stat_offsets_loaded,
890 &osd->priority_xon_rx[i],
891 &nsd->priority_xon_rx[i]);
892 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
893 pf->stat_offsets_loaded,
894 &osd->priority_xon_tx[i],
895 &nsd->priority_xon_tx[i]);
896 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
897 pf->stat_offsets_loaded,
898 &osd->priority_xoff_tx[i],
899 &nsd->priority_xoff_tx[i]);
900 i40e_stat_update32(hw,
901 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
902 pf->stat_offsets_loaded,
903 &osd->priority_xon_2_xoff[i],
904 &nsd->priority_xon_2_xoff[i]);
905 }
906 908
907 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 909 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
908 I40E_GLPRT_PRC64L(hw->port), 910 pf->stat_offsets_loaded,
909 pf->stat_offsets_loaded, 911 &osd->rx_length_errors,
910 &osd->rx_size_64, &nsd->rx_size_64); 912 &nsd->rx_length_errors);
911 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
912 I40E_GLPRT_PRC127L(hw->port),
913 pf->stat_offsets_loaded,
914 &osd->rx_size_127, &nsd->rx_size_127);
915 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
916 I40E_GLPRT_PRC255L(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->rx_size_255, &nsd->rx_size_255);
919 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
920 I40E_GLPRT_PRC511L(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->rx_size_511, &nsd->rx_size_511);
923 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
924 I40E_GLPRT_PRC1023L(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->rx_size_1023, &nsd->rx_size_1023);
927 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
928 I40E_GLPRT_PRC1522L(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->rx_size_1522, &nsd->rx_size_1522);
931 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
932 I40E_GLPRT_PRC9522L(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->rx_size_big, &nsd->rx_size_big);
935 913
936 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 914 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
937 I40E_GLPRT_PTC64L(hw->port), 915 pf->stat_offsets_loaded,
938 pf->stat_offsets_loaded, 916 &osd->link_xon_rx, &nsd->link_xon_rx);
939 &osd->tx_size_64, &nsd->tx_size_64); 917 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
940 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 918 pf->stat_offsets_loaded,
941 I40E_GLPRT_PTC127L(hw->port), 919 &osd->link_xon_tx, &nsd->link_xon_tx);
942 pf->stat_offsets_loaded, 920 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
943 &osd->tx_size_127, &nsd->tx_size_127); 921 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
944 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 922 pf->stat_offsets_loaded,
945 I40E_GLPRT_PTC255L(hw->port), 923 &osd->link_xoff_tx, &nsd->link_xoff_tx);
946 pf->stat_offsets_loaded,
947 &osd->tx_size_255, &nsd->tx_size_255);
948 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
949 I40E_GLPRT_PTC511L(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->tx_size_511, &nsd->tx_size_511);
952 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
953 I40E_GLPRT_PTC1023L(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->tx_size_1023, &nsd->tx_size_1023);
956 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
957 I40E_GLPRT_PTC1522L(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->tx_size_1522, &nsd->tx_size_1522);
960 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
961 I40E_GLPRT_PTC9522L(hw->port),
962 pf->stat_offsets_loaded,
963 &osd->tx_size_big, &nsd->tx_size_big);
964 924
965 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 925 for (i = 0; i < 8; i++) {
966 pf->stat_offsets_loaded, 926 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
967 &osd->rx_undersize, &nsd->rx_undersize);
968 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
969 pf->stat_offsets_loaded, 927 pf->stat_offsets_loaded,
970 &osd->rx_fragments, &nsd->rx_fragments); 928 &osd->priority_xon_rx[i],
971 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 929 &nsd->priority_xon_rx[i]);
930 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
972 pf->stat_offsets_loaded, 931 pf->stat_offsets_loaded,
973 &osd->rx_oversize, &nsd->rx_oversize); 932 &osd->priority_xon_tx[i],
974 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 933 &nsd->priority_xon_tx[i]);
934 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
975 pf->stat_offsets_loaded, 935 pf->stat_offsets_loaded,
976 &osd->rx_jabber, &nsd->rx_jabber); 936 &osd->priority_xoff_tx[i],
977 937 &nsd->priority_xoff_tx[i]);
978 val = rd32(hw, I40E_PRTPM_EEE_STAT); 938 i40e_stat_update32(hw,
979 nsd->tx_lpi_status = 939 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
980 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
981 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
982 nsd->rx_lpi_status =
983 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
984 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
985 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
986 pf->stat_offsets_loaded, 940 pf->stat_offsets_loaded,
987 &osd->tx_lpi_count, &nsd->tx_lpi_count); 941 &osd->priority_xon_2_xoff[i],
988 i40e_stat_update32(hw, I40E_PRTPM_RLPIC, 942 &nsd->priority_xon_2_xoff[i]);
989 pf->stat_offsets_loaded,
990 &osd->rx_lpi_count, &nsd->rx_lpi_count);
991 } 943 }
992 944
945 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
946 I40E_GLPRT_PRC64L(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->rx_size_64, &nsd->rx_size_64);
949 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
950 I40E_GLPRT_PRC127L(hw->port),
951 pf->stat_offsets_loaded,
952 &osd->rx_size_127, &nsd->rx_size_127);
953 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
954 I40E_GLPRT_PRC255L(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->rx_size_255, &nsd->rx_size_255);
957 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
958 I40E_GLPRT_PRC511L(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->rx_size_511, &nsd->rx_size_511);
961 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
962 I40E_GLPRT_PRC1023L(hw->port),
963 pf->stat_offsets_loaded,
964 &osd->rx_size_1023, &nsd->rx_size_1023);
965 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
966 I40E_GLPRT_PRC1522L(hw->port),
967 pf->stat_offsets_loaded,
968 &osd->rx_size_1522, &nsd->rx_size_1522);
969 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
970 I40E_GLPRT_PRC9522L(hw->port),
971 pf->stat_offsets_loaded,
972 &osd->rx_size_big, &nsd->rx_size_big);
973
974 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
975 I40E_GLPRT_PTC64L(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->tx_size_64, &nsd->tx_size_64);
978 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
979 I40E_GLPRT_PTC127L(hw->port),
980 pf->stat_offsets_loaded,
981 &osd->tx_size_127, &nsd->tx_size_127);
982 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
983 I40E_GLPRT_PTC255L(hw->port),
984 pf->stat_offsets_loaded,
985 &osd->tx_size_255, &nsd->tx_size_255);
986 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
987 I40E_GLPRT_PTC511L(hw->port),
988 pf->stat_offsets_loaded,
989 &osd->tx_size_511, &nsd->tx_size_511);
990 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
991 I40E_GLPRT_PTC1023L(hw->port),
992 pf->stat_offsets_loaded,
993 &osd->tx_size_1023, &nsd->tx_size_1023);
994 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
995 I40E_GLPRT_PTC1522L(hw->port),
996 pf->stat_offsets_loaded,
997 &osd->tx_size_1522, &nsd->tx_size_1522);
998 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
999 I40E_GLPRT_PTC9522L(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->tx_size_big, &nsd->tx_size_big);
1002
1003 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1004 pf->stat_offsets_loaded,
1005 &osd->rx_undersize, &nsd->rx_undersize);
1006 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1007 pf->stat_offsets_loaded,
1008 &osd->rx_fragments, &nsd->rx_fragments);
1009 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1010 pf->stat_offsets_loaded,
1011 &osd->rx_oversize, &nsd->rx_oversize);
1012 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1013 pf->stat_offsets_loaded,
1014 &osd->rx_jabber, &nsd->rx_jabber);
1015
1016 /* FDIR stats */
1017 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1018 pf->stat_offsets_loaded,
1019 &osd->fd_atr_match, &nsd->fd_atr_match);
1020 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1021 pf->stat_offsets_loaded,
1022 &osd->fd_sb_match, &nsd->fd_sb_match);
1023
1024 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1025 nsd->tx_lpi_status =
1026 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1027 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1028 nsd->rx_lpi_status =
1029 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1030 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1031 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1032 pf->stat_offsets_loaded,
1033 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1034 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1035 pf->stat_offsets_loaded,
1036 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1037
993 pf->stat_offsets_loaded = true; 1038 pf->stat_offsets_loaded = true;
994} 1039}
995 1040
996/** 1041/**
1042 * i40e_update_stats - Update the various statistics counters.
1043 * @vsi: the VSI to be updated
1044 *
1045 * Update the various stats for this VSI and its related entities.
1046 **/
1047void i40e_update_stats(struct i40e_vsi *vsi)
1048{
1049 struct i40e_pf *pf = vsi->back;
1050
1051 if (vsi == pf->vsi[pf->lan_vsi])
1052 i40e_update_pf_stats(pf);
1053
1054 i40e_update_vsi_stats(vsi);
1055}
1056
1057/**
997 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter 1058 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
998 * @vsi: the VSI to be searched 1059 * @vsi: the VSI to be searched
999 * @macaddr: the MAC address 1060 * @macaddr: the MAC address
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1101} 1162}
1102 1163
1103/** 1164/**
1165 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1166 * @vsi: the PF Main VSI - inappropriate for any other VSI
1167 * @macaddr: the MAC address
1168 **/
1169static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1170{
1171 struct i40e_aqc_remove_macvlan_element_data element;
1172 struct i40e_pf *pf = vsi->back;
1173 i40e_status aq_ret;
1174
1175 /* Only appropriate for the PF main VSI */
1176 if (vsi->type != I40E_VSI_MAIN)
1177 return;
1178
1179 ether_addr_copy(element.mac_addr, macaddr);
1180 element.vlan_tag = 0;
1181 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1182 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1183 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1184 if (aq_ret)
1185 dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
1186}
1187
1188/**
1104 * i40e_add_filter - Add a mac/vlan filter to the VSI 1189 * i40e_add_filter - Add a mac/vlan filter to the VSI
1105 * @vsi: the VSI to be searched 1190 * @vsi: the VSI to be searched
1106 * @macaddr: the MAC address 1191 * @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1125 if (!f) 1210 if (!f)
1126 goto add_filter_out; 1211 goto add_filter_out;
1127 1212
1128 memcpy(f->macaddr, macaddr, ETH_ALEN); 1213 ether_addr_copy(f->macaddr, macaddr);
1129 f->vlan = vlan; 1214 f->vlan = vlan;
1130 f->changed = true; 1215 f->changed = true;
1131 1216
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1249 return -EADDRNOTAVAIL; 1334 return -EADDRNOTAVAIL;
1250 } 1335 }
1251 1336
1252 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len); 1337 ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
1253 } 1338 }
1254 1339
1255 /* In order to be sure to not drop any packets, add the new address 1340 /* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1263 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); 1348 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1264 i40e_sync_vsi_filters(vsi); 1349 i40e_sync_vsi_filters(vsi);
1265 1350
1266 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1351 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1267 1352
1268 return 0; 1353 return 0;
1269} 1354}
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1313 vsi->tc_config.numtc = numtc; 1398 vsi->tc_config.numtc = numtc;
1314 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1399 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1315 /* Number of queues per enabled TC */ 1400 /* Number of queues per enabled TC */
1316 num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc); 1401 num_tc_qps = vsi->alloc_queue_pairs/numtc;
1317 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1402 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1318 1403
1319 /* Setup queue offset/count for all TCs for given VSI */ 1404 /* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1520 cmd_flags = 0; 1605 cmd_flags = 0;
1521 1606
1522 /* add to delete list */ 1607 /* add to delete list */
1523 memcpy(del_list[num_del].mac_addr, 1608 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1524 f->macaddr, ETH_ALEN);
1525 del_list[num_del].vlan_tag = 1609 del_list[num_del].vlan_tag =
1526 cpu_to_le16((u16)(f->vlan == 1610 cpu_to_le16((u16)(f->vlan ==
1527 I40E_VLAN_ANY ? 0 : f->vlan)); 1611 I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1542 num_del = 0; 1626 num_del = 0;
1543 memset(del_list, 0, sizeof(*del_list)); 1627 memset(del_list, 0, sizeof(*del_list));
1544 1628
1545 if (aq_ret) 1629 if (aq_ret &&
1630 pf->hw.aq.asq_last_status !=
1631 I40E_AQ_RC_ENOENT)
1546 dev_info(&pf->pdev->dev, 1632 dev_info(&pf->pdev->dev,
1547 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1633 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1548 aq_ret, 1634 aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1554 del_list, num_del, NULL); 1640 del_list, num_del, NULL);
1555 num_del = 0; 1641 num_del = 0;
1556 1642
1557 if (aq_ret) 1643 if (aq_ret &&
1644 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1558 dev_info(&pf->pdev->dev, 1645 dev_info(&pf->pdev->dev,
1559 "ignoring delete macvlan error, err %d, aq_err %d\n", 1646 "ignoring delete macvlan error, err %d, aq_err %d\n",
1560 aq_ret, pf->hw.aq.asq_last_status); 1647 aq_ret, pf->hw.aq.asq_last_status);
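
The extra check against I40E_AQ_RC_ENOENT makes the delete path idempotent: "entry not found" from the admin queue is expected when a filter is already gone, so only other failures get logged. A sketch of the pattern with invented error codes:

    #include <stdio.h>

    #define AQ_RC_OK     0
    #define AQ_RC_ENOENT 2  /* hypothetical: entry did not exist */

    /* Treat "already deleted" as success; warn on anything else. */
    static int del_filter(int aq_ret, int aq_status)
    {
        if (aq_ret && aq_status != AQ_RC_ENOENT) {
            fprintf(stderr, "delete failed: err %d, aq_err %d\n",
                    aq_ret, aq_status);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return del_filter(1, AQ_RC_ENOENT); /* stale delete: ignored */
    }
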
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1583 cmd_flags = 0; 1670 cmd_flags = 0;
1584 1671
1585 /* add to add array */ 1672 /* add to add array */
1586 memcpy(add_list[num_add].mac_addr, 1673 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1587 f->macaddr, ETH_ALEN);
1588 add_list[num_add].vlan_tag = 1674 add_list[num_add].vlan_tag =
1589 cpu_to_le16( 1675 cpu_to_le16(
1590 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); 1676 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1681 return; 1767 return;
1682 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 1768 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1683 1769
1684 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 1770 for (v = 0; v < pf->num_alloc_vsi; v++) {
1685 if (pf->vsi[v] && 1771 if (pf->vsi[v] &&
1686 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) 1772 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1687 i40e_sync_vsi_filters(pf->vsi[v]); 1773 i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1698static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 1784static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1699{ 1785{
1700 struct i40e_netdev_priv *np = netdev_priv(netdev); 1786 struct i40e_netdev_priv *np = netdev_priv(netdev);
1701 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 1787 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1702 struct i40e_vsi *vsi = np->vsi; 1788 struct i40e_vsi *vsi = np->vsi;
1703 1789
1704 /* MTU < 68 is an error and causes problems on some kernels */ 1790 /* MTU < 68 is an error and causes problems on some kernels */
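
For scale: with the default 1500-byte MTU, the new bound works out to max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, so adding VLAN_HLEN is what keeps maximum-size tagged frames from being rejected.
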
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
2312 rx_ctx.crcstrip = 1; 2398 rx_ctx.crcstrip = 1;
2313 rx_ctx.l2tsel = 1; 2399 rx_ctx.l2tsel = 1;
2314 rx_ctx.showiv = 1; 2400 rx_ctx.showiv = 1;
2401 /* set the prefena field to 1 because the manual says to */
2402 rx_ctx.prefena = 1;
2315 2403
2316 /* clear the context in the HMC */ 2404 /* clear the context in the HMC */
2317 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2405 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2413 **/ 2501 **/
2414static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2502static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2415{ 2503{
2504 struct i40e_ring *tx_ring, *rx_ring;
2416 u16 qoffset, qcount; 2505 u16 qoffset, qcount;
2417 int i, n; 2506 int i, n;
2418 2507
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2426 qoffset = vsi->tc_config.tc_info[n].qoffset; 2515 qoffset = vsi->tc_config.tc_info[n].qoffset;
2427 qcount = vsi->tc_config.tc_info[n].qcount; 2516 qcount = vsi->tc_config.tc_info[n].qcount;
2428 for (i = qoffset; i < (qoffset + qcount); i++) { 2517 for (i = qoffset; i < (qoffset + qcount); i++) {
2429 struct i40e_ring *rx_ring = vsi->rx_rings[i]; 2518 rx_ring = vsi->rx_rings[i];
2430 struct i40e_ring *tx_ring = vsi->tx_rings[i]; 2519 tx_ring = vsi->tx_rings[i];
2431 rx_ring->dcb_tc = n; 2520 rx_ring->dcb_tc = n;
2432 tx_ring->dcb_tc = n; 2521 tx_ring->dcb_tc = n;
2433 } 2522 }
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2565 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2654 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2566 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2655 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2567 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | 2656 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2568 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2569 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2657 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2570 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2658 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2571 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2659 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2733 &q_vector->affinity_mask); 2821 &q_vector->affinity_mask);
2734 } 2822 }
2735 2823
2824 vsi->irqs_ready = true;
2736 return 0; 2825 return 0;
2737 2826
2738free_queue_irqs: 2827free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3152 3241
3153 pf_q = vsi->base_queue; 3242 pf_q = vsi->base_queue;
3154 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3243 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3244
3245 /* warn the TX unit of coming changes */
3246 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3247 if (!enable)
3248 udelay(10);
3249
3155 for (j = 0; j < 50; j++) { 3250 for (j = 0; j < 50; j++) {
3156 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3251 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3157 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3252 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3160 usleep_range(1000, 2000); 3255 usleep_range(1000, 2000);
3161 } 3256 }
3162 /* Skip if the queue is already in the requested state */ 3257 /* Skip if the queue is already in the requested state */
3163 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3258 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3164 continue;
3165 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3166 continue; 3259 continue;
3167 3260
3168 /* turn on/off the queue */ 3261 /* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3178 /* wait for the change to finish */ 3271 /* wait for the change to finish */
3179 for (j = 0; j < 10; j++) { 3272 for (j = 0; j < 10; j++) {
3180 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3273 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3181 if (enable) { 3274 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3182 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3275 break;
3183 break;
3184 } else {
3185 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3186 break;
3187 }
3188 3276
3189 udelay(10); 3277 udelay(10);
3190 } 3278 }
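
The rewritten conditions here and in i40e_vsi_control_rx lean on !! normalizing the masked status bit to 0 or 1 so it compares directly against the bool enable, letting one test serve both the enable and disable paths. A tiny sketch with a hypothetical mask:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define QENA_STAT_MASK 0x4u  /* hypothetical status bit */

    static bool queue_state_matches(bool enable, uint32_t reg)
    {
        /* !! collapses any non-zero masked value to exactly 1 */
        return enable == !!(reg & QENA_STAT_MASK);
    }

    int main(void)
    {
        assert(queue_state_matches(true, 0x4));   /* enabled, bit set */
        assert(queue_state_matches(false, 0x0));  /* disabled, bit clear */
        assert(!queue_state_matches(true, 0x0));  /* still waiting */
        return 0;
    }
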
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3223 usleep_range(1000, 2000); 3311 usleep_range(1000, 2000);
3224 } 3312 }
3225 3313
3226 if (enable) { 3314 /* Skip if the queue is already in the requested state */
3227 /* is STAT set ? */ 3315 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3228 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3316 continue;
3229 continue;
3230 } else {
3231 /* is !STAT set ? */
3232 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3233 continue;
3234 }
3235 3317
3236 /* turn on/off the queue */ 3318 /* turn on/off the queue */
3237 if (enable) 3319 if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3244 for (j = 0; j < 10; j++) { 3326 for (j = 0; j < 10; j++) {
3245 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3327 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3246 3328
3247 if (enable) { 3329 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3248 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3330 break;
3249 break;
3250 } else {
3251 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3252 break;
3253 }
3254 3331
3255 udelay(10); 3332 udelay(10);
3256 } 3333 }
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 	if (!vsi->q_vectors)
 		return;
 
+	if (!vsi->irqs_ready)
+		return;
+
+	vsi->irqs_ready = false;
 	for (i = 0; i < vsi->num_q_vectors; i++) {
 		u16 vector = i + base;
 
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 	int i;
 
 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i])
 			i40e_vsi_free_q_vectors(pf->vsi[i]);
 	i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+		i40e_down(vsi);
+	i40e_vsi_free_irq(vsi);
+	i40e_vsi_free_tx_resources(vsi);
+	i40e_vsi_free_rx_resources(vsi);
+}
+
+/**
  * i40e_quiesce_vsi - Pause a given VSI
  * @vsi: the VSI being paused
 **/
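The new i40e_vsi_close() helper centralizes a teardown sequence that was previously duplicated at its call sites; test_and_set_bit() both marks the VSI down and reports whether it already was, so i40e_down() runs at most once. A hedged userspace sketch of the same once-only guard, using C11 atomics and invented names:

#include <stdatomic.h>
#include <stdbool.h>

struct vsi_state {
	atomic_bool down;	/* stands in for the __I40E_DOWN state bit */
};

static void vsi_close_once(struct vsi_state *s)
{
	/* atomic_exchange returns the prior value, so only the caller that
	 * flips the flag from false to true runs the one-shot stop path */
	if (!atomic_exchange(&s->down, true)) {
		/* stop traffic here, exactly once */
	}
	/* frees placed after this point must tolerate repeat calls */
}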
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
 	if (vsi->netdev && netif_running(vsi->netdev)) {
 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
 	} else {
-		set_bit(__I40E_DOWN, &vsi->state);
-		i40e_down(vsi);
+		i40e_vsi_close(vsi);
 	}
 }
 
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
 	if (vsi->netdev && netif_running(vsi->netdev))
 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
 	else
-		i40e_up(vsi);   /* this clears the DOWN bit */
+		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
 }
 
 /**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			i40e_quiesce_vsi(pf->vsi[v]);
 	}
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			i40e_unquiesce_vsi(pf->vsi[v]);
 	}
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
 	}
 
 	/* Update each VSI */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (!pf->vsi[v])
 			continue;
 
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
 				 pf->vsi[v]->seid);
 			/* Will try to configure as many components */
 		} else {
+			/* Re-configure VSI vectors based on updated TC map */
+			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
 			if (pf->vsi[v]->netdev)
 				i40e_dcbnl_set_all(pf->vsi[v]);
 		}
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 			/* When status is not DISABLED then DCBX in FW */
 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
 				       DCB_CAP_DCBX_VER_IEEE;
-			pf->flags |= I40E_FLAG_DCB_ENABLED;
+
+			pf->flags |= I40E_FLAG_DCB_CAPABLE;
+			/* Enable DCB tagging only when more than one TC */
+			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+				pf->flags |= I40E_FLAG_DCB_ENABLED;
 		}
+	} else {
+		dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+			 pf->hw.aq.asq_last_status);
 	}
 
 out:
 	return err;
 }
 #endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ */
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+	char speed[SPEED_SIZE] = "Unknown";
+	char fc[FC_SIZE] = "RX/TX";
+
+	if (!isup) {
+		netdev_info(vsi->netdev, "NIC Link is Down\n");
+		return;
+	}
+
+	switch (vsi->back->hw.phy.link_info.link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		strncpy(speed, "40 Gbps", SPEED_SIZE);
+		break;
+	case I40E_LINK_SPEED_10GB:
+		strncpy(speed, "10 Gbps", SPEED_SIZE);
+		break;
+	case I40E_LINK_SPEED_1GB:
+		strncpy(speed, "1000 Mbps", SPEED_SIZE);
+		break;
+	default:
+		break;
+	}
+
+	switch (vsi->back->hw.fc.current_mode) {
+	case I40E_FC_FULL:
+		strncpy(fc, "RX/TX", FC_SIZE);
+		break;
+	case I40E_FC_TX_PAUSE:
+		strncpy(fc, "TX", FC_SIZE);
+		break;
+	case I40E_FC_RX_PAUSE:
+		strncpy(fc, "RX", FC_SIZE);
+		break;
+	default:
+		strncpy(fc, "None", FC_SIZE);
+		break;
+	}
+
+	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+		    speed, fc);
+}
 
 /**
  * i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 
 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
 	    (vsi->netdev)) {
-		netdev_info(vsi->netdev, "NIC Link is Up\n");
+		i40e_print_link_message(vsi, true);
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
 	} else if (vsi->netdev) {
-		netdev_info(vsi->netdev, "NIC Link is Down\n");
+		i40e_print_link_message(vsi, false);
 	}
 
 	/* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
-	if (!vsi->netdev) {
-		err = EINVAL;
-		goto err_setup_rx;
-	}
-	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
-		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
-	err = i40e_vsi_request_irq(vsi, int_name);
-	if (err)
-		goto err_setup_rx;
+	if (vsi->netdev) {
+		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+		err = i40e_vsi_request_irq(vsi, int_name);
+		if (err)
+			goto err_setup_rx;
 
-	/* Notify the stack of the actual queue counts. */
-	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
-	if (err)
-		goto err_set_queues;
+		/* Notify the stack of the actual queue counts. */
+		err = netif_set_real_num_tx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
 
-	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
-	if (err)
-		goto err_set_queues;
+		err = netif_set_real_num_rx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
+
+	} else if (vsi->type == I40E_VSI_FDIR) {
+		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+			 dev_driver_string(&pf->pdev->dev));
+		err = i40e_vsi_request_irq(vsi, int_name);
+	} else {
+		err = -EINVAL;
+		goto err_setup_rx;
+	}
 
 	err = i40e_up_complete(vsi);
 	if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 
-	if (test_and_set_bit(__I40E_DOWN, &vsi->state))
-		return 0;
-
-	i40e_down(vsi);
-	i40e_vsi_free_irq(vsi);
-
-	i40e_vsi_free_tx_resources(vsi);
-	i40e_vsi_free_rx_resources(vsi);
+	i40e_vsi_close(vsi);
 
 	return 0;
 }
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
 	WARN_ON(in_interrupt());
 
+	if (i40e_check_asq_alive(&pf->hw))
+		i40e_vc_notify_reset(pf);
+
 	/* do the biggest reset indicated */
 	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
 
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 		/* Find the VSI(s) that requested a re-init */
 		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
-		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
 			if (vsi != NULL &&
 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	int ret = 0;
 	u8 type;
 
+	/* Not DCB capable or capability disabled */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+		return ret;
+
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	if (!need_reconfig)
 		goto exit;
 
+	/* Enable DCB tagging only when more than one TC */
+	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+		pf->flags |= I40E_FLAG_DCB_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
 	/* Reconfiguration needed quiesce all VSIs */
 	i40e_pf_quiesce_all_vsi(pf);
 
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 	fcnt_prog = i40e_get_current_fd_count(pf);
-	fcnt_avail = pf->hw.fdir_shared_filter_count +
-		     pf->fdir_pf_filter_count;
+	fcnt_avail = i40e_get_fd_cnt_all(pf);
 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
 			i40e_veb_link_event(pf->veb[i], link_up);
 
 	/* ... now the local VSIs */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
 			i40e_vsi_link_event(pf->vsi[i], link_up);
 }
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
 
 	if (new_link == old_link)
 		return;
-
 	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
-		netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-			    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+		i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
 
 	/* Notify the base of the switch tree connected to
 	 * the link.  Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
 	 *  for each q_vector
 	 *      force an interrupt
 	 */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		struct i40e_vsi *vsi = pf->vsi[v];
 		int armed = 0;
 
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
 	/* Update the stats for active netdevs so the network stack
 	 * can look at updated numbers whenever it cares to
 	 */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i] && pf->vsi[i]->netdev)
 			i40e_update_stats(pf->vsi[i]);
 
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 	u16 pending, i = 0;
 	i40e_status ret;
 	u16 opcode;
+	u32 oldval;
 	u32 val;
 
 	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
 		return;
 
+	/* check for error indications */
+	val = rd32(&pf->hw, pf->hw.aq.arq.len);
+	oldval = val;
+	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+	}
+	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+	}
+	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+	val = rd32(&pf->hw, pf->hw.aq.asq.len);
+	oldval = val;
+	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+	}
+	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+	}
+	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
 	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
 	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 	if (!event.msg_buf)
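The added block drains latched admin-queue error indications before processing events: read the register once, report and clear each flagged condition in a local copy, and write back only when something actually changed. An illustrative standalone sketch of that pattern (bit layout invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_VFE  0x1u	/* invented bit layout, for illustration */
#define ERR_OVFL 0x2u

/* Returns true when the caller should write the cleared value back,
 * so the common error-free path costs no extra register write. */
static bool ack_queue_errors(uint32_t *val)
{
	uint32_t oldval = *val;

	if (*val & ERR_VFE) {
		puts("VF error detected");
		*val &= ~ERR_VFE;
	}
	if (*val & ERR_OVFL) {
		puts("overflow error detected");
		*val &= ~ERR_OVFL;
	}
	return oldval != *val;
}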
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 	int ret;
 
 	/* build VSI that owns this VEB, temporarily attached to base VEB */
-	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
 		if (pf->vsi[v] &&
 		    pf->vsi[v]->veb_idx == veb->idx &&
 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 		goto end_reconstitute;
 
 	/* create the remaining VSIs attached to this VEB */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
 			continue;
 
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
 		}
 	} while (err);
 
-	/* increment MSI-X count because current FW skips one */
-	pf->hw.func_caps.num_msix_vectors++;
-
 	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
 	    (pf->hw.aq.fw_maj_ver < 2)) {
 		pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 {
 	struct i40e_vsi *vsi;
-	bool new_vsi = false;
-	int err, i;
+	int i;
 
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 
 	/* find existing VSI and see if it needs configuring */
 	vsi = NULL;
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
 			vsi = pf->vsi[i];
 			break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 				     pf->vsi[pf->lan_vsi]->seid, 0);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
-			goto err_vsi;
+			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+			return;
 		}
-		new_vsi = true;
-	}
-	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
-
-	err = i40e_vsi_setup_tx_resources(vsi);
-	if (err)
-		goto err_setup_tx;
-	err = i40e_vsi_setup_rx_resources(vsi);
-	if (err)
-		goto err_setup_rx;
-
-	if (new_vsi) {
-		char int_name[IFNAMSIZ + 9];
-		err = i40e_vsi_configure(vsi);
-		if (err)
-			goto err_setup_rx;
-		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
-			 dev_driver_string(&pf->pdev->dev));
-		err = i40e_vsi_request_irq(vsi, int_name);
-		if (err)
-			goto err_setup_rx;
-		err = i40e_up_complete(vsi);
-		if (err)
-			goto err_up_complete;
-		clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
 	}
 
-	return;
-
-err_up_complete:
-	i40e_down(vsi);
-	i40e_vsi_free_irq(vsi);
-err_setup_rx:
-	i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
-	i40e_vsi_free_tx_resources(vsi);
-err_vsi:
-	pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-	i40e_vsi_clear(vsi);
+	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
 }
 
 /**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 	int i;
 
 	i40e_fdir_filter_exit(pf);
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
 			i40e_vsi_release(pf->vsi[i]);
 			break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 static int i40e_prep_for_reset(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret;
+	i40e_status ret = 0;
 	u32 v;
 
 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 
 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
-	if (i40e_check_asq_alive(hw))
-		i40e_vc_notify_reset(pf);
-
 	/* quiesce the VSIs and their queues that are not already DOWN */
 	i40e_pf_quiesce_all_vsi(pf);
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			pf->vsi[v]->seid = 0;
 	}
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 	i40e_shutdown_adminq(&pf->hw);
 
 	/* call shutdown HMC */
-	ret = i40e_shutdown_lan_hmc(hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
-		clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+	if (hw->hmc.hmc_obj) {
+		ret = i40e_shutdown_lan_hmc(hw);
+		if (ret) {
+			dev_warn(&pf->pdev->dev,
+				 "shutdown_lan_hmc failed: %d\n", ret);
+			clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+		}
 	}
 	return ret;
 }
 
 /**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+	struct i40e_driver_version dv;
+
+	dv.major_version = DRV_VERSION_MAJOR;
+	dv.minor_version = DRV_VERSION_MINOR;
+	dv.build_version = DRV_VERSION_BUILD;
+	dv.subbuild_version = 0;
+	strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
  * i40e_reset_and_rebuild - reset and rebuild using a saved config
  * @pf: board private structure
  * @reinit: if the Main VSI needs to re-initialized.
  **/
 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 {
-	struct i40e_driver_version dv;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
 	u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	 * because the reset will make them disappear.
 	 */
 	ret = i40e_pf_reset(hw);
-	if (ret)
+	if (ret) {
 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+		goto end_core_reset;
+	}
 	pf->pfr_count++;
 
 	if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 		i40e_verify_eeprom(pf);
 	}
 
+	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
 	if (ret) {
 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	}
 
 	/* tell the firmware that we're starting */
-	dv.major_version = DRV_VERSION_MAJOR;
-	dv.minor_version = DRV_VERSION_MINOR;
-	dv.build_version = DRV_VERSION_BUILD;
-	dv.subbuild_version = 0;
-	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
-
-	dev_info(&pf->pdev->dev, "reset complete\n");
+	i40e_send_version(pf);
 
 end_core_reset:
 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 **/
 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
-	const int vxlan_hdr_qwords = 4;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
 	u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 			port = pf->vxlan_ports[i];
 			ret = port ?
 					i40e_aq_add_udp_tunnel(hw, ntohs(port),
-							vxlan_hdr_qwords,
 							I40E_AQC_TUNNEL_TYPE_VXLAN,
 							&filter_index, NULL)
 					: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	 * find next empty vsi slot, looping back around if necessary
 	 */
 	i = pf->next_vsi;
-	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+	while (i < pf->num_alloc_vsi && pf->vsi[i])
 		i++;
-	if (i >= pf->hw.func_caps.num_vsis) {
+	if (i >= pf->num_alloc_vsi) {
 		i = 0;
 		while (i < pf->next_vsi && pf->vsi[i])
 			i++;
 	}
 
-	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
 		vsi_idx = i;	/* Found one! */
 	} else {
 		ret = -ENODEV;
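This function hunts for a free VSI slot starting at a rotating hint and wraps around to the start of the table. A self-contained sketch of that search, under the assumption that an occupied slot is simply a non-NULL pointer:

/* Scan [next, n), then wrap to [0, next); returns -1 when the table is
 * full. Starting at a rotating hint spreads slot reuse across the table. */
static int find_free_slot(void **tbl, int n, int next)
{
	int i = next;

	while (i < n && tbl[i])
		i++;
	if (i >= n)
		for (i = 0; i < next && tbl[i]; i++)
			;
	return (i < n && !tbl[i]) ? i : -1;
}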
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	vsi->netdev_registered = false;
 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
 	INIT_LIST_HEAD(&vsi->mac_filter_list);
+	vsi->irqs_ready = false;
 
 	ret = i40e_set_num_rings_in_vsi(vsi);
 	if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 **/
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
+	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_pf *pf = vsi->back;
 	int i;
 
 	/* Set basic values in the rings to be used later during open() */
 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		struct i40e_ring *tx_ring;
-		struct i40e_ring *rx_ring;
-
 		/* allocate space for both Tx and Rx in one shot */
 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
 		if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
 		vectors = 0;
 	}
 
-	pf->num_msix_entries = vectors;
-
 	return vectors;
 }
 
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
 	for (i = 0; i < v_budget; i++)
 		pf->msix_entries[i].entry = i;
 	vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+	if (vec != v_budget) {
+		/* If we have limited resources, we will start with no vectors
+		 * for the special features and then allocate vectors to some
+		 * of these features based on the policy and at the end disable
+		 * the features that did not get any vectors.
+		 */
+		pf->num_vmdq_msix = 0;
+	}
+
 	if (vec < I40E_MIN_MSIX) {
 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
 		kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
 	} else if (vec == I40E_MIN_MSIX) {
 		/* Adjust for minimal MSIX use */
-		dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
-		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = 0;
 		pf->num_vmdq_qps = 0;
-		pf->num_vmdq_msix = 0;
 		pf->num_lan_qps = 1;
 		pf->num_lan_msix = 1;
 
 	} else if (vec != v_budget) {
+		/* reserve the misc vector */
+		vec--;
+
 		/* Scale vector usage down */
 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
-		vec--;                    /* reserve the misc vector */
+		pf->num_vmdq_vsis = 1;
 
 		/* partition out the remaining vectors */
 		switch (vec) {
 		case 2:
-			pf->num_vmdq_vsis = 1;
 			pf->num_lan_msix = 1;
 			break;
 		case 3:
-			pf->num_vmdq_vsis = 1;
 			pf->num_lan_msix = 2;
 			break;
 		default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
 		}
 	}
 
+	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+	    (pf->num_vmdq_msix == 0)) {
+		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+	}
 	return err;
 }
 
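The reworked i40e_init_msix() asks for a full vector budget and then degrades gracefully: at the bare minimum only the LAN path keeps a vector, while a partial grant reserves the miscellaneous vector first and forces VMDq down to a single vector. A rough sketch of that policy, with invented parameters rather than the driver's exact arithmetic:

/* Invented knobs: budget = vectors requested, granted = vectors obtained,
 * min_vec = smallest workable allocation. Not the driver's exact math. */
static void scale_msix(int granted, int budget, int min_vec,
		       int *lan_msix, int *vmdq_msix)
{
	if (granted == budget) {
		*vmdq_msix = 1;			/* everything fits */
		*lan_msix = granted - 2;	/* minus misc and VMDq */
	} else if (granted <= min_vec) {
		*vmdq_msix = 0;			/* bare minimum: LAN only */
		*lan_msix = 1;
	} else {
		granted--;			/* reserve the misc vector */
		*vmdq_msix = 1;			/* force VMDq to one vector */
		*lan_msix = granted - 1;	/* remainder goes to the LAN */
	}
}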
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 	if (vsi->netdev)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
-			       i40e_napi_poll, vsi->work_limit);
+			       i40e_napi_poll, NAPI_POLL_WEIGHT);
 
 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 		if (err) {
 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
 				       I40E_FLAG_RSS_ENABLED	|
-				       I40E_FLAG_DCB_ENABLED	|
+				       I40E_FLAG_DCB_CAPABLE	|
 				       I40E_FLAG_SRIOV_ENABLED	|
 				       I40E_FLAG_FD_SB_ENABLED	|
 				       I40E_FLAG_FD_ATR_ENABLED	|
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 		return 0;
 
 	queue_count = min_t(int, queue_count, pf->rss_size_max);
-	queue_count = rounddown_pow_of_two(queue_count);
 
 	if (queue_count != pf->rss_size) {
 		i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		    I40E_FLAG_MSIX_ENABLED    |
 		    I40E_FLAG_RX_1BUF_ENABLED;
 
+	/* Set default ITR */
+	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues
 	 */
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	if (pf->hw.func_caps.rss) {
 		pf->flags |= I40E_FLAG_RSS_ENABLED;
 		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
-		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
 	} else {
 		pf->rss_size = 1;
 	}
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+		/* Setup a counter for fd_atr per pf */
+		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+			/* Setup a counter for fd_sb per pf */
+			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 		} else {
 			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev,
+			    const unsigned char *addr,
+			    u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    unsigned char *addr,
+			    u16 flags)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = 0;
+
+	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	/* Hardware does not support aging addresses so if a
+	 * ndm_state is given only allow permanent addresses
+	 */
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+		netdev_info(dev, "FDB only supports static addresses\n");
+		return -EINVAL;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+	else
+		err = -EINVAL;
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    unsigned char *addr)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		netdev_info(dev, "FDB only supports static addresses\n");
+		return -EINVAL;
+	}
+
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_del(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_del(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+			     struct netlink_callback *cb,
+			     struct net_device *dev,
+			     int idx)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+	return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
 	.ndo_stop		= i40e_close,
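In the FDB add handler above, a duplicate address is only an error when the netlink request carried NLM_F_EXCL; otherwise re-adding an existing address is treated as success. That decision in isolation, as a small sketch:

#include <errno.h>
#include <linux/netlink.h>	/* NLM_F_EXCL */

static int squash_duplicate(int err, unsigned short flags)
{
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		return 0;	/* silently accept the re-add */
	return err;
}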
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_set_features	= i40e_set_features,
 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
-	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw,
+	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
+	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck,
 #ifdef CONFIG_I40E_VXLAN
 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
 #endif
+#ifdef HAVE_FDB_OPS
+	.ndo_fdb_add		= i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+	.ndo_fdb_del		= i40e_ndo_fdb_del,
+	.ndo_fdb_dump		= i40e_ndo_fdb_dump,
+#endif
+#endif
 };
 
 /**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			   NETIF_F_TSO_ECN	       |
 			   NETIF_F_TSO6		       |
 			   NETIF_F_RXCSUM	       |
-			   NETIF_F_NTUPLE	       |
 			   NETIF_F_RXHASH	       |
 			   0;
 
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		netdev->features |= NETIF_F_NTUPLE;
+
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features;
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
-		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+		ether_addr_copy(mac_addr, hw->mac.perm_addr);
+		/* The following two steps are necessary to prevent reception
+		 * of tagged packets - by default the NVM loads a MAC-VLAN
+		 * filter that will accept any tagged packet.  This is to
+		 * prevent that during normal operations until a specific
+		 * VLAN tag filter has been set.
+		 */
+		i40e_rm_default_mac_filter(vsi, mac_addr);
+		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
 	} else {
 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	}
 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
 
-	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
-	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+	ether_addr_copy(netdev->dev_addr, mac_addr);
+	ether_addr_copy(netdev->perm_addr, mac_addr);
 	/* vlan gets same features (except vlan offload)
 	 * after any tweaks for specific VSI types
 	 */
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
 		return;
 
 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
-	return;
 }
 
 /**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+		if (pf->vf[vsi->vf_id].spoofchk) {
+			ctxt.info.valid_sections |=
+				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+			ctxt.info.sec_flags |=
+				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+		}
 		/* Setup the VSI tx/rx queue map for TC0 only for now */
 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
 		break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 				unregister_netdev(vsi->netdev);
 			}
 		} else {
-			if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
-				i40e_down(vsi);
-			i40e_vsi_free_irq(vsi);
-			i40e_vsi_free_tx_resources(vsi);
-			i40e_vsi_free_rx_resources(vsi);
+			i40e_vsi_close(vsi);
 		}
 		i40e_vsi_disable_irq(vsi);
 	}
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
 	 * from up the network stack.
 	 */
-	for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] &&
 		    pf->vsi[i]->uplink_seid == uplink_seid &&
 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 
 	if (!veb && uplink_seid != pf->mac_seid) {
 
-		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		for (i = 0; i < pf->num_alloc_vsi; i++) {
 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
 				vsi = pf->vsi[i];
 				break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
 	 *       the VEB itself, so don't use (*branch) after this loop.
 	 */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (!pf->vsi[i])
 			continue;
 		if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
 	pf = veb->pf;
 
 	/* find the remaining VSI and check for extras */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
 			n++;
 			vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
 
 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
 	i40e_veb_clear(veb);
-
-	return;
 }
 
 /**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
 	}
 
 	/* make sure there is such a vsi and uplink */
-	for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
 			break;
-	if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
 		return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
 	if (ret)
 		goto err_veb;
+	if (vsi_idx == pf->lan_vsi)
+		pf->lan_veb = veb->idx;
 
 	return veb;
 
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
			 "header: %d reported %d total\n",
			 num_reported, num_total);
 
-		if (num_reported) {
-			int sz = sizeof(*sw_config) * num_reported;
-
-			kfree(pf->sw_config);
-			pf->sw_config = kzalloc(sz, GFP_KERNEL);
-			if (pf->sw_config)
-				memcpy(pf->sw_config, sw_config, sz);
-		}
-
 		for (i = 0; i < num_reported; i++) {
 			struct i40e_aqc_switch_config_element_resp *ele =
 				&sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 	queues_left = pf->hw.func_caps.num_tx_qp;
 
 	if ((queues_left == 1) ||
-	    !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
-	    !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
-			   I40E_FLAG_DCB_ENABLED))) {
+	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
 		/* one qp for PF, no queues for anything else */
 		queues_left = 0;
 		pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
 			       I40E_FLAG_FD_SB_ENABLED	|
 			       I40E_FLAG_FD_ATR_ENABLED	|
-			       I40E_FLAG_DCB_ENABLED	|
+			       I40E_FLAG_DCB_CAPABLE	|
 			       I40E_FLAG_SRIOV_ENABLED	|
 			       I40E_FLAG_VMDQ_ENABLED);
+	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+				  I40E_FLAG_FD_SB_ENABLED |
+				  I40E_FLAG_FD_ATR_ENABLED |
+				  I40E_FLAG_DCB_CAPABLE))) {
+		/* one qp for PF */
+		pf->rss_size = pf->num_lan_qps = 1;
+		queues_left -= pf->num_lan_qps;
+
+		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
+			       I40E_FLAG_FD_SB_ENABLED	|
+			       I40E_FLAG_FD_ATR_ENABLED	|
+			       I40E_FLAG_DCB_ENABLED	|
+			       I40E_FLAG_VMDQ_ENABLED);
 	} else {
 		/* Not enough queues for all TCs */
-		if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
 		}
 		pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 	}
 
 	pf->queues_left = queues_left;
-	return;
 }
 
 /**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
 
 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
 		buf += sprintf(buf, "RSS ");
-	buf += sprintf(buf, "FDir ");
 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
-		buf += sprintf(buf, "ATR ");
-	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+		buf += sprintf(buf, "FD_ATR ");
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+		buf += sprintf(buf, "FD_SB ");
 		buf += sprintf(buf, "NTUPLE ");
-	if (pf->flags & I40E_FLAG_DCB_ENABLED)
+	}
+	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
 		buf += sprintf(buf, "DCB ");
 	if (pf->flags & I40E_FLAG_PTP)
 		buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@
 **/
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	struct i40e_driver_version dv;
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
 	static u16 pfs_found;
 	u16 link_status;
 	int err = 0;
 	u32 len;
+	u32 i;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	i40e_verify_eeprom(pf);
 
+	/* Rev 0 hardware was never productized */
+	if (hw->revision_id < 1)
+		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
 	i40e_clear_pxe_mode(hw);
 	err = i40e_get_capabilities(pf);
 	if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_mac_addr;
 	}
 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
-	memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
 
 	pci_set_drvdata(pdev, pf);
 	pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = i40e_init_pf_dcb(pf);
 	if (err) {
 		dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
-		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
-		goto err_init_dcb;
+		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+		/* Continue without DCB enabled */
 	}
 #endif /* CONFIG_I40E_DCB */
 
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	i40e_determine_queue_usage(pf);
 	i40e_init_interrupt_scheme(pf);
 
-	/* Set up the *vsi struct based on the number of VSIs in the HW,
-	 * and set up our local tracking of the MAIN PF vsi.
+	/* The number of VSIs reported by the FW is the minimum guaranteed
+	 * to us; HW supports far more and we share the remaining pool with
+	 * the other PFs. We allocate space for more than the guarantee with
+	 * the understanding that we might not get them all later.
 	 */
-	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+	else
+		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
 	pf->vsi = kzalloc(len, GFP_KERNEL);
 	if (!pf->vsi) {
 		err = -ENOMEM;
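The probe path now sizes the VSI tracking table to at least a fixed floor rather than exactly the firmware-guaranteed count, since the hardware pool is shared and more VSIs may become available later. A sketch of the sizing rule (the floor value here is illustrative, standing in for I40E_MIN_VSI_ALLOC):

#include <stdlib.h>

#define MIN_VSI_ALLOC 51	/* illustrative floor */

static void **alloc_vsi_table(unsigned int fw_guaranteed, unsigned int *n)
{
	*n = fw_guaranteed < MIN_VSI_ALLOC ? MIN_VSI_ALLOC : fw_guaranteed;
	return calloc(*n, sizeof(void *));	/* zeroed, like kzalloc */
}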
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
 		goto err_vsis;
 	}
+	/* if FDIR VSI was set up, start it now */
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+			i40e_vsi_open(pf->vsi[i]);
+			break;
+		}
+	}
 
 	/* The main driver is (mostly) up and happy. We need to set this state
 	 * before setting up the misc vector or we get a race and the vector
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+#ifdef CONFIG_PCI_IOV
 	/* prep for VF support */
 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
				 err);
 		}
 	}
+#endif /* CONFIG_PCI_IOV */
 
 	pfs_found++;
 
 	i40e_dbg_pf_init(pf);
 
 	/* tell the firmware that we're starting */
-	dv.major_version = DRV_VERSION_MAJOR;
-	dv.minor_version = DRV_VERSION_MINOR;
-	dv.build_version = DRV_VERSION_BUILD;
-	dv.subbuild_version = 0;
-	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+	i40e_send_version(pf);
 
 	/* since everything's happy, start the service_task timer */
 	mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
 err_switch_setup:
 	i40e_reset_interrupt_capability(pf);
 	del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
 err_mac_addr:
 err_configure_lan_hmc:
 	(void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
 	}
 
 	/* shutdown and destroy the HMC */
-	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
-	if (ret_code)
-		dev_warn(&pdev->dev,
-			 "Failed to destroy the HMC resources: %d\n", ret_code);
+	if (pf->hw.hmc.hmc_obj) {
+		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+		if (ret_code)
+			dev_warn(&pdev->dev,
+				 "Failed to destroy the HMC resources: %d\n",
+				 ret_code);
+	}
 
 	/* shutdown the adminq */
 	ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
 	i40e_clear_interrupt_scheme(pf);
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i]) {
 			i40e_vsi_clear_rings(pf->vsi[i]);
 			i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
 
 	kfree(pf->qp_pile);
 	kfree(pf->irq_pile);
-	kfree(pf->sw_config);
 	kfree(pf->vsi);
 
 	/* force a PF reset to clean anything leftover */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..a430699c41d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
				u16 *fw_major_version, u16 *fw_minor_version,
				u16 *api_major_version, u16 *api_minor_version,
				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
160 u16 udp_port, u8 header_len, 162 u16 udp_port, u8 protocol_index,
161 u8 protocol_index, u8 *filter_index, 163 u8 *filter_index,
162 struct i40e_asq_cmd_details *cmd_details); 164 struct i40e_asq_cmd_details *cmd_details);
163i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 165i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
164 struct i40e_asq_cmd_details *cmd_details); 166 struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
167i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, 169i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
168 u16 flags, u8 *mac_addr, 170 u16 flags, u8 *mac_addr,
169 struct i40e_asq_cmd_details *cmd_details); 171 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
173 u16 seid, u16 credit, u8 max_credit,
174 struct i40e_asq_cmd_details *cmd_details);
170i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 175i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
171 struct i40e_asq_cmd_details *cmd_details); 176 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, 177i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
216i40e_status i40e_get_mac_addr(struct i40e_hw *hw, 221i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
217 u8 *mac_addr); 222 u8 *mac_addr);
218i40e_status i40e_validate_mac_addr(u8 *mac_addr); 223i40e_status i40e_validate_mac_addr(u8 *mac_addr);
224void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
219/* prototype for functions used for NVM access */ 225/* prototype for functions used for NVM access */
220i40e_status i40e_init_nvm(struct i40e_hw *hw); 226i40e_status i40e_init_nvm(struct i40e_hw *hw);
221i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 227i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..101f439acda6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ 49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
51#define I40E_PTP_TX_TIMEOUT (HZ * 15)
52 51
53/** 52/**
54 * i40e_ptp_read - Read the PHC time from the device 53 * i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
217} 216}
218 217
219/** 218/**
220 * i40e_ptp_tx_work
221 * @work: pointer to work struct
222 *
223 * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
224 * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
225 * the stack in the skb.
226 */
227static void i40e_ptp_tx_work(struct work_struct *work)
228{
229 struct i40e_pf *pf = container_of(work, struct i40e_pf,
230 ptp_tx_work);
231 struct i40e_hw *hw = &pf->hw;
232 u32 prttsyn_stat_0;
233
234 if (!pf->ptp_tx_skb)
235 return;
236
237 if (time_is_before_jiffies(pf->ptp_tx_start +
238 I40E_PTP_TX_TIMEOUT)) {
239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return;
244 }
245
246 prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
247 if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
248 i40e_ptp_tx_hwtstamp(pf);
249 else
250 schedule_work(&pf->ptp_tx_work);
251}
252
253/**
254 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem 219 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
255 * @ptp: The PTP clock structure 220 * @ptp: The PTP clock structure
256 * @rq: The requested feature to change 221 * @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
608 u32 regval; 573 u32 regval;
609 574
610 spin_lock_init(&pf->tmreg_lock); 575 spin_lock_init(&pf->tmreg_lock);
611 INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
612 576
613 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, 577 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
614 netdev->name); 578 netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
647 pf->ptp_tx = false; 611 pf->ptp_tx = false;
648 pf->ptp_rx = false; 612 pf->ptp_rx = false;
649 613
650 cancel_work_sync(&pf->ptp_tx_work);
651 if (pf->ptp_tx_skb) { 614 if (pf->ptp_tx_skb) {
652 dev_kfree_skb_any(pf->ptp_tx_skb); 615 dev_kfree_skb_any(pf->ptp_tx_skb);
653 pf->ptp_tx_skb = NULL; 616 pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f425acf1..947de98500f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) 1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) 1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
1343#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
1344#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
1345#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1346#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) 1344#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1347#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 1345#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
1367#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) 1365#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
1368#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
1369#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) 1367#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
1370#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
1371#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
1372#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 1368#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1373#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) 1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1374#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 1370#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
1589#define I40E_GLLAN_TSOMSK_M 0x000442DC 1585#define I40E_GLLAN_TSOMSK_M 0x000442DC
1590#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 1586#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1591#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) 1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1588#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
1589#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
1590#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
1591#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
1592#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
1593#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
1594#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
1595
1592#define I40E_PFLAN_QALLOC 0x001C0400 1596#define I40E_PFLAN_QALLOC 0x001C0400
1593#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1597#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1594#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1598#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include <linux/prefetch.h>
27#include "i40e.h" 28#include "i40e.h"
28#include "i40e_prototype.h" 29#include "i40e_prototype.h"
29 30
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
61 62
62 /* find existing FDIR VSI */ 63 /* find existing FDIR VSI */
63 vsi = NULL; 64 vsi = NULL;
64 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 65 for (i = 0; i < pf->num_alloc_vsi; i++)
65 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) 66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
66 vsi = pf->vsi[i]; 67 vsi = pf->vsi[i];
67 if (!vsi) 68 if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
120 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 121 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
121 dcc |= ((u32)fdir_data->cnt_index << 122 dcc |= ((u32)fdir_data->cnt_index <<
122 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 123 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
123 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 124 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
124 } 125 }
125 126
126 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); 127 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
183 struct iphdr *ip; 184 struct iphdr *ip;
184 bool err = false; 185 bool err = false;
185 int ret; 186 int ret;
186 int i;
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, 187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, 188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
199 ip->saddr = fd_data->src_ip[0]; 199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port; 200 udp->source = fd_data->src_port;
201 201
202 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; 202 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
203 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { 203 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
204 fd_data->pctype = i; 204 if (ret) {
205 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 205 dev_info(&pf->pdev->dev,
206 206 "Filter command send failed for PCTYPE %d (ret = %d)\n",
207 if (ret) { 207 fd_data->pctype, ret);
208 dev_info(&pf->pdev->dev, 208 err = true;
209 "Filter command send failed for PCTYPE %d (ret = %d)\n", 209 } else {
210 fd_data->pctype, ret); 210 dev_info(&pf->pdev->dev,
211 err = true; 211 "Filter OK for PCTYPE %d (ret = %d)\n",
212 } else { 212 fd_data->pctype, ret);
213 dev_info(&pf->pdev->dev,
214 "Filter OK for PCTYPE %d (ret = %d)\n",
215 fd_data->pctype, ret);
216 }
217 } 213 }
218 214
219 return err ? -EOPNOTSUPP : 0; 215 return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
262 } 258 }
263 } 259 }
264 260
265 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; 261 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
266 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 262 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
267 263
268 if (ret) { 264 if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 451
456 /* filter programming failed most likely due to table full */ 452 /* filter programming failed most likely due to table full */
457 fcnt_prog = i40e_get_current_fd_count(pf); 453 fcnt_prog = i40e_get_current_fd_count(pf);
458 fcnt_avail = pf->hw.fdir_shared_filter_count + 454 fcnt_avail = i40e_get_fd_cnt_all(pf);
459 pf->fdir_pf_filter_count;
460
461 /* If ATR is running fcnt_prog can quickly change, 455 /* If ATR is running fcnt_prog can quickly change,
462 * if we are very close to full, it makes sense to disable 456 * if we are very close to full, it makes sense to disable
463 * FD ATR/SB and then re-enable it when there is room. 457 * FD ATR/SB and then re-enable it when there is room.
464 */ 458 */
465 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 459 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
466 /* Turn off ATR first */ 460 /* Turn off ATR first */
467 if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) { 461 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
468 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 462 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
469 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); 463 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
470 pf->auto_disable_flags |= 464 pf->auto_disable_flags |=
471 I40E_FLAG_FD_ATR_ENABLED; 465 I40E_FLAG_FD_ATR_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 466 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
473 } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) { 467 } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
474 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 468 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
475 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 469 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
476 pf->auto_disable_flags |= 470 pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1199 u32 rx_error, 1193 u32 rx_error,
1200 u16 rx_ptype) 1194 u16 rx_ptype)
1201{ 1195{
1196 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1197 bool ipv4 = false, ipv6 = false;
1202 bool ipv4_tunnel, ipv6_tunnel; 1198 bool ipv4_tunnel, ipv6_tunnel;
1203 __wsum rx_udp_csum; 1199 __wsum rx_udp_csum;
1204 __sum16 csum;
1205 struct iphdr *iph; 1200 struct iphdr *iph;
1201 __sum16 csum;
1206 1202
1207 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 1203 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1208 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 1204 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1213 skb->ip_summed = CHECKSUM_NONE; 1209 skb->ip_summed = CHECKSUM_NONE;
1214 1210
1215 /* Rx csum enabled and ip headers found? */ 1211 /* Rx csum enabled and ip headers found? */
1216 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 1212 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1217 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1213 return;
1214
1215 /* did the hardware decode the packet and checksum? */
1216 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1217 return;
1218
1219 /* both known and outer_ip must be set for the below code to work */
1220 if (!(decoded.known && decoded.outer_ip))
1218 return; 1221 return;
1219 1222
1223 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1224 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1225 ipv4 = true;
1226 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1227 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1228 ipv6 = true;
1229
1230 if (ipv4 &&
1231 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1232 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1233 goto checksum_fail;
1234
1220 /* likely incorrect csum if alternate IP extension headers found */ 1235 /* likely incorrect csum if alternate IP extension headers found */
1221 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1236 if (ipv6 &&
1237 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1238 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1239 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1240 /* don't increment checksum err here, non-fatal err */
1222 return; 1241 return;
1223 1242
1224 /* IP or L4 or outmost IP checksum error */ 1243 /* there was some L4 error, count error and punt packet to the stack */
1225 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 1244 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
1226 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 1245 goto checksum_fail;
1227 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 1246
1228 vsi->back->hw_csum_rx_error++; 1247 /* handle packets that were not able to be checksummed due
1248 * to arrival speed, in this case the stack can compute
1249 * the csum.
1250 */
1251 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
1229 return; 1252 return;
1230 }
1231 1253
1254 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1255 * it in the driver, hardware does not do it for us.
1256 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1257 * so the total length of IPv4 header is IHL*4 bytes
 1258 * The UDP_0 bit *may* be set if the *inner* header is UDP
1259 */
1232 if (ipv4_tunnel && 1260 if (ipv4_tunnel &&
1261 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1233 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1262 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1234 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1235 * it in the driver, hardware does not do it for us.
1236 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1237 * so the total length of IPv4 header is IHL*4 bytes
1238 */
1239 skb->transport_header = skb->mac_header + 1263 skb->transport_header = skb->mac_header +
1240 sizeof(struct ethhdr) + 1264 sizeof(struct ethhdr) +
1241 (ip_hdr(skb)->ihl * 4); 1265 (ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1252 (skb->len - skb_transport_offset(skb)), 1276 (skb->len - skb_transport_offset(skb)),
1253 IPPROTO_UDP, rx_udp_csum); 1277 IPPROTO_UDP, rx_udp_csum);
1254 1278
1255 if (udp_hdr(skb)->check != csum) { 1279 if (udp_hdr(skb)->check != csum)
1256 vsi->back->hw_csum_rx_error++; 1280 goto checksum_fail;
1257 return;
1258 }
1259 } 1281 }
1260 1282
1261 skb->ip_summed = CHECKSUM_UNNECESSARY; 1283 skb->ip_summed = CHECKSUM_UNNECESSARY;
1284
1285 return;
1286
1287checksum_fail:
1288 vsi->back->hw_csum_rx_error++;
1262} 1289}
1263 1290
1264/** 1291/**
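
The rewritten i40e_rx_checksum() replaces one catch-all error test with an ordered series of early returns. A standalone model of the resulting decision order (names and flag parameters are ours, not the driver's; ip_err folds the IPE and EIPE bits together):

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { CSUM_NONE, CSUM_UNNECESSARY, CSUM_FAIL };

    static enum verdict rx_csum_verdict(bool rxcsum_on, bool hw_decoded,
                                        bool ptype_known, bool ipv4, bool ipv6,
                                        bool ip_err, bool l4_err,
                                        bool inner_tcp, bool ipv6_exthdr,
                                        bool pprs)
    {
            if (!rxcsum_on || !hw_decoded || !ptype_known)
                    return CSUM_NONE;          /* let the stack verify */
            if (ipv4 && ip_err)
                    return CSUM_FAIL;          /* counted as hw_csum_rx_error */
            if (ipv6 && inner_tcp && l4_err && ipv6_exthdr)
                    return CSUM_NONE;          /* non-fatal, not counted */
            if (l4_err)
                    return CSUM_FAIL;
            if (pprs)
                    return CSUM_NONE;          /* arrived too fast to checksum */
            return CSUM_UNNECESSARY;
    }

    int main(void)
    {
            /* IPv6+TCP with ext. headers and an L4 error: punted, not counted */
            printf("%d\n", rx_csum_verdict(true, true, true, false, true,
                                           false, true, true, true, false));
            return 0;
    }

The one remaining counter bump lives behind the checksum_fail label, so every fatal path is counted exactly once.
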
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1435 /* ERR_MASK will only have valid bits if EOP set */ 1462 /* ERR_MASK will only have valid bits if EOP set */
1436 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 1463 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1437 dev_kfree_skb_any(skb); 1464 dev_kfree_skb_any(skb);
1465 /* TODO: shouldn't we increment a counter indicating the
1466 * drop?
1467 */
1438 goto next_desc; 1468 goto next_desc;
1439 } 1469 }
1440 1470
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1665 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 1695 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1666 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 1696 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1667 1697
1698 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
1699 dtype_cmd |=
1700 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1701 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
1702
1668 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 1703 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1669 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 1704 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1670} 1705}
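
The two added lines pack the ATR counter index with the driver's usual shift-then-mask idiom: shift the value into place, then mask to the field width. The idiom in isolation (toy shift and width, not the real QW1 layout):

    #include <assert.h>
    #include <stdint.h>

    #define CNTINDEX_SHIFT 20
    #define CNTINDEX_MASK  (0x1FFu << CNTINDEX_SHIFT)  /* toy 9-bit field */

    int main(void)
    {
            uint32_t dtype_cmd = 0;
            uint32_t idx = 0x1A5;

            /* encode: shift into position, mask off anything too wide */
            dtype_cmd |= (idx << CNTINDEX_SHIFT) & CNTINDEX_MASK;

            /* decode round-trips as long as idx fits the field */
            assert(((dtype_cmd & CNTINDEX_MASK) >> CNTINDEX_SHIFT) == idx);
            return 0;
    }
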
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1825 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 1860 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1826 I40E_TXD_CTX_QW1_CMD_SHIFT; 1861 I40E_TXD_CTX_QW1_CMD_SHIFT;
1827 1862
1828 pf->ptp_tx_start = jiffies;
1829 schedule_work(&pf->ptp_tx_work);
1830
1831 return 1; 1863 return 1;
1832} 1864}
1833 1865
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2179static int i40e_xmit_descriptor_count(struct sk_buff *skb, 2211static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2180 struct i40e_ring *tx_ring) 2212 struct i40e_ring *tx_ring)
2181{ 2213{
2182#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2183 unsigned int f; 2214 unsigned int f;
2184#endif
2185 int count = 0; 2215 int count = 0;
2186 2216
2187 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 2217 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2190 * + 1 desc for context descriptor, 2220 * + 1 desc for context descriptor,
2191 * otherwise try next time 2221 * otherwise try next time
2192 */ 2222 */
2193#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2194 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2223 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2195 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2224 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2196#else 2225
2197 count += skb_shinfo(skb)->nr_frags;
2198#endif
2199 count += TXD_USE_COUNT(skb_headlen(skb)); 2226 count += TXD_USE_COUNT(skb_headlen(skb));
2200 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 2227 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2201 tx_ring->tx_stats.tx_busy++; 2228 tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d5349698e513..0277894fe1c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
27#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
28#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
29 29
30/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ 30/* Interrupt Throttling and Rate Limiting Goodies */
31 31
32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ 33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
69 69
70/* Supported RSS offloads */ 70/* Supported RSS offloads */
71#define I40E_DEFAULT_RSS_HENA ( \ 71#define I40E_DEFAULT_RSS_HENA ( \
72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
76 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
79 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
80 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
81 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
82 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
83 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ 78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
84 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
122#define i40e_rx_desc i40e_32byte_rx_desc 117#define i40e_rx_desc i40e_32byte_rx_desc
123 118
124#define I40E_MIN_TX_LEN 17 119#define I40E_MIN_TX_LEN 17
125#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */ 120#define I40E_MAX_DATA_PER_TXD 8192
126 121
127/* Tx Descriptors needed, worst case */ 122/* Tx Descriptors needed, worst case */
128#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 123#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
129#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) 124#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
130 125
131#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
132#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
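
The drop from 16383 to 8192 bytes per descriptor is what makes the simpler DESC_NEEDED possible: a 4 KB page fragment now always fits in one descriptor. A quick worked check of the macro arithmetic:

    #include <stdio.h>

    #define I40E_MAX_DATA_PER_TXD 8192
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

    int main(void)
    {
            printf("%d\n", TXD_USE_COUNT(4096));   /* 1: one 4K page, one desc */
            printf("%d\n", TXD_USE_COUNT(8192));   /* 1: exactly at the cap */
            printf("%d\n", TXD_USE_COUNT(16383));  /* 2: the old per-desc max */
            return 0;
    }

With one descriptor per fragment guaranteed on 4 KB-page systems, the worst case reduces to one descriptor per fragment plus a small fixed overhead, which is presumably where MAX_SKB_FRAGS + 4 comes from.
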
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
184 __I40E_TX_DETECT_HANG, 179 __I40E_TX_DETECT_HANG,
185 __I40E_HANG_CHECK_ARMED, 180 __I40E_HANG_CHECK_ARMED,
186 __I40E_RX_PS_ENABLED, 181 __I40E_RX_PS_ENABLED,
187 __I40E_RX_LRO_ENABLED,
188 __I40E_RX_16BYTE_DESC_ENABLED, 182 __I40E_RX_16BYTE_DESC_ENABLED,
189}; 183};
190 184
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
200 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 194 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
201#define clear_check_for_tx_hang(ring) \ 195#define clear_check_for_tx_hang(ring) \
202 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 196 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
203#define ring_is_lro_enabled(ring) \
204 test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
205#define set_ring_lro_enabled(ring) \
206 set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
207#define clear_ring_lro_enabled(ring) \
208 clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
209#define ring_is_16byte_desc_enabled(ring) \ 197#define ring_is_16byte_desc_enabled(ring) \
210 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) 198 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
211#define set_ring_16byte_desc_enabled(ring) \ 199#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..9d39ff23c5fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
36 36
37/* Device IDs */ 37/* Device IDs */
38#define I40E_DEV_ID_SFP_XL710 0x1572 38#define I40E_DEV_ID_SFP_XL710 0x1572
39#define I40E_DEV_ID_SFP_X710 0x1573
40#define I40E_DEV_ID_QEMU 0x1574 39#define I40E_DEV_ID_QEMU 0x1574
41#define I40E_DEV_ID_KX_A 0x157F 40#define I40E_DEV_ID_KX_A 0x157F
42#define I40E_DEV_ID_KX_B 0x1580 41#define I40E_DEV_ID_KX_B 0x1580
43#define I40E_DEV_ID_KX_C 0x1581 42#define I40E_DEV_ID_KX_C 0x1581
44#define I40E_DEV_ID_KX_D 0x1582
45#define I40E_DEV_ID_QSFP_A 0x1583 43#define I40E_DEV_ID_QSFP_A 0x1583
46#define I40E_DEV_ID_QSFP_B 0x1584 44#define I40E_DEV_ID_QSFP_B 0x1584
47#define I40E_DEV_ID_QSFP_C 0x1585 45#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
60/* Max default timeout in ms, */ 58/* Max default timeout in ms, */
61#define I40E_MAX_NVM_TIMEOUT 18000 59#define I40E_MAX_NVM_TIMEOUT 18000
62 60
63/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ 61/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
64#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) 62#define I40E_MS_TO_GTIME(time) ((time) * 1000)
65 63
66/* forward declaration */ 64/* forward declaration */
67struct i40e_hw; 65struct i40e_hw;
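
With the corrected 1 usec GTIME resolution the conversion becomes a plain milliseconds-to-microseconds multiply; for instance the 18000 ms NVM timeout above maps to 18,000,000 GTIME units. Trivially checkable:

    #include <assert.h>

    #define I40E_MS_TO_GTIME(time) ((time) * 1000)

    int main(void)
    {
            assert(I40E_MS_TO_GTIME(18000) == 18000000);
            return 0;
    }
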
@@ -167,6 +165,9 @@ struct i40e_link_status {
167 u8 loopback; 165 u8 loopback;
168 /* is Link Status Event notification to SW enabled */ 166 /* is Link Status Event notification to SW enabled */
169 bool lse_enable; 167 bool lse_enable;
168 u16 max_frame_size;
169 bool crc_enable;
170 u8 pacing;
170}; 171};
171 172
172struct i40e_phy_info { 173struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
409 u8 minor_version; 410 u8 minor_version;
410 u8 build_version; 411 u8 build_version;
411 u8 subbuild_version; 412 u8 subbuild_version;
413 u8 driver_string[32];
412}; 414};
413 415
414/* RX Descriptors */ 416/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
488 } wb; /* writeback */ 490 } wb; /* writeback */
489}; 491};
490 492
491#define I40E_RXD_QW1_STATUS_SHIFT 0
492#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
493
494enum i40e_rx_desc_status_bits { 493enum i40e_rx_desc_status_bits {
495 /* Note: These are predefined bit offsets */ 494 /* Note: These are predefined bit offsets */
496 I40E_RX_DESC_STATUS_DD_SHIFT = 0, 495 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
507 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, 506 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
508 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 507 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
509 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ 508 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
510 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 509 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
510 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
511}; 511};
512 512
513#define I40E_RXD_QW1_STATUS_SHIFT 0
514#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
515 << I40E_RXD_QW1_STATUS_SHIFT)
516
513#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT 517#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
514#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ 518#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
515 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) 519 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
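
Deriving I40E_RXD_QW1_STATUS_MASK from the new I40E_RX_DESC_STATUS_LAST sentinel means the mask tracks the bit list automatically instead of being a hand-maintained 0x7FFF. The idiom in isolation (toy three-bit enum, ours):

    #include <assert.h>

    enum status_bits { BIT_A, BIT_B, BIT_C, STATUS_LAST /* keep last */ };

    #define STATUS_MASK ((1u << STATUS_LAST) - 1)

    int main(void)
    {
            /* three bits defined -> (1 << 3) - 1 == 0x7; adding an entry
             * before STATUS_LAST widens the mask with no other edits */
            assert(STATUS_MASK == 0x7);
            return 0;
    }
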
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
537 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
538 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
539 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
540 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
541}; 546};
542 547
543enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
658 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, 663 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
659 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ 664 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
660 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ 665 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
661 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
662 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, 666 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
663 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, 667 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
664 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, 668 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
862 866
863/* Packet Classifier Types for filters */ 867/* Packet Classifier Types for filters */
864enum i40e_filter_pctype { 868enum i40e_filter_pctype {
865 /* Note: Values 0-28 are reserved for future use */ 869 /* Note: Values 0-30 are reserved for future use */
866 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
867 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
868 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, 870 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
869 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32, 871 /* Note: Value 32 is reserved for future use */
870 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, 872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
871 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, 873 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
872 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, 874 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
873 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, 875 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
874 /* Note: Values 37-38 are reserved for future use */ 876 /* Note: Values 37-40 are reserved for future use */
875 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
876 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, 877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, 878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, 879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
955 struct i40e_aqc_vsi_properties_data info; 955 struct i40e_aqc_vsi_properties_data info;
956}; 956};
957 957
958struct i40e_veb_context {
959 u16 seid;
960 u16 uplink_seid;
961 u16 veb_number;
962 u16 vebs_allocated;
963 u16 vebs_unallocated;
964 u16 flags;
965 struct i40e_aqc_get_veb_parameters_completion info;
966};
967
958/* Statistics collected by each port, VSI, VEB, and S-channel */ 968/* Statistics collected by each port, VSI, VEB, and S-channel */
959struct i40e_eth_stats { 969struct i40e_eth_stats {
960 u64 rx_bytes; /* gorc */ 970 u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
962 u64 rx_multicast; /* mprc */ 972 u64 rx_multicast; /* mprc */
963 u64 rx_broadcast; /* bprc */ 973 u64 rx_broadcast; /* bprc */
964 u64 rx_discards; /* rdpc */ 974 u64 rx_discards; /* rdpc */
965 u64 rx_errors; /* repc */
966 u64 rx_missed; /* rmpc */
967 u64 rx_unknown_protocol; /* rupp */ 975 u64 rx_unknown_protocol; /* rupp */
968 u64 tx_bytes; /* gotc */ 976 u64 tx_bytes; /* gotc */
969 u64 tx_unicast; /* uptc */ 977 u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
1015 u64 tx_size_big; /* ptc9522 */ 1023 u64 tx_size_big; /* ptc9522 */
1016 u64 mac_short_packet_dropped; /* mspdc */ 1024 u64 mac_short_packet_dropped; /* mspdc */
1017 u64 checksum_error; /* xec */ 1025 u64 checksum_error; /* xec */
1026 /* flow director stats */
1027 u64 fd_atr_match;
1028 u64 fd_sb_match;
1018 /* EEE LPI */ 1029 /* EEE LPI */
1019 bool tx_lpi_status; 1030 u32 tx_lpi_status;
1020 bool rx_lpi_status; 1031 u32 rx_lpi_status;
1021 u64 tx_lpi_count; /* etlpic */ 1032 u64 tx_lpi_count; /* etlpic */
1022 u64 rx_lpi_count; /* erlpic */ 1033 u64 rx_lpi_count; /* erlpic */
1023}; 1034};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69cd646..70951d2edcad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
341 int severity; 341 int severity;
342}; 342};
343 343
344/* The following are TBD, not necessary for LAN functionality.
345 * I40E_VIRTCHNL_OP_FCOE
346 */
347
348/* VF reset states - these are written into the RSTAT register: 344/* VF reset states - these are written into the RSTAT register:
349 * I40E_VFGEN_RSTAT1 on the PF 345 * I40E_VFGEN_RSTAT1 on the PF
350 * I40E_VFGEN_RSTAT on the VF 346 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..f5b9d2062573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
29/***********************misc routines*****************************/ 29/***********************misc routines*****************************/
30 30
31/** 31/**
32 * i40e_vc_disable_vf
33 * @pf: pointer to the pf info
34 * @vf: pointer to the vf info
35 *
36 * Disable the VF through a SW reset
37 **/
38static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
39{
40 struct i40e_hw *hw = &pf->hw;
41 u32 reg;
42
43 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
44 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
45 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
46 i40e_flush(hw);
47}
48
49/**
32 * i40e_vc_isvalid_vsi_id 50 * i40e_vc_isvalid_vsi_id
33 * @vf: pointer to the vf info 51 * @vf: pointer to the vf info
34 * @vsi_id: vf relative vsi id 52 * @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 248 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 250 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1; 251 tx_ctx.head_wb_ena = info->headwb_enabled;
234 tx_ctx.head_wb_addr = info->dma_ring_addr + 252 tx_ctx.head_wb_addr = info->dma_headwb_addr;
235 (info->ring_len * sizeof(struct i40e_tx_desc));
236 253
237 /* clear the context in the HMC */ 254 /* clear the context in the HMC */
238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 255 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
336 rx_ctx.tphhead_ena = 1; 353 rx_ctx.tphhead_ena = 1;
337 rx_ctx.lrxqthresh = 2; 354 rx_ctx.lrxqthresh = 2;
338 rx_ctx.crcstrip = 1; 355 rx_ctx.crcstrip = 1;
356 rx_ctx.prefena = 1;
339 357
340 /* clear the context in the HMC */ 358 /* clear the context in the HMC */
341 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 359 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
416 if (ret) 434 if (ret)
417 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 435 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
418 436
437 /* Set VF bandwidth if specified */
438 if (vf->tx_rate) {
439 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
440 vf->tx_rate / 50, 0, NULL);
441 if (ret)
442 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
443 vf->vf_id, ret);
444 }
445
419error_alloc_vsi_res: 446error_alloc_vsi_res:
420 return ret; 447 return ret;
421} 448}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
815 kfree(pf->vf); 842 kfree(pf->vf);
816 pf->vf = NULL; 843 pf->vf = NULL;
817 844
845 /* This check is for when the driver is unloaded while VFs are
846 * assigned. Setting the number of VFs to 0 through sysfs is caught
847 * before this function ever gets called.
848 */
818 if (!i40e_vfs_are_assigned(pf)) { 849 if (!i40e_vfs_are_assigned(pf)) {
819 pci_disable_sriov(pf->pdev); 850 pci_disable_sriov(pf->pdev);
820 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 851 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
867 ret = -ENOMEM; 898 ret = -ENOMEM;
868 goto err_alloc; 899 goto err_alloc;
869 } 900 }
901 pf->vf = vfs;
870 902
871 /* apply default profile */ 903 /* apply default profile */
872 for (i = 0; i < num_alloc_vfs; i++) { 904 for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
876 908
877 /* assign default capabilities */ 909 /* assign default capabilities */
878 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 910 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
911 vfs[i].spoofchk = true;
879 /* vf resources get allocated during reset */ 912 /* vf resources get allocated during reset */
880 i40e_reset_vf(&vfs[i], false); 913 i40e_reset_vf(&vfs[i], false);
881 914
882 /* enable vf vplan_qtable mappings */ 915 /* enable vf vplan_qtable mappings */
883 i40e_enable_vf_mappings(&vfs[i]); 916 i40e_enable_vf_mappings(&vfs[i]);
884 } 917 }
885 pf->vf = vfs;
886 pf->num_alloc_vfs = num_alloc_vfs; 918 pf->num_alloc_vfs = num_alloc_vfs;
887 919
888 i40e_enable_pf_switch_lb(pf); 920 i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
951 if (num_vfs) 983 if (num_vfs)
952 return i40e_pci_sriov_enable(pdev, num_vfs); 984 return i40e_pci_sriov_enable(pdev, num_vfs);
953 985
954 i40e_free_vfs(pf); 986 if (!i40e_vfs_are_assigned(pf)) {
987 i40e_free_vfs(pf);
988 } else {
989 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
990 return -EINVAL;
991 }
955 return 0; 992 return 0;
956} 993}
957 994
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2022 } 2059 }
2023 2060
2024 /* delete the temporary mac address */ 2061 /* delete the temporary mac address */
2025 i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false); 2062 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2063 true, false);
2026 2064
2027 /* add the new mac address */ 2065 /* Delete all the filters for this VSI - we're going to kill it
2028 f = i40e_add_filter(vsi, mac, 0, true, false); 2066 * anyway.
2029 if (!f) { 2067 */
2030 dev_err(&pf->pdev->dev, 2068 list_for_each_entry(f, &vsi->mac_filter_list, list)
2031 "Unable to add VF ucast filter\n"); 2069 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2032 ret = -ENOMEM;
2033 goto error_param;
2034 }
2035 2070
2036 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2071 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2037 /* program mac filter */ 2072 /* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2040 ret = -EIO; 2075 ret = -EIO;
2041 goto error_param; 2076 goto error_param;
2042 } 2077 }
2043 memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); 2078 ether_addr_copy(vf->default_lan_addr.addr, mac);
2044 vf->pf_set_mac = true; 2079 vf->pf_set_mac = true;
2045 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2080 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2046 ret = 0; 2081 ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2088 goto error_pvid; 2123 goto error_pvid;
2089 } 2124 }
2090 2125
2091 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) 2126 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2092 dev_err(&pf->pdev->dev, 2127 dev_err(&pf->pdev->dev,
2093 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2128 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2094 vf_id); 2129 vf_id);
2130 /* Administrator Error - knock the VF offline until he does
2131 * the right thing by reconfiguring his network correctly
2132 * and then reloading the VF driver.
2133 */
2134 i40e_vc_disable_vf(pf, vf);
2135 }
2095 2136
2096 /* Check for condition where there was already a port VLAN ID 2137 /* Check for condition where there was already a port VLAN ID
2097 * filter set and now it is being deleted by setting it to zero. 2138 * filter set and now it is being deleted by setting it to zero.
2139 * Additionally check for the condition where there was a port
2140 * VLAN but now there is a new and different port VLAN being set.
2098 * Before deleting all the old VLAN filters we must add new ones 2141 * Before deleting all the old VLAN filters we must add new ones
2099 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2142 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2100 * MAC addresses deleted. 2143 * MAC addresses deleted.
2101 */ 2144 */
2102 if (!(vlan_id || qos) && vsi->info.pvid) 2145 if ((!(vlan_id || qos) ||
2146 (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
2147 vsi->info.pvid)
2103 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2148 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2104 2149
2105 if (vsi->info.pvid) { 2150 if (vsi->info.pvid) {
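
The reworked pvid condition fires in two situations, a port VLAN being cleared and a port VLAN being replaced, but only when one is already set. A standalone predicate makes the logic easier to eyeball (simplified to a single combined value standing in for vlan_id/qos, which is what the driver's vlan_id | qos comparison operates on):

    #include <stdbool.h>
    #include <stdio.h>

    static bool must_readd_vlan_any(unsigned int new_vlan_qos,
                                    unsigned int cur_pvid)
    {
            /* re-add I40E_VLAN_ANY filters first when an existing port
             * VLAN is being cleared (new == 0) or replaced (new != cur),
             * so the MAC filters survive the old-VLAN teardown */
            return (new_vlan_qos == 0 || new_vlan_qos != cur_pvid) && cur_pvid;
    }

    int main(void)
    {
            printf("%d\n", must_readd_vlan_any(0, 100));    /* 1: clearing */
            printf("%d\n", must_readd_vlan_any(200, 100));  /* 1: changing */
            printf("%d\n", must_readd_vlan_any(100, 100));  /* 0: unchanged */
            printf("%d\n", must_readd_vlan_any(100, 0));    /* 0: none set */
            return 0;
    }
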
@@ -2150,6 +2195,8 @@ error_pvid:
2150 return ret; 2195 return ret;
2151} 2196}
2152 2197
2198#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
2199#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
2153/** 2200/**
2154 * i40e_ndo_set_vf_bw 2201 * i40e_ndo_set_vf_bw
2155 * @netdev: network interface device structure 2202 * @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
2158 * 2205 *
2159 * configure vf tx rate 2206 * configure vf tx rate
2160 **/ 2207 **/
2161int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) 2208int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2209 int max_tx_rate)
2162{ 2210{
2163 return -EOPNOTSUPP; 2211 struct i40e_netdev_priv *np = netdev_priv(netdev);
2212 struct i40e_pf *pf = np->vsi->back;
2213 struct i40e_vsi *vsi;
2214 struct i40e_vf *vf;
2215 int speed = 0;
2216 int ret = 0;
2217
2218 /* validate the request */
2219 if (vf_id >= pf->num_alloc_vfs) {
2220 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2221 ret = -EINVAL;
2222 goto error;
2223 }
2224
2225 if (min_tx_rate) {
 2226 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) for vf %d; only 0 is supported.\n",
2227 min_tx_rate, vf_id);
2228 return -EINVAL;
2229 }
2230
2231 vf = &(pf->vf[vf_id]);
2232 vsi = pf->vsi[vf->lan_vsi_index];
2233 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2234 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2235 ret = -EINVAL;
2236 goto error;
2237 }
2238
2239 switch (pf->hw.phy.link_info.link_speed) {
2240 case I40E_LINK_SPEED_40GB:
2241 speed = 40000;
2242 break;
2243 case I40E_LINK_SPEED_10GB:
2244 speed = 10000;
2245 break;
2246 case I40E_LINK_SPEED_1GB:
2247 speed = 1000;
2248 break;
2249 default:
2250 break;
2251 }
2252
2253 if (max_tx_rate > speed) {
 2254 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.\n",
2255 max_tx_rate, vf->vf_id);
2256 ret = -EINVAL;
2257 goto error;
2258 }
2259
2260 if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2261 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2262 max_tx_rate = 50;
2263 }
2264
 2265 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
2266 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2267 max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2268 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2269 if (ret) {
2270 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2271 ret);
2272 ret = -EIO;
2273 goto error;
2274 }
2275 vf->tx_rate = max_tx_rate;
2276error:
2277 return ret;
2164} 2278}
2165 2279
2166/** 2280/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2200 2314
2201 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); 2315 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2202 2316
2203 ivi->tx_rate = 0; 2317 ivi->max_tx_rate = vf->tx_rate;
2318 ivi->min_tx_rate = 0;
2204 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2319 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2205 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2320 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2206 I40E_VLAN_PRIORITY_SHIFT; 2321 I40E_VLAN_PRIORITY_SHIFT;
2322 if (vf->link_forced == false)
2323 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2324 else if (vf->link_up == true)
2325 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2326 else
2327 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2328 ivi->spoofchk = vf->spoofchk;
2207 ret = 0; 2329 ret = 0;
2208 2330
2209error_param: 2331error_param:
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2270error_out: 2392error_out:
2271 return ret; 2393 return ret;
2272} 2394}
2395
2396/**
 2397 * i40e_ndo_set_vf_spoofck
2398 * @netdev: network interface device structure
2399 * @vf_id: vf identifier
2400 * @enable: flag to enable or disable feature
2401 *
2402 * Enable or disable VF spoof checking
2403 **/
2404int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
2405{
2406 struct i40e_netdev_priv *np = netdev_priv(netdev);
2407 struct i40e_vsi *vsi = np->vsi;
2408 struct i40e_pf *pf = vsi->back;
2409 struct i40e_vsi_context ctxt;
2410 struct i40e_hw *hw = &pf->hw;
2411 struct i40e_vf *vf;
2412 int ret = 0;
2413
2414 /* validate the request */
2415 if (vf_id >= pf->num_alloc_vfs) {
2416 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2417 ret = -EINVAL;
2418 goto out;
2419 }
2420
2421 vf = &(pf->vf[vf_id]);
2422
2423 if (enable == vf->spoofchk)
2424 goto out;
2425
2426 vf->spoofchk = enable;
2427 memset(&ctxt, 0, sizeof(ctxt));
2428 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2429 ctxt.pf_num = pf->hw.pf_id;
2430 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2431 if (enable)
2432 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2433 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2434 if (ret) {
2435 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2436 ret);
2437 ret = -EIO;
2438 }
2439out:
2440 return ret;
2441}
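
Once wired into the netdev ops, this is the spoof-check knob iproute2 already exposes; with a recent enough ip(8) it should be reachable as "ip link set <pf-netdev> vf <id> spoofchk on|off", and the current setting is reported back through the ivi->spoofchk field filled in by i40e_ndo_get_vf_config() earlier in this file.
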
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
101 bool link_forced; 102 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */ 103 bool link_up; /* only valid if vf link is forced */
104 bool spoofchk;
103}; 105};
104 106
105void i40e_free_vfs(struct i40e_pf *pf); 107void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
115int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); 117int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
116int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 118int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
117 int vf_id, u16 vlan_id, u8 qos); 119 int vf_id, u16 vlan_id, u8 qos);
118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 120int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
121 int max_tx_rate);
119int i40e_ndo_get_vf_config(struct net_device *netdev, 122int i40e_ndo_get_vf_config(struct net_device *netdev,
120 int vf_id, struct ifla_vf_info *ivi); 123 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
125int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
122 126
123void i40e_vc_notify_link_state(struct i40e_pf *pf); 127void i40e_vc_notify_link_state(struct i40e_pf *pf);
124void i40e_vc_notify_reset(struct i40e_pf *pf); 128void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37a07a8..3a423836a565 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4# Copyright(c) 2013 Intel Corporation. 4# Copyright(c) 2013 - 2014 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details. 13# more details.
14# 14#
15# You should have received a copy of the GNU General Public License along
16# with this program. If not, see <http://www.gnu.org/licenses/>.
17#
15# The full GNU General Public License is included in this distribution in 18# The full GNU General Public License is included in this distribution in
16# the file called "COPYING". 19# the file called "COPYING".
17# 20#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..eb67cce3e8f9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -28,6 +31,16 @@
 #include "i40e_prototype.h"
 
 /**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+	       (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
  * i40e_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
  *
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
  *
  * Configure base address and length registers for the transmit queue
  **/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 {
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
 		wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 			 lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
+		reg = rd32(hw, I40E_VF_ATQBAL1);
 	} else {
 		/* configure the transmit queue */
 		wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 			 lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+		reg = rd32(hw, I40E_PF_ATQBAL);
 	}
+
+	/* Check one register to verify that config was applied */
+	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
 }
 
 /**
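Both register-config routines in this file now adopt the same write-then-verify idiom: after programming the queue base and length, read one register back and fail initialization if the value did not stick. A condensed sketch of the pattern (wr32()/rd32() and the error code are the driver's own; the helper itself is illustrative):

	static i40e_status write_and_verify(struct i40e_hw *hw, u32 reg, u32 val)
	{
		wr32(hw, reg, val);
		/* a readback mismatch means the PCI write never landed */
		if (rd32(hw, reg) != val)
			return I40E_ERR_ADMIN_QUEUE_ERROR;
		return 0;
	}

Because i40e_init_asq()/i40e_init_arq() below now check the return value, a dead or misconfigured function is caught at AdminQ bring-up rather than at the first command timeout.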
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
  *
  * Configure base address and length registers for the receive (event queue)
  **/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 {
+	i40e_status ret_code = 0;
+	u32 reg = 0;
+
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
 		wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 			 lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
+		reg = rd32(hw, I40E_VF_ARQBAL1);
 	} else {
 		/* configure the receive queue */
 		wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 			 lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+		reg = rd32(hw, I40E_PF_ARQBAL);
 	}
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+	/* Check one register to verify that config was applied */
+	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
 }
 
 /**
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
-	i40e_config_asq_regs(hw);
+	ret_code = i40e_config_asq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
 
 	/* success! */
 	goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
-	i40e_config_arq_regs(hw);
+	ret_code = i40e_config_arq_regs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
 
 	/* success! */
 	goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
 		goto asq_send_command_exit;
 	}
 
+	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+		status = I40E_ERR_NVM;
+		goto asq_send_command_exit;
+	}
+
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
 	if (cmd_details) {
 		*details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
+	if (i40e_is_nvm_update_op(desc))
+		hw->aq.nvm_busy = true;
+
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
 			e->msg_size);
 	}
 
+	if (i40e_is_nvm_update_op(&e->desc))
+		hw->aq.nvm_busy = false;
+
 	/* Restore the original datalen and buffer address in the desc,
 	 * FW updates datalen to indicate the event message
 	 * size
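Taken together, the three hunks above implement a simple single-owner lock around NVM admin commands: the send path rejects a second erase/update while one is outstanding, marks the queue busy once the descriptor is posted, and the receive path clears the flag when the completion event arrives. A self-contained toy model of that handshake (illustrative only; in the driver the two halves run under asq_mutex/arq_mutex):

	#include <stdbool.h>

	struct aq_model { bool nvm_busy; };

	/* send side: returns false where the driver returns I40E_ERR_NVM */
	static bool aq_model_send_nvm_op(struct aq_model *aq)
	{
		if (aq->nvm_busy)
			return false;	/* an NVM op is already in flight */
		aq->nvm_busy = true;	/* set once the descriptor is posted */
		return true;
	}

	/* receive side: the completion event releases the "lock" */
	static void aq_model_complete_nvm_op(struct aq_model *aq)
	{
		aq->nvm_busy = false;
	}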
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..e3472c62e155 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
 	u16 fw_min_ver;	/* firmware minor version */
 	u16 api_maj_ver;	/* api major version */
 	u16 api_min_ver;	/* api minor version */
+	bool nvm_busy;
 
 	struct mutex asq_mutex;	/* Send queue lock */
 	struct mutex arq_mutex;	/* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..e656ea7a7920 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -31,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0001
+#define I40E_FW_API_VERSION_MINOR	0x0002
 #define I40E_FW_API_VERSION_A0_MINOR	0x0000
 
 struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_version	= 0x0001,
 	i40e_aqc_opc_driver_version	= 0x0002,
 	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
 
 	/* resource ownership */
 	i40e_aqc_opc_request_resource	= 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	i40e_aqc_opc_set_storm_control_config	= 0x0280,
-	i40e_aqc_opc_get_storm_control_config	= 0x0281,
-
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_query_switching_comp_bw_config	= 0x041A,
 	i40e_aqc_opc_suspend_port_tx	= 0x041B,
 	i40e_aqc_opc_resume_port_tx	= 0x041C,
+	i40e_aqc_opc_configure_partition_bw	= 0x041D,
 
 	/* hmc */
 	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_partner_advt	= 0x0616,
 	i40e_aqc_opc_set_lb_modes	= 0x0618,
 	i40e_aqc_opc_get_phy_wol_caps	= 0x0621,
-	i40e_aqc_opc_set_phy_reset	= 0x0622,
+	i40e_aqc_opc_set_phy_debug	= 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm	= 0x0625,
 
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read	= 0x0701,
 	i40e_aqc_opc_nvm_erase	= 0x0702,
 	i40e_aqc_opc_nvm_update	= 0x0703,
+	i40e_aqc_opc_nvm_config_read	= 0x0704,
+	i40e_aqc_opc_nvm_config_write	= 0x0705,
 
 	/* virtualization commands */
 	i40e_aqc_opc_send_msg_to_pf	= 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_debug_set_mode	= 0xFF01,
 	i40e_aqc_opc_debug_read_reg	= 0xFF03,
 	i40e_aqc_opc_debug_write_reg	= 0xFF04,
-	i40e_aqc_opc_debug_read_reg_sg	= 0xFF05,
-	i40e_aqc_opc_debug_write_reg_sg	= 0xFF06,
 	i40e_aqc_opc_debug_modify_reg	= 0xFF07,
 	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
 	i40e_aqc_opc_debug_modify_internals	= 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
 
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+	u8	pf_id;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
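The I40E_CHECK_CMD_LENGTH() invocations that follow each direct-command structure in this header are compile-time size assertions: every direct AQ command must exactly fill the 16-byte parameter area of the descriptor. One common way to build such a check, shown here as a sketch rather than the driver's exact macro:

	/* compiles only when the struct is exactly 16 bytes: a wrong size
	 * makes the divisor 0, which is a compile-time error in an enum */
	#define CHECK_CMD_LENGTH(X) \
		enum { check_len_##X = 16 / (sizeof(struct X) == 16 ? 1 : 0) }

	struct example_cmd { unsigned char pf_id, reserved[15]; };
	CHECK_CMD_LENGTH(example_cmd);	/* 1 + 15 == 16, so this compiles */

That is why the new i40e_aqc_set_pf_context above pads a single pf_id byte with reserved[15].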
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
 #define I40E_AQ_VSI_TYPE_PF		0x2
 #define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
 #define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI	0x8
 	__le32 addr_high;
 	__le32 addr_low;
 };
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_VLAN	0x10
 	__le16 seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK	0x3FF
-	u8 reserved[10];
+	__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID	0x8000
+	u8 reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
-	__le32 broadcast_threshold;
-	__le32 multicast_threshold;
-	__le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW	0x01
-#define I40E_AQC_STORM_CONTROL_MDICW	0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW	0x04
-#define I40E_AQC_STORM_CONTROL_BDICW	0x08
-#define I40E_AQC_STORM_CONTROL_BIDU	0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT	8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK	(0x3FF << \
-					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
-	u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
 /* DCB 0x03xx*/
 
 /* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
 struct i40e_aqc_configure_switching_comp_ets_data {
 	u8 reserved[4];
 	u8 tc_valid_bits;
-	u8 reserved1;
+	u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
 	u8 tc_strict_priority_flags;
-	u8 reserved2[17];
+	u8 reserved1[17];
 	u8 tc_bw_share_credits[8];
-	u8 reserved3[96];
+	u8 reserved2[96];
 };
 
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
 
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+	__le16 pf_valid_bits;
+	u8 min_bw[16];	/* guaranteed bandwidth */
+	u8 max_bw[16];	/* bandwidth limit */
+};
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_XLPPI			= 0x9,
 	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
 	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
+	I40E_PHY_TYPE_10GBASE_AOC		= 0xC,
+	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,
 	I40E_PHY_TYPE_100BASE_TX		= 0x11,
 	I40E_PHY_TYPE_1000BASE_T		= 0x12,
 	I40E_PHY_TYPE_10GBASE_T			= 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
 	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
 	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
-	I40E_PHY_TYPE_20GBASE_KR2		= 0x1B,
+	I40E_PHY_TYPE_1000BASE_SX		= 0x1B,
+	I40E_PHY_TYPE_1000BASE_LX		= 0x1C,
+	I40E_PHY_TYPE_1000BASE_T_OPTICAL	= 0x1D,
+	I40E_PHY_TYPE_20GBASE_KR2		= 0x1E,
 	I40E_PHY_TYPE_MAX
 };
 
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
 #define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
 #define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT	3
-#define I40E_AQ_PHY_FLAG_AN_MASK	(0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF		0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN	0x01
-#define I40E_AQ_PHY_FLAG_AN_ON		0x02
+#define I40E_AQ_PHY_LINK_ENABLED	0x08
+#define I40E_AQ_PHY_AN_ENABLED		0x10
 #define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
 	__le16 eee_capability;
 #define I40E_AQ_EEE_100BASE_TX		0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_LINK_TX_ACTIVE		0x00
 #define I40E_AQ_LINK_TX_DRAINED		0x01
 #define I40E_AQ_LINK_TX_FLUSHED		0x03
+#define I40E_AQ_LINK_FORCED_40G		0x10
 	u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
 	__le16 max_frame_size;
 	u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
-	u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST	0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+	u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
+					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
 	u8 reserved[15];
 };
 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 
 enum i40e_aq_phy_reg_type {
 	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+	__le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
+#define ANVM_READ_SINGLE_FEATURE		0
+#define ANVM_READ_MULTIPLE_FEATURES		1
+	__le16 element_count;
+	__le16 element_id;	/* Feature/field ID */
+	u8 reserved[2];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+	__le16 cmd_flags;
+	__le16 element_count;
+	u8 reserved[4];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+	__le16 feature_id;
+	__le16 instance_id;
+	__le16 feature_options;
+	__le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK	0x2
+	__le16 field_id;
+	__le16 instance_id;
+	__le16 field_options;
+	__le16 field_value;
+};
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
 	__le16 udp_port;
-	u8 header_len; /* in DWords, 1 to 15 */
+	u8 reserved0[3];
 	u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE	0x3
-	u8 variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH	0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH	0x1
-	u8 udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN		0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE		0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP	0x2
-	u8 reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE	0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
+	u8 reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
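With the reworked layout, the caller only supplies a port and a tunnel type; the header length, UDP-length mode, and key-index fields are gone, and the type codes changed so that VXLAN is now 0x00. A hedged sketch of filling the new structure to register the IANA VXLAN port (the helper name is illustrative; the descriptor setup and the AQ send call are omitted):

	static void example_fill_vxlan_add(struct i40e_aqc_add_udp_tunnel *cmd)
	{
		memset(cmd, 0, sizeof(*cmd));	/* reserved0[]/reserved1[] stay zero */
		cmd->udp_port = cpu_to_le16(4789);		/* IANA VXLAN port */
		cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN; /* 0x00 under the new encoding */
	}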
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb9e525..8e6a6dd9212b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae084378faab..a43155afdbe2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
 		switch (hw->device_id) {
 		case I40E_DEV_ID_SFP_XL710:
-		case I40E_DEV_ID_SFP_X710:
 		case I40E_DEV_ID_QEMU:
 		case I40E_DEV_ID_KX_A:
 		case I40E_DEV_ID_KX_B:
 		case I40E_DEV_ID_KX_C:
-		case I40E_DEV_ID_KX_D:
 		case I40E_DEV_ID_QSFP_A:
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
  **/
 bool i40evf_check_asq_alive(struct i40e_hw *hw)
 {
-	return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+	if (hw->aq.asq.len)
+		return !!(rd32(hw, hw->aq.asq.len) &
+			  I40E_PF_ATQLEN_ATQENABLE_MASK);
+	else
+		return false;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3eed440..a2ad9a4e399d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
 					(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
 					((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
 
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
-	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
-	     (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
-	      ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
 /**
  * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
  * @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      u32 pd_index);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
-					u32 idx, bool is_pf);
+					u32 idx);
 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
 					     u32 idx);
 i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..d6f762241537 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
 	u8  tphdata_ena;
 	u8  tphhead_ena;
 	u8  lrxqthresh;
+	u8  prefena;	/* NOTE: normally must be set to 1 at init */
 };
 
 /* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373b745d..21a91b14bf81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2b76f8..849edcc2e398 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953cf106..369839655818 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -1337,8 +1340,6 @@
 #define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
 #define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
 #define I40E_GLLAN_TSOMSK_M 0x000442DC
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
 #define I40E_PFLAN_QALLOC 0x001C0400
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
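The new I40E_GLLAN_TXPRE_QDIS registers added above let software pre-disable a Tx queue before tearing it down. A hedged sketch of how a PF-side caller might use them (the / 128 register-block math is an assumption for illustration; each of the twelve registers covers a block of queues):

	static void example_txq_predisable(struct i40e_hw *hw, u32 pf_q)
	{
		u32 reg = (pf_q << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) &
			  I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;

		/* request pre-disable of physical Tx queue pf_q */
		wr32(hw, I40E_GLLAN_TXPRE_QDIS(pf_q / 128),
		     reg | I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
		/* writing CLEAR_QDIS with the same index later re-arms it */
	}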
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2e339b..7fa7a41915c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f40abe1..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    u32 rx_error,
 				    u16 rx_ptype)
 {
+	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+	bool ipv4 = false, ipv6 = false;
 	bool ipv4_tunnel, ipv6_tunnel;
 	__wsum rx_udp_csum;
-	__sum16 csum;
 	struct iphdr *iph;
+	__sum16 csum;
 
 	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
 		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Rx csum enabled and ip headers found? */
-	if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
-	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 		return;
 
+	/* did the hardware decode the packet and checksum? */
+	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+		return;
+
+	/* both known and outer_ip must be set for the below code to work */
+	if (!(decoded.known && decoded.outer_ip))
+		return;
+
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+		ipv4 = true;
+	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+		ipv6 = true;
+
+	if (ipv4 &&
+	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+		goto checksum_fail;
+
 	/* likely incorrect csum if alternate IP extension headers found */
-	if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	if (ipv6 &&
+	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+	    rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+		/* don't increment checksum err here, non-fatal err */
 		return;
 
-	/* IP or L4 or outmost IP checksum error */
-	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
-			(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
-		vsi->back->hw_csum_rx_error++;
+	/* there was some L4 error, count error and punt packet to the stack */
+	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+		goto checksum_fail;
+
+	/* handle packets that were not able to be checksummed due
+	 * to arrival speed, in this case the stack can compute
+	 * the csum.
+	 */
+	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
-	}
 
+	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+	 * it in the driver, hardware does not do it for us.
+	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
+	 * so the total length of IPv4 header is IHL*4 bytes
+	 * The UDP_0 bit *may* be set if the *inner* header is UDP
+	 */
 	if (ipv4_tunnel &&
+	    (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
 	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
-		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
-		 * it in the driver, hardware does not do it for us.
-		 * Since L3L4P bit was set we assume a valid IHL value (>=5)
-		 * so the total length of IPv4 header is IHL*4 bytes
-		 */
 		skb->transport_header = skb->mac_header +
 					sizeof(struct ethhdr) +
 					(ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 					    (skb->len - skb_transport_offset(skb)),
 					    IPPROTO_UDP, rx_udp_csum);
 
-		if (udp_hdr(skb)->check != csum) {
-			vsi->back->hw_csum_rx_error++;
-			return;
-		}
+		if (udp_hdr(skb)->check != csum)
+			goto checksum_fail;
 	}
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	return;
+
+checksum_fail:
+	vsi->back->hw_csum_rx_error++;
 }
 
 /**
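The rewrite above turns one coarse error test into an ordered ladder of early returns, with a single checksum_fail label so that only genuine hardware-detected errors bump hw_csum_rx_error. A summary of the resulting decision order, as descriptive comments:

	/* i40e_rx_checksum() decision ladder after this change:
	 * 1. NETIF_F_RXCSUM disabled           -> leave CHECKSUM_NONE
	 * 2. L3L4P unset (hw did not parse)    -> leave CHECKSUM_NONE
	 * 3. ptype not a known outer-IP type   -> leave CHECKSUM_NONE
	 * 4. IPv4 with IPE/EIPE error          -> checksum_fail (counted)
	 * 5. IPv6 TCP L4E + extension headers  -> CHECKSUM_NONE, not counted
	 * 6. any other L4E error               -> checksum_fail (counted)
	 * 7. PPRS set (hw could not keep up)   -> CHECKSUM_NONE, stack computes
	 * 8. VXLAN outer UDPv4, non-UDP inner  -> software verify, else fail
	 * 9. everything clean                  -> CHECKSUM_UNNECESSARY
	 */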
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
+			/* TODO: shouldn't we increment a counter indicating the
+			 * drop?
+			 */
 			goto next_desc;
 		}
 
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 				      struct i40e_ring *tx_ring)
 {
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
 	unsigned int f;
-#endif
 	int count = 0;
 
 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-	count += skb_shinfo(skb)->nr_frags;
-#endif
+
 	count += TXD_USE_COUNT(skb_headlen(skb));
 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
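With I40E_MAX_DATA_PER_TXD dropped to 8192 (see the i40e_txrx.h hunk below), the PAGE_SIZE conditional is no longer needed: TXD_USE_COUNT() gives the right answer for every fragment size on every architecture. A quick worked check, assuming a 256-byte linear head plus sixteen 4 KB page fragments:

	count  = TXD_USE_COUNT(256);	   /* DIV_ROUND_UP(256, 8192)  =  1 */
	count += 16 * TXD_USE_COUNT(4096); /* 16 * DIV_ROUND_UP(4096, 8192) = 16 */
	/* i40e_maybe_stop_tx() is then asked for count + 4 + 1 = 22 slots */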
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e18d7f..30d248bc5d19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -24,7 +27,7 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
 	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
 	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
 #define i40e_rx_desc i40e_32byte_rx_desc
 
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	16383	/* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD	8192
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 #define I40E_TX_FLAGS_CSUM		(u32)(1)
 #define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
 	__I40E_TX_DETECT_HANG,
 	__I40E_HANG_CHECK_ARMED,
 	__I40E_RX_PS_ENABLED,
-	__I40E_RX_LRO_ENABLED,
 	__I40E_RX_16BYTE_DESC_ENABLED,
 };
 
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
 	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
 #define clear_check_for_tx_hang(ring) \
 	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
-	test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
-	set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
-	clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
 #define ring_is_16byte_desc_enabled(ring) \
 	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
 #define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..d3cf5a69de54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
@@ -32,13 +35,11 @@
 #include "i40e_lan_hmc.h"
 
 /* Device IDs */
 #define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
 #define I40E_DEV_ID_QEMU 0x1574
 #define I40E_DEV_ID_KX_A 0x157F
 #define I40E_DEV_ID_KX_B 0x1580
 #define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
 #define I40E_DEV_ID_QSFP_A 0x1583
 #define I40E_DEV_ID_QSFP_B 0x1584
 #define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
 /* Max default timeout in ms, */
 #define I40E_MAX_NVM_TIMEOUT 18000
 
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
 
 /* forward declaration */
 struct i40e_hw;
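With the GTIME resolution corrected to 1 µs per tick, the conversion is a plain multiply. A quick check using the NVM timeout defined just above:

    /* The ms -> GTIME conversion above, checked with the NVM timeout. */
    #include <stdio.h>

    #define MS_TO_GTIME(time) ((time) * 1000) /* 1 usec per GTIME tick */

    int main(void)
    {
            /* 18000 ms -> 18,000,000 ticks; the old /2 form gave half that */
            printf("%d ms = %d GTIME ticks\n", 18000, MS_TO_GTIME(18000));
            return 0;
    }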
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
	I40E_DEBUG_ALL = 0xFFFFFFFF
 };
 
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1 0x10
-#define I40E_PCI_LINK_WIDTH_2 0x20
-#define I40E_PCI_LINK_WIDTH_4 0x40
-#define I40E_PCI_LINK_WIDTH_8 0x80
-#define I40E_PCI_LINK_SPEED_2500 0x1
-#define I40E_PCI_LINK_SPEED_5000 0x2
-#define I40E_PCI_LINK_SPEED_8000 0x3
-
 /* These are structs for managing the hardware information and the operations.
 * The structures of function pointers are filled out at init time when we
 * know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
	u8 loopback;
	/* is Link Status Event notification to SW enabled */
	bool lse_enable;
+	u16 max_frame_size;
+	bool crc_enable;
+	u8 pacing;
 };
 
 struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
	u8 minor_version;
	u8 build_version;
	u8 subbuild_version;
+	u8 driver_string[32];
 };
 
 /* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
	} wb;  /* writeback */
 };
 
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
 enum i40e_rx_desc_status_bits {
	/* Note: These are predefined bit offsets */
	I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
	I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
	I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+	I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+					 << I40E_RXD_QW1_STATUS_SHIFT)
+
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
 #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
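Deriving the mask from a sentinel enum value means it tracks the enum automatically: with I40E_RX_DESC_STATUS_LAST equal to 19, ((1 << 19) - 1) yields 0x7FFFF, covering bits 0-18, where the old hard-coded 0x7FFF stopped at bit 14. A quick standalone check of that identity:

    /* Verifies the "(1 << LAST) - 1" mask construction used above. */
    #include <stdio.h>

    enum { STATUS_UDP_0_SHIFT = 18, STATUS_LAST }; /* LAST == 19 */

    int main(void)
    {
            unsigned long mask = (1UL << STATUS_LAST) - 1;

            printf("mask = 0x%lX\n", mask); /* prints 0x7FFFF: bits 0-18 */
            return 0;
    }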
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
	I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
	I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
	I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
-	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+	I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
 };
 
 enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
-	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-28 are reserved for future use */
-	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
-	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+	/* Note: Values 0-30 are reserved for future use */
	I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
-	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+	/* Note: Value 32 is reserved for future use */
	I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
	I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
-	/* Note: Values 37-38 are reserved for future use */
-	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
-	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+	/* Note: Values 37-40 are reserved for future use */
	I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
	I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
	struct i40e_aqc_vsi_properties_data info;
 };
 
+struct i40e_veb_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 veb_number;
+	u16 vebs_allocated;
+	u16 vebs_unallocated;
+	u16 flags;
+	struct i40e_aqc_get_veb_parameters_completion info;
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct i40e_eth_stats {
	u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
	u64 rx_multicast; /* mprc */
	u64 rx_broadcast; /* bprc */
	u64 rx_discards; /* rdpc */
-	u64 rx_errors; /* repc */
-	u64 rx_missed; /* rmpc */
	u64 rx_unknown_protocol; /* rupp */
	u64 tx_bytes; /* gotc */
	u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
	u64 tx_size_big; /* ptc9522 */
	u64 mac_short_packet_dropped; /* mspdc */
	u64 checksum_error; /* xec */
+	/* flow director stats */
+	u64 fd_atr_match;
+	u64 fd_sb_match;
	/* EEE LPI */
-	bool tx_lpi_status;
-	bool rx_lpi_status;
+	u32 tx_lpi_status;
+	u32 rx_lpi_status;
	u64 tx_lpi_count; /* etlpic */
	u64 rx_lpi_count; /* erlpic */
 };
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d04b7ef..cd18d5689006 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
	int severity;
 };
 
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
 /* VF reset states - these are written into the RSTAT register:
 * I40E_VFGEN_RSTAT1 on the PF
 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d62387..30ef519d4b91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
@@ -77,7 +80,7 @@ struct i40e_vsi {
 #define I40EVF_MIN_TXD 64
 #define I40EVF_MAX_RXD 4096
 #define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
 
 /* Supported Rx Buffer Sizes */
 #define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
	struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
	u32 tx_timeout_count;
	struct list_head mac_filter_list;
+	u32 tx_desc_count;
 
	/* RX */
	struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
	u64 hw_csum_rx_error;
+	u32 rx_desc_count;
	int num_msix_vectors;
	struct msix_entry *msix_entries;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..60407a9df0c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
44 I40EVF_STAT("rx_multicast", current_stats.rx_multicast), 47 I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
45 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), 48 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
46 I40EVF_STAT("rx_discards", current_stats.rx_discards), 49 I40EVF_STAT("rx_discards", current_stats.rx_discards),
47 I40EVF_STAT("rx_errors", current_stats.rx_errors),
48 I40EVF_STAT("rx_missed", current_stats.rx_missed),
49 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), 50 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
50 I40EVF_STAT("tx_bytes", current_stats.tx_bytes), 51 I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
51 I40EVF_STAT("tx_unicast", current_stats.tx_unicast), 52 I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 };
 
 #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
-	(((struct i40evf_adapter *) \
-		netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
+	(((struct i40evf_adapter *) \
+		netdev_priv(_dev))->vsi_res->num_queue_pairs \
+		  * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
 /**
 * i40evf_get_settings - Get Link Speed and Duplex settings
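The reworked macro sizes the queue-stats block from the structure itself: num_queue_pairs, times 2 directions (Tx and Rx), times the number of u64 counters in struct i40e_queue_stats, instead of the old hard-coded * 4. A sketch of the same sizing trick, assuming a two-counter stats struct (packets and bytes, as in i40e):

    /* Sketch of sizing an ethtool stats table from the struct definition.
     * The two-field queue_stats mirrors i40e's packets/bytes counters. */
    #include <stdint.h>
    #include <stdio.h>

    struct queue_stats {
            uint64_t packets;
            uint64_t bytes;
    };

    #define QUEUE_STATS_LEN(num_queue_pairs) \
            ((num_queue_pairs) * 2 * (sizeof(struct queue_stats) / sizeof(uint64_t)))

    int main(void)
    {
            /* 4 queue pairs, Tx + Rx, 2 counters each -> 16 u64 slots */
            printf("stats entries: %zu\n", QUEUE_STATS_LEN(4));
            return 0;
    }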
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
	/* In the future the VF will be able to query the PF for
	 * some information - for now use a dummy value
	 */
-	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->supported = 0;
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->transceiver = XCVR_DUMMY1;
	ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
 static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
	if (sset == ETH_SS_STATS)
-		return I40EVF_STATS_LEN;
+		return I40EVF_STATS_LEN(netdev);
	else
-		return -ENOTSUPP;
+		return -EINVAL;
 }
 
 /**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
 {
	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_ring *tx_ring = adapter->tx_rings[0];
-	struct i40e_ring *rx_ring = adapter->rx_rings[0];
 
	ring->rx_max_pending = I40EVF_MAX_RXD;
	ring->tx_max_pending = I40EVF_MAX_TXD;
-	ring->rx_pending = rx_ring->count;
-	ring->tx_pending = tx_ring->count;
+	ring->rx_pending = adapter->rx_desc_count;
+	ring->tx_pending = adapter->tx_desc_count;
 }
 
 /**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 {
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
-	int i;
 
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
	new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
 
	/* if nothing to do return success */
-	if ((new_tx_count == adapter->tx_rings[0]->count) &&
-	    (new_rx_count == adapter->rx_rings[0]->count))
+	if ((new_tx_count == adapter->tx_desc_count) &&
+	    (new_rx_count == adapter->rx_desc_count))
		return 0;
 
-	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
-		adapter->tx_rings[0]->count = new_tx_count;
-		adapter->rx_rings[0]->count = new_rx_count;
-	}
+	adapter->tx_desc_count = new_tx_count;
+	adapter->rx_desc_count = new_rx_count;
 
	if (netif_running(netdev))
		i40evf_reinit_locked(adapter);
+
	return 0;
 }
 
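Requested ring sizes are rounded up with ALIGN() to the new 32-descriptor multiple before being stored. The rounding in isolation (ALIGN here is the usual power-of-two form, written out for the example):

    /* Sketch of the power-of-two ALIGN() rounding applied to ring sizes. */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* a must be 2^n */
    #define REQ_DESCRIPTOR_MULTIPLE 32

    int main(void)
    {
            printf("500 -> %d\n", ALIGN(500, REQ_DESCRIPTOR_MULTIPLE)); /* 512 */
            printf("512 -> %d\n", ALIGN(512, REQ_DESCRIPTOR_MULTIPLE)); /* 512 */
            return 0;
    }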
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
	ec->rx_max_coalesced_frames = vsi->work_limit;
 
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-		ec->rx_coalesce_usecs = 1;
-	else
-		ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+		ec->use_adaptive_rx_coalesce = 1;
 
	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		ec->tx_coalesce_usecs = 1;
-	else
-		ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+		ec->use_adaptive_tx_coalesce = 1;
+
+	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
	return 0;
 }
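The ITR setting packs two things into one field: the I40E_ITR_DYNAMIC flag bit and the interval in microseconds. get_coalesce() can therefore report adaptive mode and the base interval separately by masking the flag off. A compact standalone sketch of that encoding (the flag value here is illustrative, not the driver's actual constant):

    /* Sketch of the ITR encoding: one flag bit plus a usec interval. */
    #include <stdio.h>

    #define ITR_DYNAMIC 0x8000 /* hypothetical adaptive-mode flag bit */

    int main(void)
    {
            unsigned int itr_setting = ITR_DYNAMIC | 50; /* adaptive, 50 usecs */

            printf("adaptive: %d\n", !!(itr_setting & ITR_DYNAMIC));
            printf("interval: %u usecs\n", itr_setting & ~ITR_DYNAMIC);
            return 0;
    }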
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
	struct i40e_q_vector *q_vector;
	int i;
 
-	if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
-		vsi->work_limit = ec->tx_max_coalesced_frames;
+	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+	else
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+	else if (ec->use_adaptive_tx_coalesce)
+		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	else
+		return -EINVAL;
+
+	if (ec->use_adaptive_rx_coalesce)
+		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+	else
+		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+	if (ec->use_adaptive_tx_coalesce)
+		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+	else
+		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
-	switch (ec->rx_coalesce_usecs) {
-	case 0:
-		vsi->rx_itr_setting = 0;
+	for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+		q_vector = adapter->q_vector[i];
+		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+		wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+		wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+		i40e_flush(hw);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+				    struct ethtool_rxnfc *cmd)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+	/* We always hash on IP src and dest addresses */
+	cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
-	case 1:
-		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
-				       | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	case UDP_V4_FLOW:
+		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
-	default:
-		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-			return -EINVAL;
-		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		break;
+
+	case TCP_V6_FLOW:
+		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
+	case UDP_V6_FLOW:
+		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		break;
+	default:
+		cmd->data = 0;
+		return -EINVAL;
	}
 
-	switch (ec->tx_coalesce_usecs) {
-	case 0:
-		vsi->tx_itr_setting = 0;
+	return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+			    struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->vsi_res->num_queue_pairs;
+		ret = 0;
		break;
-	case 1:
-		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
-				       | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+	case ETHTOOL_GRXFH:
+		ret = i40evf_get_rss_hash_opts(adapter, cmd);
		break;
	default:
-		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+				   struct ethtool_rxnfc *nfc)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+	/* RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	/* We need at least the IP SRC and DEST fields for hashing */
+	if (!(nfc->data & RXH_IP_SRC) ||
+	    !(nfc->data & RXH_IP_DST))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			break;
+		default:
			return -EINVAL;
-		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+		}
+		break;
+	case TCP_V6_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V4_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			break;
+		default:
+			return -EINVAL;
+		}
		break;
+	case UDP_V6_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		if ((nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		break;
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if ((nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		break;
+	case IPV4_FLOW:
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		break;
+	case IPV6_FLOW:
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		break;
+	default:
+		return -EINVAL;
	}
 
-	for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
-		q_vector = adapter->q_vector[i];
-		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
-		wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
-		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
-		wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
-		i40e_flush(hw);
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+			    struct ethtool_rxnfc *cmd)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = i40evf_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+				struct ethtool_channels *ch)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* Report maximum channels */
+	ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+	ch->max_other = NONQ_VECS;
+	ch->other_count = NONQ_VECS;
+
+	ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	u32 hlut_val;
+	int i, j;
+
+	for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+		hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+		indir[j++] = hlut_val & 0xff;
+		indir[j++] = (hlut_val >> 8) & 0xff;
+		indir[j++] = (hlut_val >> 16) & 0xff;
+		indir[j++] = (hlut_val >> 24) & 0xff;
+	}
+	return 0;
+}
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+			   const u8 *key)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	u32 hlut_val;
+	int i, j;
+
+	for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+		hlut_val = indir[j++];
+		hlut_val |= indir[j++] << 8;
+		hlut_val |= indir[j++] << 16;
+		hlut_val |= indir[j++] << 24;
+		wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
	}
 
	return 0;
 }
 
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
	.get_settings = i40evf_get_settings,
	.get_drvinfo = i40evf_get_drvinfo,
	.get_link = ethtool_op_get_link,
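The new i40evf_get_rss_hash_opts()/i40evf_set_rss_hash_opt() above reassemble the 64-bit HENA from two 32-bit VFQF_HENA registers and split it again on writeback. The same idiom in isolation, with plain variables standing in for the registers:

    /* Sketch of the 64-bit read/write split used for I40E_VFQF_HENA(0/1). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2]; /* stand-ins for VFQF_HENA(0) and (1) */

    static void write_hena(uint64_t hena)
    {
            regs[0] = (uint32_t)hena;         /* low 32 bits */
            regs[1] = (uint32_t)(hena >> 32); /* high 32 bits */
    }

    static uint64_t read_hena(void)
    {
            return (uint64_t)regs[0] | ((uint64_t)regs[1] << 32);
    }

    int main(void)
    {
            write_hena((uint64_t)1 << 33); /* e.g. PCTYPE_NONF_IPV4_TCP */
            printf("round-trip ok: %d\n", read_hena() == ((uint64_t)1 << 33));
            return 0;
    }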
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
	.set_msglevel = i40evf_set_msglevel,
	.get_coalesce = i40evf_get_coalesce,
	.set_coalesce = i40evf_set_coalesce,
+	.get_rxnfc = i40evf_get_rxnfc,
+	.set_rxnfc = i40evf_set_rxnfc,
+	.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+	.get_rxfh = i40evf_get_rxfh,
+	.set_rxfh = i40evf_set_rxfh,
+	.get_channels = i40evf_get_channels,
 };
 
 /**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
 **/
 void i40evf_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+	netdev->ethtool_ops = &i40evf_ethtool_ops;
 }
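The get_rxfh()/set_rxfh() handlers added in this file pack four 8-bit queue indices into each 32-bit HLUT register, lowest-numbered entry in the least significant byte. A standalone sketch of that packing:

    /* Sketch of the 4-entries-per-register RSS LUT packing used above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_lut(const uint8_t *indir) /* 4 table entries */
    {
            return (uint32_t)indir[0] |
                   ((uint32_t)indir[1] << 8) |
                   ((uint32_t)indir[2] << 16) |
                   ((uint32_t)indir[3] << 24);
    }

    static void unpack_lut(uint32_t val, uint8_t *indir)
    {
            indir[0] = val & 0xff;
            indir[1] = (val >> 8) & 0xff;
            indir[2] = (val >> 16) & 0xff;
            indir[3] = (val >> 24) & 0xff;
    }

    int main(void)
    {
            uint8_t in[4] = { 0, 1, 2, 3 }, out[4];

            unpack_lut(pack_lut(in), out);
            printf("entry 3 round-trips to %u\n", out[3]);
            return 0;
    }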
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..7fc5f3b5d6bf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
@@ -25,13 +28,15 @@
25#include "i40e_prototype.h" 28#include "i40e_prototype.h"
26static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); 29static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
27static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); 30static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
31static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
32static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
28static int i40evf_close(struct net_device *netdev); 33static int i40evf_close(struct net_device *netdev);
29 34
30char i40evf_driver_name[] = "i40evf"; 35char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 37 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 38
34#define DRV_VERSION "0.9.16" 39#define DRV_VERSION "0.9.34"
35const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
37 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
	adapter->tx_timeout_count++;
-	dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
	f = i40evf_find_vlan(adapter, vlan);
	if (NULL == f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
-		if (NULL == f) {
-			dev_info(&adapter->pdev->dev,
-				 "%s: no memory for new VLAN filter\n",
-				 __func__);
+		if (NULL == f)
			return NULL;
-		}
+
		f->vlan = vlan;
 
		INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}
-	return;
 }
 
 /**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
	if (NULL == f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (NULL == f) {
-			dev_info(&adapter->pdev->dev,
-				 "%s: no memory for new filter\n", __func__);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			return NULL;
		}
 
-		memcpy(f->macaddr, macaddr, ETH_ALEN);
+		ether_addr_copy(f->macaddr, macaddr);
 
		list_add(&f->list, &adapter->mac_filter_list);
		f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 
	f = i40evf_add_filter(adapter, addr->sa_data);
	if (f) {
-		memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
-		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
-		       netdev->addr_len);
+		ether_addr_copy(hw->mac.addr, addr->sa_data);
+		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}
 
	return (f == NULL) ? -ENOMEM : 0;
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		bool found = false;
 
-		if (f->macaddr[0] & 0x01) {
+		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	struct i40evf_mac_filter *f;
 
+	if (adapter->state == __I40EVF_DOWN)
+		return;
+
	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
-	while (vectors >= vector_threshold) {
-		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				      vectors);
-		if (!err) /* Success in acquiring all requested vectors. */
-			break;
-		else if (err < 0)
-			vectors = 0; /* Nasty failure, quit now */
-		else /* err == number of vectors we should try again with */
-			vectors = err;
-	}
-
-	if (vectors < vector_threshold) {
-		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vector_threshold, vectors);
+	if (err < 0) {
+		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
-		err = -EIO;
-	} else {
-		/* Adjust for only the vectors we'll use, which is minimum
-		 * of max_msix_q_vectors + NONQ_VECS, or the number of
-		 * vectors we were allocated.
-		 */
-		adapter->num_msix_vectors = vectors;
+		return err;
	}
-	return err;
+
+	/* Adjust for only the vectors we'll use, which is minimum
+	 * of max_msix_q_vectors + NONQ_VECS, or the number of
+	 * vectors we were allocated.
+	 */
+	adapter->num_msix_vectors = err;
+	return 0;
 }
 
 /**
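pci_enable_msix_range() collapses the old retry loop: it either allocates between the minimum and maximum requested vectors and returns the count it obtained, or returns a negative errno. The caller simply keeps the returned count, as sketched here against a hypothetical allocator with the same contract (try_alloc() is a stand-in, not a PCI core function):

    /* Sketch of the min/max allocation contract replacing the retry loop. */
    #include <stdio.h>

    static int try_alloc(int minvec, int maxvec)
    {
            int available = 3; /* pretend the platform grants 3 vectors */

            if (available < minvec)
                    return -28; /* like -ENOSPC from the PCI core */
            return available < maxvec ? available : maxvec;
    }

    int main(void)
    {
            int err = try_alloc(2, 8); /* need at least 2, want up to 8 */

            if (err < 0)
                    printf("allocation failed: %d\n", err);
            else
                    printf("got %d vectors\n", err); /* keep the count */
            return 0;
    }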
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1096 tx_ring->queue_index = i; 1087 tx_ring->queue_index = i;
1097 tx_ring->netdev = adapter->netdev; 1088 tx_ring->netdev = adapter->netdev;
1098 tx_ring->dev = &adapter->pdev->dev; 1089 tx_ring->dev = &adapter->pdev->dev;
1099 tx_ring->count = I40EVF_DEFAULT_TXD; 1090 tx_ring->count = adapter->tx_desc_count;
1100 adapter->tx_rings[i] = tx_ring; 1091 adapter->tx_rings[i] = tx_ring;
1101 1092
1102 rx_ring = &tx_ring[1]; 1093 rx_ring = &tx_ring[1];
1103 rx_ring->queue_index = i; 1094 rx_ring->queue_index = i;
1104 rx_ring->netdev = adapter->netdev; 1095 rx_ring->netdev = adapter->netdev;
1105 rx_ring->dev = &adapter->pdev->dev; 1096 rx_ring->dev = &adapter->pdev->dev;
1106 rx_ring->count = I40EVF_DEFAULT_RXD; 1097 rx_ring->count = adapter->rx_desc_count;
1107 adapter->rx_rings[i] = rx_ring; 1098 adapter->rx_rings[i] = rx_ring;
1108 } 1099 }
1109 1100
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
	v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
	v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
 
-	/* A failure in MSI-X entry allocation isn't fatal, but it does
-	 * mean we disable MSI-X capabilities of the adapter.
-	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
-			       i40evf_napi_poll, 64);
+			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
		adapter->q_vector[q_idx] = q_vector;
	}
 
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
-
-	return;
 }
 
 /**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
		goto restart_watchdog;
 
	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		dev_info(&adapter->pdev->dev, "Checking for redemption\n");
		if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
	    (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
-		dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
-		dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_pending = 0;
		adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
 }
 
 /**
- * i40evf_configure_rss - increment to next available tx queue
+ * next_queue - increment to next available tx queue
 * @adapter: board private structure
 * @j: queue counter
 *
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (rstat_val != I40E_VFR_VFACTIVE) {
-			dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+		if (rstat_val != I40E_VFR_VFACTIVE)
			break;
-		} else {
+		else
			msleep(I40EVF_RESET_WAIT_MS);
-		}
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
-		dev_err(&adapter->pdev->dev, "Reset was not detected\n");
		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
		goto continue_reset; /* act like the reset happened */
	}
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (rstat_val == I40E_VFR_VFACTIVE) {
-			dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+		if (rstat_val == I40E_VFR_VFACTIVE)
			break;
-		} else {
+		else
			msleep(I40EVF_RESET_WAIT_MS);
-		}
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		/* reset never finished */
-		dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			rstat_val);
		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
-		if (netif_running(adapter->netdev))
-			i40evf_close(adapter->netdev);
-
+		if (netif_running(adapter->netdev)) {
+			set_bit(__I40E_DOWN, &adapter->vsi.state);
+			i40evf_down(adapter);
+			i40evf_free_traffic_irqs(adapter);
+			i40evf_free_all_tx_resources(adapter);
+			i40evf_free_all_rx_resources(adapter);
+		}
		i40evf_free_misc_irq(adapter);
		i40evf_reset_interrupt_capability(adapter);
		i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
	}
	return;
 reset_err:
-	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(adapter->netdev);
 }
 
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
	struct i40e_arq_event_info event;
	struct i40e_virtchnl_msg *v_msg;
	i40e_status ret;
+	u32 val, oldval;
	u16 pending;
 
	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
 
	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
-	if (!event.msg_buf) {
-		dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
-			 __func__);
+	if (!event.msg_buf)
		return;
-	}
+
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
	do {
		ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
		}
	} while (pending);
 
+	/* check for error indications */
+	val = rd32(hw, hw->aq.arq.len);
+	oldval = val;
+	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+	}
+	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+	}
+	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.arq.len, val);
+
+	val = rd32(hw, hw->aq.asq.len);
+	oldval = val;
+	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+	}
+	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+	}
+	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.asq.len, val);
+
	/* re-enable Admin queue interrupt cause */
	i40evf_misc_irq_enable(adapter);
 
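The new error handling follows a read, test-and-clear, write-back-only-if-changed pattern, so the register is left untouched when no error bit was set. The same pattern in isolation (the bit positions here are illustrative, not the hardware's):

    /* Sketch of the read/clear/conditional-writeback pattern used for the
     * ARQ/ASQ length registers above. */
    #include <stdint.h>
    #include <stdio.h>

    #define VFE_MASK  (1u << 28) /* hypothetical error bit positions */
    #define OVFL_MASK (1u << 29)

    static uint32_t reg = VFE_MASK | 64; /* error flag + queue length */

    int main(void)
    {
            uint32_t val = reg, oldval = val;

            if (val & VFE_MASK) {
                    printf("VF error detected\n");
                    val &= ~VFE_MASK;
            }
            if (val & OVFL_MASK) {
                    printf("overflow detected\n");
                    val &= ~OVFL_MASK;
            }
            if (oldval != val)
                    reg = val; /* write back only when a bit was cleared */
            printf("reg = 0x%X\n", reg);
            return 0;
    }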
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
	int i, err = 0;
 
	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		adapter->tx_rings[i]->count = adapter->tx_desc_count;
		err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
		if (!err)
			continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
	int i, err = 0;
 
	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		adapter->rx_rings[i]->count = adapter->rx_desc_count;
		err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
		if (!err)
			continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
	if (adapter->state <= __I40EVF_DOWN)
		return 0;
 
-	/* signal that we are down to the interrupt handler */
-	adapter->state = __I40EVF_DOWN;
 
	set_bit(__I40E_DOWN, &adapter->vsi.state);
 
	i40evf_down(adapter);
+	adapter->state = __I40EVF_DOWN;
	i40evf_free_traffic_irqs(adapter);
 
	i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
 
	WARN_ON(in_interrupt());
 
-	adapter->state = __I40EVF_RESETTING;
-
	i40evf_down(adapter);
 
	/* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
		return;
 
 err_reinit:
-	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(netdev);
 }
 
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
	}
	err = i40evf_check_reset_complete(hw);
	if (err) {
-		dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			err);
		goto err;
	}
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
		break;
	case __I40EVF_INIT_VERSION_CHECK:
		if (!i40evf_asq_done(hw)) {
-			dev_err(&pdev->dev, "Admin queue command never completed.\n");
+			dev_err(&pdev->dev, "Admin queue command never completed\n");
			goto err;
		}
 
		/* aq msg sent, awaiting reply */
		err = i40evf_verify_api_ver(adapter);
		if (err) {
-			dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
+			dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
				err);
			goto err;
		}
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
	netdev->hw_features &= ~NETIF_F_RXCSUM;
 
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-		dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		random_ether_addr(adapter->hw.mac.addr);
	}
-	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
-	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
	if (NULL == f)
		goto err_sw_init;
 
-	memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+	ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
	f->add = true;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
2098 adapter->watchdog_timer.data = (unsigned long)adapter; 2114 adapter->watchdog_timer.data = (unsigned long)adapter;
2099 mod_timer(&adapter->watchdog_timer, jiffies + 1); 2115 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2100 2116
2117 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
2118 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
2101 err = i40evf_init_interrupt_scheme(adapter); 2119 err = i40evf_init_interrupt_scheme(adapter);
2102 if (err) 2120 if (err)
2103 goto err_sw_init; 2121 goto err_sw_init;
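The two added assignments seed the ring lengths before i40evf_init_interrupt_scheme() runs, presumably because the queue allocation it performs consumes tx_desc_count/rx_desc_count; without them the first open would see zero-length rings unless ethtool had already set a size. Sketch:

	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;	/* driver default ring length */
	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
	err = i40evf_init_interrupt_scheme(adapter);	/* allocates queues from these */
	if (err)
		goto err_sw_init;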
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
2114 adapter->vsi.back = adapter; 2132 adapter->vsi.back = adapter;
2115 adapter->vsi.base_vector = 1; 2133 adapter->vsi.base_vector = 1;
2116 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; 2134 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2117 adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC; 2135 adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
2118 adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; 2136 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
2137 adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
2138 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
2119 adapter->vsi.netdev = adapter->netdev; 2139 adapter->vsi.netdev = adapter->netdev;
2120 2140
2121 if (!adapter->netdev_registered) { 2141 if (!adapter->netdev_registered) {
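The ITR fields pack a mode flag together with an interval. Assuming the i40e macros keep their usual shape (I40E_ITR_DYNAMIC as a high flag bit, ITR_REG_TO_USEC() doubling the 2 usec register granularity), the new settings keep adaptive moderation enabled while also seeding a concrete default interval:

	/* assumed macro shapes, from i40e_txrx.h */
	#define I40E_ITR_DYNAMIC		0x8000
	#define ITR_REG_TO_USEC(itr_reg)	((itr_reg) << 1)

	vsi->rx_itr_setting = I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_RX_DEF);
	vsi->tx_itr_setting = I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_TX_DEF);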
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
2128 2148
2129 netif_tx_stop_all_queues(netdev); 2149 netif_tx_stop_all_queues(netdev);
2130 2150
2131 dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr); 2151 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2132 if (netdev->features & NETIF_F_GRO) 2152 if (netdev->features & NETIF_F_GRO)
2133 dev_info(&pdev->dev, "GRO is enabled\n"); 2153 dev_info(&pdev->dev, "GRO is enabled\n");
2134 2154
@@ -2152,12 +2172,11 @@ err_alloc:
2152err: 2172err:
2153 /* Things went into the weeds, so try again later */ 2173 /* Things went into the weeds, so try again later */
2154 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { 2174 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2155 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); 2175 dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
2156 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 2176 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2157 return; /* do not reschedule */ 2177 return; /* do not reschedule */
2158 } 2178 }
2159 schedule_delayed_work(&adapter->init_task, HZ * 3); 2179 schedule_delayed_work(&adapter->init_task, HZ * 3);
2160 return;
2161} 2180}
2162 2181
2163/** 2182/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f012647d..2dc0bac76717 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
216 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + 219 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
217 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); 220 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
218 vqci = kzalloc(len, GFP_ATOMIC); 221 vqci = kzalloc(len, GFP_ATOMIC);
219 if (!vqci) { 222 if (!vqci)
220 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
221 __func__);
222 return; 223 return;
223 } 224
224 vqci->vsi_id = adapter->vsi_res->vsi_id; 225 vqci->vsi_id = adapter->vsi_res->vsi_id;
225 vqci->num_queue_pairs = pairs; 226 vqci->num_queue_pairs = pairs;
226 vqpi = vqci->qpair; 227 vqpi = vqci->qpair;
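This pattern repeats through the rest of the file: the dev_err() after a failed kzalloc() is dropped because the allocator already logs a warning with a backtrace on failure, so the per-call message adds nothing but code. The result is the idiomatic:

	vqci = kzalloc(len, GFP_ATOMIC);
	if (!vqci)
		return;		/* the mm core has already logged the failure */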
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
232 vqpi->txq.queue_id = i; 233 vqpi->txq.queue_id = i;
233 vqpi->txq.ring_len = adapter->tx_rings[i]->count; 234 vqpi->txq.ring_len = adapter->tx_rings[i]->count;
234 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; 235 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
236 vqpi->txq.headwb_enabled = 1;
237 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
238 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
235 239
236 vqpi->rxq.vsi_id = vqci->vsi_id; 240 vqpi->rxq.vsi_id = vqci->vsi_id;
237 vqpi->rxq.queue_id = i; 241 vqpi->rxq.queue_id = i;
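Head writeback lets the hardware report TX completion progress by DMA-writing the ring head to host memory instead of the driver polling a register. The address chosen is the first byte past the last descriptor; this assumes the companion ring-allocation code reserves a u32 there for the head value:

	vqpi->txq.headwb_enabled = 1;
	/* head lands immediately after the descriptor ring */
	vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
				    (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));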
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
329 (adapter->num_msix_vectors * 333 (adapter->num_msix_vectors *
330 sizeof(struct i40e_virtchnl_vector_map)); 334 sizeof(struct i40e_virtchnl_vector_map));
331 vimi = kzalloc(len, GFP_ATOMIC); 335 vimi = kzalloc(len, GFP_ATOMIC);
332 if (!vimi) { 336 if (!vimi)
333 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
334 __func__);
335 return; 337 return;
336 }
337 338
338 vimi->num_vectors = adapter->num_msix_vectors; 339 vimi->num_vectors = adapter->num_msix_vectors;
339 /* Queue vectors first */ 340 /* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
390 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 391 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
391 (count * sizeof(struct i40e_virtchnl_ether_addr)); 392 (count * sizeof(struct i40e_virtchnl_ether_addr));
392 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 393 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
393 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 394 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
394 __func__); 395 __func__);
395 count = (I40EVF_MAX_AQ_BUF_SIZE - 396 count = (I40EVF_MAX_AQ_BUF_SIZE -
396 sizeof(struct i40e_virtchnl_ether_addr_list)) / 397 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
399 } 400 }
400 401
401 veal = kzalloc(len, GFP_ATOMIC); 402 veal = kzalloc(len, GFP_ATOMIC);
402 if (!veal) { 403 if (!veal)
403 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
404 __func__);
405 return; 404 return;
406 } 405
407 veal->vsi_id = adapter->vsi_res->vsi_id; 406 veal->vsi_id = adapter->vsi_res->vsi_id;
408 veal->num_elements = count; 407 veal->num_elements = count;
409 list_for_each_entry(f, &adapter->mac_filter_list, list) { 408 list_for_each_entry(f, &adapter->mac_filter_list, list) {
410 if (f->add) { 409 if (f->add) {
411 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 410 ether_addr_copy(veal->list[i].addr, f->macaddr);
412 i++; 411 i++;
413 f->add = false; 412 f->add = false;
414 } 413 }
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
454 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 453 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
455 (count * sizeof(struct i40e_virtchnl_ether_addr)); 454 (count * sizeof(struct i40e_virtchnl_ether_addr));
456 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 455 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
457 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 456 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
458 __func__); 457 __func__);
459 count = (I40EVF_MAX_AQ_BUF_SIZE - 458 count = (I40EVF_MAX_AQ_BUF_SIZE -
460 sizeof(struct i40e_virtchnl_ether_addr_list)) / 459 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
462 len = I40EVF_MAX_AQ_BUF_SIZE; 461 len = I40EVF_MAX_AQ_BUF_SIZE;
463 } 462 }
464 veal = kzalloc(len, GFP_ATOMIC); 463 veal = kzalloc(len, GFP_ATOMIC);
465 if (!veal) { 464 if (!veal)
466 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
467 __func__);
468 return; 465 return;
469 } 466
470 veal->vsi_id = adapter->vsi_res->vsi_id; 467 veal->vsi_id = adapter->vsi_res->vsi_id;
471 veal->num_elements = count; 468 veal->num_elements = count;
472 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 469 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
473 if (f->remove) { 470 if (f->remove) {
474 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 471 ether_addr_copy(veal->list[i].addr, f->macaddr);
475 i++; 472 i++;
476 list_del(&f->list); 473 list_del(&f->list);
477 kfree(f); 474 kfree(f);
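Note the _safe iterator in this loop: matched filters are list_del()'d and kfree()'d in place, so a plain list_for_each_entry() would advance through freed memory. list_for_each_entry_safe() caches the next node in ftmp first; in sketch form, using the driver's filter type:

	struct i40evf_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i++].addr, f->macaddr);
			list_del(&f->list);	/* safe: successor already cached */
			kfree(f);
		}
	}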
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
518 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 515 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
519 (count * sizeof(u16)); 516 (count * sizeof(u16));
520 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 517 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
521 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 518 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
522 __func__); 519 __func__);
523 count = (I40EVF_MAX_AQ_BUF_SIZE - 520 count = (I40EVF_MAX_AQ_BUF_SIZE -
524 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 521 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
526 len = I40EVF_MAX_AQ_BUF_SIZE; 523 len = I40EVF_MAX_AQ_BUF_SIZE;
527 } 524 }
528 vvfl = kzalloc(len, GFP_ATOMIC); 525 vvfl = kzalloc(len, GFP_ATOMIC);
529 if (!vvfl) { 526 if (!vvfl)
530 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
531 __func__);
532 return; 527 return;
533 } 528
534 vvfl->vsi_id = adapter->vsi_res->vsi_id; 529 vvfl->vsi_id = adapter->vsi_res->vsi_id;
535 vvfl->num_elements = count; 530 vvfl->num_elements = count;
536 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 531 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
580 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 575 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
581 (count * sizeof(u16)); 576 (count * sizeof(u16));
582 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 577 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
583 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 578 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
584 __func__); 579 __func__);
585 count = (I40EVF_MAX_AQ_BUF_SIZE - 580 count = (I40EVF_MAX_AQ_BUF_SIZE -
586 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 581 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
588 len = I40EVF_MAX_AQ_BUF_SIZE; 583 len = I40EVF_MAX_AQ_BUF_SIZE;
589 } 584 }
590 vvfl = kzalloc(len, GFP_ATOMIC); 585 vvfl = kzalloc(len, GFP_ATOMIC);
591 if (!vvfl) { 586 if (!vvfl)
592 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
593 __func__);
594 return; 587 return;
595 } 588
596 vvfl->vsi_id = adapter->vsi_res->vsi_id; 589 vvfl->vsi_id = adapter->vsi_res->vsi_id;
597 vvfl->num_elements = count; 590 vvfl->num_elements = count;
598 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 591 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
721 return; 714 return;
722 } 715 }
723 if (v_opcode != adapter->current_op) { 716 if (v_opcode != adapter->current_op) {
724 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n", 717 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
725 __func__, adapter->current_op, v_opcode); 718 __func__, adapter->current_op, v_opcode);
726 /* We're probably completely screwed at this point, but clear 719 /* We're probably completely screwed at this point, but clear
727 * the current op and try to carry on.... 720 * the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
730 return; 723 return;
731 } 724 }
732 if (v_retval) { 725 if (v_retval) {
733 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n", 726 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
734 __func__, v_retval, v_opcode); 727 __func__, v_retval, v_opcode);
735 } 728 }
736 switch (v_opcode) { 729 switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
745 stats->tx_broadcast; 738 stats->tx_broadcast;
746 adapter->net_stats.rx_bytes = stats->rx_bytes; 739 adapter->net_stats.rx_bytes = stats->rx_bytes;
747 adapter->net_stats.tx_bytes = stats->tx_bytes; 740 adapter->net_stats.tx_bytes = stats->tx_bytes;
748 adapter->net_stats.rx_errors = stats->rx_errors;
749 adapter->net_stats.tx_errors = stats->tx_errors; 741 adapter->net_stats.tx_errors = stats->tx_errors;
750 adapter->net_stats.rx_dropped = stats->rx_missed; 742 adapter->net_stats.rx_dropped = stats->rx_discards;
751 adapter->net_stats.tx_dropped = stats->tx_discards; 743 adapter->net_stats.tx_dropped = stats->tx_discards;
752 adapter->current_stats = *stats; 744 adapter->current_stats = *stats;
753 } 745 }
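The stats plumbing changes twice here: rx_dropped is now fed from the PF-reported rx_discards counter instead of rx_missed, and the rx_errors assignment disappears entirely, presumably because the VF gets no trustworthy per-VSI receive error count from the PF. What remains is a straight field-for-field copy:

	adapter->net_stats.tx_errors  = stats->tx_errors;
	adapter->net_stats.rx_dropped = stats->rx_discards;	/* was rx_missed */
	adapter->net_stats.tx_dropped = stats->tx_discards;
	adapter->current_stats = *stats;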
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
781 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); 773 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
782 break; 774 break;
783 default: 775 default:
784 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n", 776 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
785 __func__, v_opcode); 777 __func__, v_opcode);
786 break; 778 break;
787 } /* switch v_opcode */ 779 } /* switch v_opcode */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..a2db388cc31e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27/* e1000_82575 24/* e1000_82575
28 * e1000_82576 25 * e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
73static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 70static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
74static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 71static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
75static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 72static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
76static const u16 e1000_82580_rxpbs_table[] = 73static const u16 e1000_82580_rxpbs_table[] = {
77 { 36, 72, 144, 1, 2, 4, 8, 16, 74 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
78 35, 70, 140 };
79 75
80/** 76/**
81 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
159 ret_val = igb_check_for_link_82575(hw); 155 ret_val = igb_check_for_link_82575(hw);
160 } 156 }
161 157
162 return E1000_SUCCESS; 158 return 0;
163} 159}
164 160
165/** 161/**
@@ -526,7 +522,7 @@ out:
526static s32 igb_get_invariants_82575(struct e1000_hw *hw) 522static s32 igb_get_invariants_82575(struct e1000_hw *hw)
527{ 523{
528 struct e1000_mac_info *mac = &hw->mac; 524 struct e1000_mac_info *mac = &hw->mac;
529 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 525 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
530 s32 ret_val; 526 s32 ret_val;
531 u32 ctrl_ext = 0; 527 u32 ctrl_ext = 0;
532 u32 link_mode = 0; 528 u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
1008static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 1004static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1009{ 1005{
1010 struct e1000_phy_info *phy = &hw->phy; 1006 struct e1000_phy_info *phy = &hw->phy;
1011 s32 ret_val = 0;
1012 u16 data; 1007 u16 data;
1013 1008
1014 data = rd32(E1000_82580_PHY_POWER_MGMT); 1009 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1032 data &= ~E1000_82580_PM_SPD; } 1027 data &= ~E1000_82580_PM_SPD; }
1033 1028
1034 wr32(E1000_82580_PHY_POWER_MGMT, data); 1029 wr32(E1000_82580_PHY_POWER_MGMT, data);
1035 return ret_val; 1030 return 0;
1036} 1031}
1037 1032
1038/** 1033/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1052static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 1047static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1053{ 1048{
1054 struct e1000_phy_info *phy = &hw->phy; 1049 struct e1000_phy_info *phy = &hw->phy;
1055 s32 ret_val = 0;
1056 u16 data; 1050 u16 data;
1057 1051
1058 data = rd32(E1000_82580_PHY_POWER_MGMT); 1052 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1077 } 1071 }
1078 1072
1079 wr32(E1000_82580_PHY_POWER_MGMT, data); 1073 wr32(E1000_82580_PHY_POWER_MGMT, data);
1080 return ret_val; 1074 return 0;
1081} 1075}
1082 1076
1083/** 1077/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1180{ 1174{
1181 u32 swfw_sync; 1175 u32 swfw_sync;
1182 1176
1183 while (igb_get_hw_semaphore(hw) != 0); 1177 while (igb_get_hw_semaphore(hw) != 0)
1184 /* Empty */ 1178 ; /* Empty */
1185 1179
1186 swfw_sync = rd32(E1000_SW_FW_SYNC); 1180 swfw_sync = rd32(E1000_SW_FW_SYNC);
1187 swfw_sync &= ~mask; 1181 swfw_sync &= ~mask;
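A small style fix with real value: the lone ';' moves onto its own line so the intentionally empty loop body is explicit, where the old trailing semicolon was easy to misread as a typo:

	/* spin until the hardware semaphore is ours */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */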
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1203static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 1197static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1204{ 1198{
1205 s32 timeout = PHY_CFG_TIMEOUT; 1199 s32 timeout = PHY_CFG_TIMEOUT;
1206 s32 ret_val = 0;
1207 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 1200 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1208 1201
1209 if (hw->bus.func == 1) 1202 if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1216 while (timeout) { 1209 while (timeout) {
1217 if (rd32(E1000_EEMNGCTL) & mask) 1210 if (rd32(E1000_EEMNGCTL) & mask)
1218 break; 1211 break;
1219 msleep(1); 1212 usleep_range(1000, 2000);
1220 timeout--; 1213 timeout--;
1221 } 1214 }
1222 if (!timeout) 1215 if (!timeout)
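The msleep()-to-usleep_range() conversions in this file follow Documentation/timers/timers-howto.txt: msleep() rounds up to jiffies, so msleep(1) can oversleep badly on a low-HZ kernel, while usleep_range() is hrtimer-backed and its max bound lets the scheduler coalesce nearby wakeups. The rule of thumb the patch applies:

	usleep_range(1000, 2000);	/* ~1 ms waits: hrtimer precision */
	usleep_range(10000, 20000);	/* ~10 ms waits */
	msleep(100);			/* still fine for 20 ms and longer */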
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1227 (hw->phy.type == e1000_phy_igp_3)) 1220 (hw->phy.type == e1000_phy_igp_3))
1228 igb_phy_init_script_igp3(hw); 1221 igb_phy_init_script_igp3(hw);
1229 1222
1230 return ret_val; 1223 return 0;
1231} 1224}
1232 1225
1233/** 1226/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1269 1262
1270 if (hw->phy.media_type != e1000_media_type_copper) { 1263 if (hw->phy.media_type != e1000_media_type_copper) {
1271 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1264 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1272 &duplex); 1265 &duplex);
1273 /* Use this flag to determine if link needs to be checked or 1266 /* Use this flag to determine if link needs to be checked or
1274 * not. If we have link clear the flag so that we do not 1267 * not. If we have link clear the flag so that we do not
1275 * continue to check for link. 1268 * continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1316 1309
1317 /* flush the write to verify completion */ 1310 /* flush the write to verify completion */
1318 wrfl(); 1311 wrfl();
1319 msleep(1); 1312 usleep_range(1000, 2000);
1320} 1313}
1321 1314
1322/** 1315/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1411 1404
1412 /* flush the write to verify completion */ 1405 /* flush the write to verify completion */
1413 wrfl(); 1406 wrfl();
1414 msleep(1); 1407 usleep_range(1000, 2000);
1415 } 1408 }
1416} 1409}
1417 1410
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1436 1429
1437 /* set the completion timeout for interface */ 1430 /* set the completion timeout for interface */
1438 ret_val = igb_set_pcie_completion_timeout(hw); 1431 ret_val = igb_set_pcie_completion_timeout(hw);
1439 if (ret_val) { 1432 if (ret_val)
1440 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1433 hw_dbg("PCI-E Set completion timeout has failed.\n");
1441 }
1442 1434
1443 hw_dbg("Masking off all interrupts\n"); 1435 hw_dbg("Masking off all interrupts\n");
1444 wr32(E1000_IMC, 0xffffffff); 1436 wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1447 wr32(E1000_TCTL, E1000_TCTL_PSP); 1439 wr32(E1000_TCTL, E1000_TCTL_PSP);
1448 wrfl(); 1440 wrfl();
1449 1441
1450 msleep(10); 1442 usleep_range(10000, 20000);
1451 1443
1452 ctrl = rd32(E1000_CTRL); 1444 ctrl = rd32(E1000_CTRL);
1453 1445
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1622{ 1614{
1623 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1615 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1624 bool pcs_autoneg; 1616 bool pcs_autoneg;
1625 s32 ret_val = E1000_SUCCESS; 1617 s32 ret_val = 0;
1626 u16 data; 1618 u16 data;
1627 1619
1628 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1620 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1676 hw->mac.type == e1000_82576) { 1668 hw->mac.type == e1000_82576) {
1677 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1669 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1678 if (ret_val) { 1670 if (ret_val) {
1679 			printk(KERN_DEBUG "NVM Read Error\n\n"); 1671 			hw_dbg("NVM Read Error\n\n");
1680 return ret_val; 1672 return ret_val;
1681 } 1673 }
1682 1674
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1689 * link either autoneg or be forced to 1000/Full 1681 * link either autoneg or be forced to 1000/Full
1690 */ 1682 */
1691 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1683 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1692 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1684 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1693 1685
1694 /* set speed of 1000/Full if speed/duplex is forced */ 1686 /* set speed of 1000/Full if speed/duplex is forced */
1695 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1687 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1925 } 1917 }
1926 /* Poll all queues to verify they have shut down */ 1918 /* Poll all queues to verify they have shut down */
1927 for (ms_wait = 0; ms_wait < 10; ms_wait++) { 1919 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1928 msleep(1); 1920 usleep_range(1000, 2000);
1929 rx_enabled = 0; 1921 rx_enabled = 0;
1930 for (i = 0; i < 4; i++) 1922 for (i = 0; i < 4; i++)
1931 rx_enabled |= rd32(E1000_RXDCTL(i)); 1923 rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1953 wr32(E1000_RCTL, temp_rctl); 1945 wr32(E1000_RCTL, temp_rctl);
1954 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); 1946 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1955 wrfl(); 1947 wrfl();
1956 msleep(2); 1948 usleep_range(2000, 3000);
1957 1949
1958 /* Enable RX queues that were previously enabled and restore our 1950 /* Enable RX queues that were previously enabled and restore our
1959 * previous state 1951 * previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2005 * 16ms to 55ms 1997 * 16ms to 55ms
2006 */ 1998 */
2007 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1999 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2008 &pcie_devctl2); 2000 &pcie_devctl2);
2009 if (ret_val) 2001 if (ret_val)
2010 goto out; 2002 goto out;
2011 2003
2012 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 2004 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2013 2005
2014 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 2006 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2015 &pcie_devctl2); 2007 &pcie_devctl2);
2016out: 2008out:
2017 /* disable completion timeout resend */ 2009 /* disable completion timeout resend */
2018 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 2010 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2241 wr32(E1000_TCTL, E1000_TCTL_PSP); 2233 wr32(E1000_TCTL, E1000_TCTL_PSP);
2242 wrfl(); 2234 wrfl();
2243 2235
2244 msleep(10); 2236 usleep_range(10000, 11000);
2245 2237
2246 /* Determine whether or not a global dev reset is requested */ 2238 /* Determine whether or not a global dev reset is requested */
2247 if (global_device_reset && 2239 if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2259 2251
2260 /* Add delay to insure DEV_RST has time to complete */ 2252 /* Add delay to insure DEV_RST has time to complete */
2261 if (global_device_reset) 2253 if (global_device_reset)
2262 msleep(5); 2254 usleep_range(5000, 6000);
2263 2255
2264 ret_val = igb_get_auto_rd_done(hw); 2256 ret_val = igb_get_auto_rd_done(hw);
2265 if (ret_val) { 2257 if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2436 2428
2437 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2429 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2438 if (ret_val) { 2430 if (ret_val) {
2439 hw_dbg("NVM Read Error while updating checksum" 2431 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2440 " compatibility bit.\n");
2441 goto out; 2432 goto out;
2442 } 2433 }
2443 2434
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2447 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2438 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2448 &nvm_data); 2439 &nvm_data);
2449 if (ret_val) { 2440 if (ret_val) {
2450 hw_dbg("NVM Write Error while updating checksum" 2441 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2451 " compatibility bit.\n");
2452 goto out; 2442 goto out;
2453 } 2443 }
2454 } 2444 }
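Rejoining the split message strings follows checkpatch's rule that user-visible strings stay on one line even past 80 columns, so a logged message can be located with a single grep:

	hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");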
@@ -2525,7 +2515,7 @@ out:
2525static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2515static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2526 u16 *data, bool read) 2516 u16 *data, bool read)
2527{ 2517{
2528 s32 ret_val = E1000_SUCCESS; 2518 s32 ret_val = 0;
2529 2519
2530 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2520 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2531 if (ret_val) 2521 if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2559 **/ 2549 **/
2560s32 igb_set_eee_i350(struct e1000_hw *hw) 2550s32 igb_set_eee_i350(struct e1000_hw *hw)
2561{ 2551{
2562 s32 ret_val = 0;
2563 u32 ipcnfg, eeer; 2552 u32 ipcnfg, eeer;
2564 2553
2565 if ((hw->mac.type < e1000_i350) || 2554 if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2593 rd32(E1000_EEER); 2582 rd32(E1000_EEER);
2594out: 2583out:
2595 2584
2596 return ret_val; 2585 return 0;
2597} 2586}
2598 2587
2599/** 2588/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
2720 **/ 2709 **/
2721static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2710static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2722{ 2711{
2723 s32 status = E1000_SUCCESS;
2724 u16 ets_offset; 2712 u16 ets_offset;
2725 u16 ets_cfg; 2713 u16 ets_cfg;
2726 u16 ets_sensor; 2714 u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2738 /* Return the internal sensor only if ETS is unsupported */ 2726 /* Return the internal sensor only if ETS is unsupported */
2739 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2727 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2740 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2728 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2741 return status; 2729 return 0;
2742 2730
2743 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2731 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2744 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2732 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2762 E1000_I2C_THERMAL_SENSOR_ADDR, 2750 E1000_I2C_THERMAL_SENSOR_ADDR,
2763 &data->sensor[i].temp); 2751 &data->sensor[i].temp);
2764 } 2752 }
2765 return status; 2753 return 0;
2766} 2754}
2767 2755
2768/** 2756/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2774 **/ 2762 **/
2775static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2763static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2776{ 2764{
2777 s32 status = E1000_SUCCESS;
2778 u16 ets_offset; 2765 u16 ets_offset;
2779 u16 ets_cfg; 2766 u16 ets_cfg;
2780 u16 ets_sensor; 2767 u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2800 /* Return the internal sensor only if ETS is unsupported */ 2787 /* Return the internal sensor only if ETS is unsupported */
2801 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2788 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2802 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2789 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2803 return status; 2790 return 0;
2804 2791
2805 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2792 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2806 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2793 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2831 low_thresh_delta; 2818 low_thresh_delta;
2832 } 2819 }
2833 } 2820 }
2834 return status; 2821 return 0;
2835} 2822}
2836 2823
2837#endif 2824#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_82575_H_ 24#ifndef _E1000_82575_H_
28#define _E1000_82575_H_ 25#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
37 u8 data); 34 u8 data);
38 35
39#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ 36#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
40 (ID_LED_DEF1_DEF2 << 8) | \ 37 (ID_LED_DEF1_DEF2 << 8) | \
41 (ID_LED_DEF1_DEF2 << 4) | \ 38 (ID_LED_DEF1_DEF2 << 4) | \
42 (ID_LED_OFF1_ON2)) 39 (ID_LED_OFF1_ON2))
43 40
44#define E1000_RAR_ENTRIES_82575 16 41#define E1000_RAR_ENTRIES_82575 16
45#define E1000_RAR_ENTRIES_82576 24 42#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
67#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 64#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
68 65
69#define E1000_EICR_TX_QUEUE ( \ 66#define E1000_EICR_TX_QUEUE ( \
70 E1000_EICR_TX_QUEUE0 | \ 67 E1000_EICR_TX_QUEUE0 | \
71 E1000_EICR_TX_QUEUE1 | \ 68 E1000_EICR_TX_QUEUE1 | \
72 E1000_EICR_TX_QUEUE2 | \ 69 E1000_EICR_TX_QUEUE2 | \
73 E1000_EICR_TX_QUEUE3) 70 E1000_EICR_TX_QUEUE3)
74 71
75#define E1000_EICR_RX_QUEUE ( \ 72#define E1000_EICR_RX_QUEUE ( \
76 E1000_EICR_RX_QUEUE0 | \ 73 E1000_EICR_RX_QUEUE0 | \
77 E1000_EICR_RX_QUEUE1 | \ 74 E1000_EICR_RX_QUEUE1 | \
78 E1000_EICR_RX_QUEUE2 | \ 75 E1000_EICR_RX_QUEUE2 | \
79 E1000_EICR_RX_QUEUE3) 76 E1000_EICR_RX_QUEUE3)
80 77
81/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 78/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
82#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 79#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
92 struct { 89 struct {
93 struct { 90 struct {
94 __le16 pkt_info; /* RSS type, Packet type */ 91 __le16 pkt_info; /* RSS type, Packet type */
95 __le16 hdr_info; /* Split Header, 92 __le16 hdr_info; /* Split Head, buf len */
96 * header buffer length */
97 } lo_dword; 93 } lo_dword;
98 union { 94 union {
99 __le32 rss; /* RSS Hash */ 95 __le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..2a8bb35c2df2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_DEFINES_H_ 24#ifndef _E1000_DEFINES_H_
28#define _E1000_DEFINES_H_ 25#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
101 98
102/* Same mask, but for extended and packet split descriptors */ 99/* Same mask, but for extended and packet split descriptors */
103#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 100#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
104 E1000_RXDEXT_STATERR_CE | \ 101 E1000_RXDEXT_STATERR_CE | \
105 E1000_RXDEXT_STATERR_SE | \ 102 E1000_RXDEXT_STATERR_SE | \
106 E1000_RXDEXT_STATERR_SEQ | \ 103 E1000_RXDEXT_STATERR_SEQ | \
107 E1000_RXDEXT_STATERR_CXE | \ 104 E1000_RXDEXT_STATERR_CXE | \
108 E1000_RXDEXT_STATERR_RXE) 105 E1000_RXDEXT_STATERR_RXE)
109 106
110#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 107#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
111#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 108#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
307#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 304#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
308 305
309/* DMA Coalescing register fields */ 306/* DMA Coalescing register fields */
310#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing 307#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
311 * Watchdog Timer */ 308#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
312#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
313 * Threshold */
314#define E1000_DMACR_DMACTHR_SHIFT 16 309#define E1000_DMACR_DMACTHR_SHIFT 16
315#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe 310#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
316 * transactions */
317#define E1000_DMACR_DMAC_LX_SHIFT 28 311#define E1000_DMACR_DMAC_LX_SHIFT 28
318#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ 312#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
319/* DMA Coalescing BMC-to-OS Watchdog Enable */ 313/* DMA Coalescing BMC-to-OS Watchdog Enable */
320#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 314#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
321 315
322#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit 316#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
323 * Threshold */
324 317
325#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ 318#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
326 319
327#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate 320#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
328 * Threshold */ 321#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
329#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
330 * current window */
331 322
332#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic 323#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
333 * Current Cnt */
334 324
335#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold 325#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
336 * High val */
337#define E1000_FCRTC_RTH_COAL_SHIFT 4 326#define E1000_FCRTC_RTH_COAL_SHIFT 4
338#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ 327#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
339 328
340/* Timestamp in Rx buffer */ 329/* Timestamp in Rx buffer */
341#define E1000_RXPBS_CFG_TS_EN 0x80000000 330#define E1000_RXPBS_CFG_TS_EN 0x80000000
342 331
332#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
333#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
334
343/* SerDes Control */ 335/* SerDes Control */
344#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 336#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
345 337
@@ -406,12 +398,12 @@
406 * o LSC = Link Status Change 398 * o LSC = Link Status Change
407 */ 399 */
408#define IMS_ENABLE_MASK ( \ 400#define IMS_ENABLE_MASK ( \
409 E1000_IMS_RXT0 | \ 401 E1000_IMS_RXT0 | \
410 E1000_IMS_TXDW | \ 402 E1000_IMS_TXDW | \
411 E1000_IMS_RXDMT0 | \ 403 E1000_IMS_RXDMT0 | \
412 E1000_IMS_RXSEQ | \ 404 E1000_IMS_RXSEQ | \
413 E1000_IMS_LSC | \ 405 E1000_IMS_LSC | \
414 E1000_IMS_DOUTSYNC) 406 E1000_IMS_DOUTSYNC)
415 407
416/* Interrupt Mask Set */ 408/* Interrupt Mask Set */
417#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 409#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
467#define E1000_RAH_POOL_1 0x00040000 459#define E1000_RAH_POOL_1 0x00040000
468 460
469/* Error Codes */ 461/* Error Codes */
470#define E1000_SUCCESS 0
471#define E1000_ERR_NVM 1 462#define E1000_ERR_NVM 1
472#define E1000_ERR_PHY 2 463#define E1000_ERR_PHY 2
473#define E1000_ERR_CONFIG 3 464#define E1000_ERR_CONFIG 3
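Dropping E1000_SUCCESS (which was always 0) is what drives the many return-value edits across this series: with success spelled as plain 0, callers get the idiomatic truth tests instead of comparisons against a named constant, as the i210 shadow-RAM code later in the patch shows:

	if (!(hw->nvm.ops.acquire(hw))) {	/* 0 means success */
		status = igb_read_nvm_eerd(hw, offset, count, data + i);
		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}
	if (status)				/* non-zero means failure */
		break;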
@@ -1011,8 +1002,7 @@
1011#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 1002#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
1012 1003
1013/* DMA Coalescing register fields */ 1004/* DMA Coalescing register fields */
1014#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based 1005#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
1015 on DMA coal */
1016 1006
1017/* Tx Rate-Scheduler Config fields */ 1007/* Tx Rate-Scheduler Config fields */
1018#define E1000_RTTBCNRC_RS_ENA 0x80000000 1008#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,24 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 *
7 under the terms and conditions of the GNU General Public License, 7 * This program is distributed in the hope it will be useful, but WITHOUT
8 version 2, as published by the Free Software Foundation. 8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * more details.
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 *
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * You should have received a copy of the GNU General Public License along with
13 more details. 13 * this program; if not, see <http://www.gnu.org/licenses/>.
14 14 *
15 You should have received a copy of the GNU General Public License along with 15 * The full GNU General Public License is included in this distribution in
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * the file called "COPYING".
17 17 *
18 The full GNU General Public License is included in this distribution in 18 * Contact Information:
19 the file called "COPYING". 19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 Contact Information: 21 */
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 22
27#ifndef _E1000_HW_H_ 23#ifndef _E1000_HW_H_
28#define _E1000_HW_H_ 24#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
320#include "e1000_mbx.h" 316#include "e1000_mbx.h"
321 317
322struct e1000_mac_operations { 318struct e1000_mac_operations {
323 s32 (*check_for_link)(struct e1000_hw *); 319 s32 (*check_for_link)(struct e1000_hw *);
324 s32 (*reset_hw)(struct e1000_hw *); 320 s32 (*reset_hw)(struct e1000_hw *);
325 s32 (*init_hw)(struct e1000_hw *); 321 s32 (*init_hw)(struct e1000_hw *);
326 bool (*check_mng_mode)(struct e1000_hw *); 322 bool (*check_mng_mode)(struct e1000_hw *);
327 s32 (*setup_physical_interface)(struct e1000_hw *); 323 s32 (*setup_physical_interface)(struct e1000_hw *);
328 void (*rar_set)(struct e1000_hw *, u8 *, u32); 324 void (*rar_set)(struct e1000_hw *, u8 *, u32);
329 s32 (*read_mac_addr)(struct e1000_hw *); 325 s32 (*read_mac_addr)(struct e1000_hw *);
330 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 326 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
331 s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); 327 s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
332 void (*release_swfw_sync)(struct e1000_hw *, u16); 328 void (*release_swfw_sync)(struct e1000_hw *, u16);
333#ifdef CONFIG_IGB_HWMON 329#ifdef CONFIG_IGB_HWMON
334 s32 (*get_thermal_sensor_data)(struct e1000_hw *); 330 s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
338}; 334};
339 335
340struct e1000_phy_operations { 336struct e1000_phy_operations {
341 s32 (*acquire)(struct e1000_hw *); 337 s32 (*acquire)(struct e1000_hw *);
342 s32 (*check_polarity)(struct e1000_hw *); 338 s32 (*check_polarity)(struct e1000_hw *);
343 s32 (*check_reset_block)(struct e1000_hw *); 339 s32 (*check_reset_block)(struct e1000_hw *);
344 s32 (*force_speed_duplex)(struct e1000_hw *); 340 s32 (*force_speed_duplex)(struct e1000_hw *);
345 s32 (*get_cfg_done)(struct e1000_hw *hw); 341 s32 (*get_cfg_done)(struct e1000_hw *hw);
346 s32 (*get_cable_length)(struct e1000_hw *); 342 s32 (*get_cable_length)(struct e1000_hw *);
347 s32 (*get_phy_info)(struct e1000_hw *); 343 s32 (*get_phy_info)(struct e1000_hw *);
348 s32 (*read_reg)(struct e1000_hw *, u32, u16 *); 344 s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
349 void (*release)(struct e1000_hw *); 345 void (*release)(struct e1000_hw *);
350 s32 (*reset)(struct e1000_hw *); 346 s32 (*reset)(struct e1000_hw *);
351 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 347 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
352 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 348 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
353 s32 (*write_reg)(struct e1000_hw *, u32, u16); 349 s32 (*write_reg)(struct e1000_hw *, u32, u16);
354 s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); 350 s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
355 s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); 351 s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
356}; 352};
357 353
358struct e1000_nvm_operations { 354struct e1000_nvm_operations {
359 s32 (*acquire)(struct e1000_hw *); 355 s32 (*acquire)(struct e1000_hw *);
360 s32 (*read)(struct e1000_hw *, u16, u16, u16 *); 356 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
361 void (*release)(struct e1000_hw *); 357 void (*release)(struct e1000_hw *);
362 s32 (*write)(struct e1000_hw *, u16, u16, u16 *); 358 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
363 s32 (*update)(struct e1000_hw *); 359 s32 (*update)(struct e1000_hw *);
364 s32 (*validate)(struct e1000_hw *); 360 s32 (*validate)(struct e1000_hw *);
365 s32 (*valid_led_default)(struct e1000_hw *, u16 *); 361 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
366}; 362};
367 363
368#define E1000_MAX_SENSORS 3 364#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..337161f440dd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26 23
27/* e1000_i210 24/* e1000_i210
28 * e1000_i211 25 * e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
100 return -E1000_ERR_NVM; 97 return -E1000_ERR_NVM;
101 } 98 }
102 99
103 return E1000_SUCCESS; 100 return 0;
104} 101}
105 102
106/** 103/**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
142 u32 swfw_sync; 139 u32 swfw_sync;
143 u32 swmask = mask; 140 u32 swmask = mask;
144 u32 fwmask = mask << 16; 141 u32 fwmask = mask << 16;
145 s32 ret_val = E1000_SUCCESS; 142 s32 ret_val = 0;
146 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 143 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
147 144
148 while (i < timeout) { 145 while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
187{ 184{
188 u32 swfw_sync; 185 u32 swfw_sync;
189 186
190 while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS) 187 while (igb_get_hw_semaphore_i210(hw))
191 ; /* Empty */ 188 ; /* Empty */
192 189
193 swfw_sync = rd32(E1000_SW_FW_SYNC); 190 swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
210static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, 207static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
211 u16 *data) 208 u16 *data)
212{ 209{
213 s32 status = E1000_SUCCESS; 210 s32 status = 0;
214 u16 i, count; 211 u16 i, count;
215 212
216 /* We cannot hold synchronization semaphores for too long, 213 /* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
220 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 217 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
221 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 218 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
222 E1000_EERD_EEWR_MAX_COUNT : (words - i); 219 E1000_EERD_EEWR_MAX_COUNT : (words - i);
223 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 220 if (!(hw->nvm.ops.acquire(hw))) {
224 status = igb_read_nvm_eerd(hw, offset, count, 221 status = igb_read_nvm_eerd(hw, offset, count,
225 data + i); 222 data + i);
226 hw->nvm.ops.release(hw); 223 hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
228 status = E1000_ERR_SWFW_SYNC; 225 status = E1000_ERR_SWFW_SYNC;
229 } 226 }
230 227
231 if (status != E1000_SUCCESS) 228 if (status)
232 break; 229 break;
233 } 230 }
234 231
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
253 struct e1000_nvm_info *nvm = &hw->nvm; 250 struct e1000_nvm_info *nvm = &hw->nvm;
254 u32 i, k, eewr = 0; 251 u32 i, k, eewr = 0;
255 u32 attempts = 100000; 252 u32 attempts = 100000;
256 s32 ret_val = E1000_SUCCESS; 253 s32 ret_val = 0;
257 254
258 /* A check for invalid values: offset too large, too many words, 255 /* A check for invalid values: offset too large, too many words,
259 * too many words for the offset, and not enough words. 256 * too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
275 for (k = 0; k < attempts; k++) { 272 for (k = 0; k < attempts; k++) {
276 if (E1000_NVM_RW_REG_DONE & 273 if (E1000_NVM_RW_REG_DONE &
277 rd32(E1000_SRWR)) { 274 rd32(E1000_SRWR)) {
278 ret_val = E1000_SUCCESS; 275 ret_val = 0;
279 break; 276 break;
280 } 277 }
281 udelay(5); 278 udelay(5);
282 } 279 }
283 280
284 if (ret_val != E1000_SUCCESS) { 281 if (ret_val) {
285 hw_dbg("Shadow RAM write EEWR timed out\n"); 282 hw_dbg("Shadow RAM write EEWR timed out\n");
286 break; 283 break;
287 } 284 }
@@ -310,7 +307,7 @@ out:
310static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, 307static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
311 u16 *data) 308 u16 *data)
312{ 309{
313 s32 status = E1000_SUCCESS; 310 s32 status = 0;
314 u16 i, count; 311 u16 i, count;
315 312
316 /* We cannot hold synchronization semaphores for too long, 313 /* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
320 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 317 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
321 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 318 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
322 E1000_EERD_EEWR_MAX_COUNT : (words - i); 319 E1000_EERD_EEWR_MAX_COUNT : (words - i);
323 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 320 if (!(hw->nvm.ops.acquire(hw))) {
324 status = igb_write_nvm_srwr(hw, offset, count, 321 status = igb_write_nvm_srwr(hw, offset, count,
325 data + i); 322 data + i);
326 hw->nvm.ops.release(hw); 323 hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
328 status = E1000_ERR_SWFW_SYNC; 325 status = E1000_ERR_SWFW_SYNC;
329 } 326 }
330 327
331 if (status != E1000_SUCCESS) 328 if (status)
332 break; 329 break;
333 } 330 }
334 331
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 364 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
368 hw_dbg("Read INVM Word 0x%02x = %x\n", 365 hw_dbg("Read INVM Word 0x%02x = %x\n",
369 address, *data); 366 address, *data);
370 status = E1000_SUCCESS; 367 status = 0;
371 break; 368 break;
372 } 369 }
373 } 370 }
374 } 371 }
375 if (status != E1000_SUCCESS) 372 if (status)
376 hw_dbg("Requested word 0x%02x not found in OTP\n", address); 373 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
377 return status; 374 return status;
378} 375}
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
388static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, 385static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
389 u16 words __always_unused, u16 *data) 386 u16 words __always_unused, u16 *data)
390{ 387{
391 s32 ret_val = E1000_SUCCESS; 388 s32 ret_val = 0;
392 389
393 /* Only the MAC addr is required to be present in the iNVM */ 390 /* Only the MAC addr is required to be present in the iNVM */
394 switch (offset) { 391 switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
398 &data[1]); 395 &data[1]);
399 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, 396 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
400 &data[2]); 397 &data[2]);
401 if (ret_val != E1000_SUCCESS) 398 if (ret_val)
402 hw_dbg("MAC Addr not found in iNVM\n"); 399 hw_dbg("MAC Addr not found in iNVM\n");
403 break; 400 break;
404 case NVM_INIT_CTRL_2: 401 case NVM_INIT_CTRL_2:
405 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); 402 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
406 if (ret_val != E1000_SUCCESS) { 403 if (ret_val) {
407 *data = NVM_INIT_CTRL_2_DEFAULT_I211; 404 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
408 ret_val = E1000_SUCCESS; 405 ret_val = 0;
409 } 406 }
410 break; 407 break;
411 case NVM_INIT_CTRL_4: 408 case NVM_INIT_CTRL_4:
412 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); 409 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
413 if (ret_val != E1000_SUCCESS) { 410 if (ret_val) {
414 *data = NVM_INIT_CTRL_4_DEFAULT_I211; 411 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
415 ret_val = E1000_SUCCESS; 412 ret_val = 0;
416 } 413 }
417 break; 414 break;
418 case NVM_LED_1_CFG: 415 case NVM_LED_1_CFG:
419 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); 416 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
420 if (ret_val != E1000_SUCCESS) { 417 if (ret_val) {
421 *data = NVM_LED_1_CFG_DEFAULT_I211; 418 *data = NVM_LED_1_CFG_DEFAULT_I211;
422 ret_val = E1000_SUCCESS; 419 ret_val = 0;
423 } 420 }
424 break; 421 break;
425 case NVM_LED_0_2_CFG: 422 case NVM_LED_0_2_CFG:
426 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); 423 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
427 if (ret_val != E1000_SUCCESS) { 424 if (ret_val) {
428 *data = NVM_LED_0_2_CFG_DEFAULT_I211; 425 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
429 ret_val = E1000_SUCCESS; 426 ret_val = 0;
430 } 427 }
431 break; 428 break;
432 case NVM_ID_LED_SETTINGS: 429 case NVM_ID_LED_SETTINGS:
433 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); 430 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
434 if (ret_val != E1000_SUCCESS) { 431 if (ret_val) {
435 *data = ID_LED_RESERVED_FFFF; 432 *data = ID_LED_RESERVED_FFFF;
436 ret_val = E1000_SUCCESS; 433 ret_val = 0;
437 } 434 }
435 break;
438 case NVM_SUB_DEV_ID: 436 case NVM_SUB_DEV_ID:
439 *data = hw->subsystem_device_id; 437 *data = hw->subsystem_device_id;
440 break; 438 break;
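
Besides the E1000_SUCCESS conversion, the hunk above adds a previously missing break after the NVM_ID_LED_SETTINGS case, which had been falling through into NVM_SUB_DEV_ID and overwriting *data with the subsystem ID. The per-field fallback pattern, sketched as a hypothetical reduction of igb_read_invm_i210():

        /* Optional iNVM words fall back to a default and still report
         * success; the added break stops the fall-through.
         */
        switch (offset) {
        case NVM_ID_LED_SETTINGS:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val) {
                        *data = ID_LED_RESERVED_FFFF;   /* default */
                        ret_val = 0;
                }
                break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
        }
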
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
488 /* Check if we have first version location used */ 486 /* Check if we have first version location used */
489 if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { 487 if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
490 version = 0; 488 version = 0;
491 status = E1000_SUCCESS; 489 status = 0;
492 break; 490 break;
493 } 491 }
494 /* Check if we have second version location used */ 492 /* Check if we have second version location used */
495 else if ((i == 1) && 493 else if ((i == 1) &&
496 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { 494 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
497 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; 495 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
498 status = E1000_SUCCESS; 496 status = 0;
499 break; 497 break;
500 } 498 }
501 /* Check if we have odd version location 499 /* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
506 (i != 1))) { 504 (i != 1))) {
507 version = (*next_record & E1000_INVM_VER_FIELD_TWO) 505 version = (*next_record & E1000_INVM_VER_FIELD_TWO)
508 >> 13; 506 >> 13;
509 status = E1000_SUCCESS; 507 status = 0;
510 break; 508 break;
511 } 509 }
512 /* Check if we have even version location 510 /* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
515 else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && 513 else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
516 ((*record & 0x3) == 0)) { 514 ((*record & 0x3) == 0)) {
517 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; 515 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
518 status = E1000_SUCCESS; 516 status = 0;
519 break; 517 break;
520 } 518 }
521 } 519 }
522 520
523 if (status == E1000_SUCCESS) { 521 if (!status) {
524 invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) 522 invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
525 >> E1000_INVM_MAJOR_SHIFT; 523 >> E1000_INVM_MAJOR_SHIFT;
526 invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; 524 invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
533 /* Check if we have image type in first location used */ 531 /* Check if we have image type in first location used */
534 if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { 532 if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
535 invm_ver->invm_img_type = 0; 533 invm_ver->invm_img_type = 0;
536 status = E1000_SUCCESS; 534 status = 0;
537 break; 535 break;
538 } 536 }
539 /* Check if we have image type in first location used */ 537 /* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
542 ((((*record & 0x3) != 0) && (i != 1)))) { 540 ((((*record & 0x3) != 0) && (i != 1)))) {
543 invm_ver->invm_img_type = 541 invm_ver->invm_img_type =
544 (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; 542 (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
545 status = E1000_SUCCESS; 543 status = 0;
546 break; 544 break;
547 } 545 }
548 } 546 }
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
558 **/ 556 **/
559static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) 557static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
560{ 558{
561 s32 status = E1000_SUCCESS; 559 s32 status = 0;
562 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); 560 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
563 561
564 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 562 if (!(hw->nvm.ops.acquire(hw))) {
565 563
566 /* Replace the read function with semaphore grabbing with 564 /* Replace the read function with semaphore grabbing with
567 * the one that skips this for a while. 565 * the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
593 **/ 591 **/
594static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) 592static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
595{ 593{
596 s32 ret_val = E1000_SUCCESS; 594 s32 ret_val = 0;
597 u16 checksum = 0; 595 u16 checksum = 0;
598 u16 i, nvm_data; 596 u16 i, nvm_data;
599 597
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
602 * EEPROM read fails 600 * EEPROM read fails
603 */ 601 */
604 ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); 602 ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
605 if (ret_val != E1000_SUCCESS) { 603 if (ret_val) {
606 hw_dbg("EEPROM read failed\n"); 604 hw_dbg("EEPROM read failed\n");
607 goto out; 605 goto out;
608 } 606 }
609 607
610 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 608 if (!(hw->nvm.ops.acquire(hw))) {
611 /* Do not use hw->nvm.ops.write, hw->nvm.ops.read 609 /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
612 * because we do not want to take the synchronization 610 * because we do not want to take the synchronization
613 * semaphores twice here. 611 * semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
625 checksum = (u16) NVM_SUM - checksum; 623 checksum = (u16) NVM_SUM - checksum;
626 ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, 624 ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
627 &checksum); 625 &checksum);
628 if (ret_val != E1000_SUCCESS) { 626 if (ret_val) {
629 hw->nvm.ops.release(hw); 627 hw->nvm.ops.release(hw);
630 hw_dbg("NVM Write Error while updating checksum.\n"); 628 hw_dbg("NVM Write Error while updating checksum.\n");
631 goto out; 629 goto out;
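
For reference, the checksum rule being maintained here: the 16-bit sum of the NVM words up to and including the checksum word must equal NVM_SUM, so the value written to NVM_CHECKSUM_REG is NVM_SUM minus the sum of the preceding words. A condensed sketch of the computation in the hunk above:

        /* Sum words 0x00..NVM_CHECKSUM_REG-1, then store the complement
         * so the whole region sums to NVM_SUM.
         */
        for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
                if (ret_val)
                        break;          /* EEPROM read failed */
                checksum += nvm_data;
        }
        checksum = (u16)NVM_SUM - checksum;
        ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, &checksum);
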
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
654 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { 652 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
655 reg = rd32(E1000_EECD); 653 reg = rd32(E1000_EECD);
656 if (reg & E1000_EECD_FLUDONE_I210) { 654 if (reg & E1000_EECD_FLUDONE_I210) {
657 ret_val = E1000_SUCCESS; 655 ret_val = 0;
658 break; 656 break;
659 } 657 }
660 udelay(5); 658 udelay(5);
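
The polling helper above is the core of the flash-update handshake: igb_update_flash_i210() requests the update via EECD and then waits here for E1000_EECD_FLUDONE_I210, with 0 reporting completion under the new convention. A sketch, assuming the usual -E1000_ERR_NVM initializer (the hunk does not show it):

        /* Poll the flash-update-done bit; ret_val stays -E1000_ERR_NVM
         * if the bit never asserts within E1000_FLUDONE_ATTEMPTS.
         */
        s32 ret_val = -E1000_ERR_NVM;
        u32 i, reg;

        for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
                reg = rd32(E1000_EECD);
                if (reg & E1000_EECD_FLUDONE_I210) {
                        ret_val = 0;
                        break;
                }
                udelay(5);
        }
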
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
687 **/ 685 **/
688static s32 igb_update_flash_i210(struct e1000_hw *hw) 686static s32 igb_update_flash_i210(struct e1000_hw *hw)
689{ 687{
690 s32 ret_val = E1000_SUCCESS; 688 s32 ret_val = 0;
691 u32 flup; 689 u32 flup;
692 690
693 ret_val = igb_pool_flash_update_done_i210(hw); 691 ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
700 wr32(E1000_EECD, flup); 698 wr32(E1000_EECD, flup);
701 699
702 ret_val = igb_pool_flash_update_done_i210(hw); 700 ret_val = igb_pool_flash_update_done_i210(hw);
 703 if (ret_val == E1000_SUCCESS) 701 if (!ret_val)
704 hw_dbg("Flash update complete\n"); 702 hw_dbg("Flash update complete\n");
705 else 703 else
706 hw_dbg("Flash update time out\n"); 704 hw_dbg("Flash update time out\n");
@@ -753,7 +751,7 @@ out:
753static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, 751static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
754 u8 dev_addr, u16 *data, bool read) 752 u8 dev_addr, u16 *data, bool read)
755{ 753{
756 s32 ret_val = E1000_SUCCESS; 754 s32 ret_val = 0;
757 755
758 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); 756 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
759 if (ret_val) 757 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_I210_H_ 24#ifndef _E1000_I210_H_
28#define _E1000_I210_H_ 25#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#include <linux/if_ether.h> 24#include <linux/if_ether.h>
28#include <linux/delay.h> 25#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
442 * The caller must have a packed mc_addr_list of multicast addresses. 439 * The caller must have a packed mc_addr_list of multicast addresses.
443 **/ 440 **/
444void igb_update_mc_addr_list(struct e1000_hw *hw, 441void igb_update_mc_addr_list(struct e1000_hw *hw,
445 u8 *mc_addr_list, u32 mc_addr_count) 442 u8 *mc_addr_list, u32 mc_addr_count)
446{ 443{
447 u32 hash_value, hash_bit, hash_reg; 444 u32 hash_value, hash_bit, hash_reg;
448 int i; 445 int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
866 goto out; 863 goto out;
867 864
868 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { 865 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
869 hw_dbg("Copper PHY and Auto Neg " 866 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
870 "has not completed.\n");
871 goto out; 867 goto out;
872 } 868 }
873 869
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1265 while (i < AUTO_READ_DONE_TIMEOUT) { 1261 while (i < AUTO_READ_DONE_TIMEOUT) {
1266 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) 1262 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1267 break; 1263 break;
1268 msleep(1); 1264 usleep_range(1000, 2000);
1269 i++; 1265 i++;
1270 } 1266 }
1271 1267
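
This is one of several hunks in the merge that swap msleep(1) for usleep_range(1000, 2000): for waits this short, msleep() rounds up to jiffy granularity and can sleep far longer than requested, while usleep_range() is hrtimer-backed and gives the scheduler a coalescing window. The resulting polling idiom:

        /* Sleep 1-2 ms between polls instead of an imprecise msleep(1). */
        while (i < AUTO_READ_DONE_TIMEOUT) {
                if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
                        break;
                usleep_range(1000, 2000);
                i++;
        }
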
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1298 } 1294 }
1299 1295
1300 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { 1296 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1301 switch(hw->phy.media_type) { 1297 switch (hw->phy.media_type) {
1302 case e1000_media_type_internal_serdes: 1298 case e1000_media_type_internal_serdes:
1303 *data = ID_LED_DEFAULT_82575_SERDES; 1299 *data = ID_LED_DEFAULT_82575_SERDES;
1304 break; 1300 break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_MAC_H_ 24#ifndef _E1000_MAC_H_
28#define _E1000_MAC_H_ 25#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#include "e1000_mbx.h" 24#include "e1000_mbx.h"
28 25
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_MBX_H_ 24#ifndef _E1000_MBX_H_
28#define _E1000_MBX_H_ 25#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..e8280d0d7f02 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 * This program is free software; you can redistribute it and/or modify it
4 Copyright(c) 2007-2014 Intel Corporation. 4 * under the terms and conditions of the GNU General Public License,
5 5 * version 2, as published by the Free Software Foundation.
6 This program is free software; you can redistribute it and/or modify it 6 *
7 under the terms and conditions of the GNU General Public License, 7 * This program is distributed in the hope it will be useful, but WITHOUT
8 version 2, as published by the Free Software Foundation. 8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * more details.
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 *
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * You should have received a copy of the GNU General Public License along with
13 more details. 13 * this program; if not, see <http://www.gnu.org/licenses/>.
14 14 *
15 You should have received a copy of the GNU General Public License along with 15 * The full GNU General Public License is included in this distribution in
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * the file called "COPYING".
17 17 *
18 The full GNU General Public License is included in this distribution in 18 * Contact Information:
19 the file called "COPYING". 19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 Contact Information: 21 */
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 22
27#include <linux/if_ether.h> 23#include <linux/if_ether.h>
28#include <linux/delay.h> 24#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
480 /* Loop to allow for up to whole page write of eeprom */ 476 /* Loop to allow for up to whole page write of eeprom */
481 while (widx < words) { 477 while (widx < words) {
482 u16 word_out = data[widx]; 478 u16 word_out = data[widx];
479
483 word_out = (word_out >> 8) | (word_out << 8); 480 word_out = (word_out >> 8) | (word_out << 8);
484 igb_shift_out_eec_bits(hw, word_out, 16); 481 igb_shift_out_eec_bits(hw, word_out, 16);
485 widx++; 482 widx++;
@@ -801,5 +798,4 @@ etrack_id:
801 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) 798 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
802 | eeprom_verl; 799 | eeprom_verl;
803 } 800 }
804 return;
805} 801}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_NVM_H_ 24#ifndef _E1000_NVM_H_
28#define _E1000_NVM_H_ 25#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
32s32 igb_read_mac_addr(struct e1000_hw *hw); 29s32 igb_read_mac_addr(struct e1000_hw *hw);
33s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); 30s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
34s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, 31s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
35 u32 part_num_size); 32 u32 part_num_size);
36s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 33s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
37s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 34s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
38s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 35s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..c1bb64d8366f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#include <linux/if_ether.h> 24#include <linux/if_ether.h>
28#include <linux/delay.h> 25#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
924 if (phy->autoneg_wait_to_complete) { 921 if (phy->autoneg_wait_to_complete) {
925 ret_val = igb_wait_autoneg(hw); 922 ret_val = igb_wait_autoneg(hw);
926 if (ret_val) { 923 if (ret_val) {
927 hw_dbg("Error while waiting for " 924 hw_dbg("Error while waiting for autoneg to complete\n");
928 "autoneg to complete\n");
929 goto out; 925 goto out;
930 } 926 }
931 } 927 }
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
2208void igb_power_up_phy_copper(struct e1000_hw *hw) 2204void igb_power_up_phy_copper(struct e1000_hw *hw)
2209{ 2205{
2210 u16 mii_reg = 0; 2206 u16 mii_reg = 0;
2211 u16 power_reg = 0;
2212 2207
2213 /* The PHY will retain its settings across a power down/up cycle */ 2208 /* The PHY will retain its settings across a power down/up cycle */
2214 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 2209 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2215 mii_reg &= ~MII_CR_POWER_DOWN; 2210 mii_reg &= ~MII_CR_POWER_DOWN;
2216 if (hw->phy.type == e1000_phy_i210) {
2217 hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
2218 power_reg &= ~GS40G_CS_POWER_DOWN;
2219 hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
2220 }
2221 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 2211 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2222} 2212}
2223 2213
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
2231void igb_power_down_phy_copper(struct e1000_hw *hw) 2221void igb_power_down_phy_copper(struct e1000_hw *hw)
2232{ 2222{
2233 u16 mii_reg = 0; 2223 u16 mii_reg = 0;
2234 u16 power_reg = 0;
2235 2224
2236 /* The PHY will retain its settings across a power down/up cycle */ 2225 /* The PHY will retain its settings across a power down/up cycle */
2237 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 2226 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2238 mii_reg |= MII_CR_POWER_DOWN; 2227 mii_reg |= MII_CR_POWER_DOWN;
2239
2240 /* i210 Phy requires an additional bit for power up/down */
2241 if (hw->phy.type == e1000_phy_i210) {
2242 hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
2243 power_reg |= GS40G_CS_POWER_DOWN;
2244 hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
2245 }
2246 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 2228 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2247 msleep(1); 2229 usleep_range(1000, 2000);
2248} 2230}
2249 2231
2250/** 2232/**
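
The two hunks above drop the i210-specific GS40G_COPPER_SPEC poke, leaving power transitions to the standard MII control register alone; MII_CR_POWER_DOWN is the BMCR power-down bit, set to power the PHY down and cleared to bring it back up. The power-down sequence after the change, sketched:

        /* Standard copper PHY power-down via BMCR; per the driver's
         * comment, PHY settings persist across the power cycle.
         */
        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        mii_reg |= MII_CR_POWER_DOWN;   /* clear this bit to power up */
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
        usleep_range(1000, 2000);
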
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..7af4ffab0285 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_PHY_H_ 24#ifndef _E1000_PHY_H_
28#define _E1000_PHY_H_ 25#define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
154#define GS40G_MAC_LB 0x4140 151#define GS40G_MAC_LB 0x4140
155#define GS40G_MAC_SPEED_1G 0X0006 152#define GS40G_MAC_SPEED_1G 0X0006
156#define GS40G_COPPER_SPEC 0x0010 153#define GS40G_COPPER_SPEC 0x0010
157#define GS40G_CS_POWER_DOWN 0x0002
158#define GS40G_LINE_LB 0x4000 154#define GS40G_LINE_LB 0x4000
159 155
160/* SFP modules ID memory locations */ 156/* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..1cc4b1a7e597 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_REGS_H_ 24#ifndef _E1000_REGS_H_
28#define _E1000_REGS_H_ 25#define _E1000_REGS_H_
@@ -195,6 +192,10 @@
195 : (0x0E038 + ((_n) * 0x40))) 192 : (0x0E038 + ((_n) * 0x40)))
196#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ 193#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
197 : (0x0E03C + ((_n) * 0x40))) 194 : (0x0E03C + ((_n) * 0x40)))
195
196#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
197#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
198
198#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 199#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
199#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ 200#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
200#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ 201#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
301#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ 302#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
302#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) 303#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
303#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 304#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
304 (0x054E0 + ((_i - 16) * 8))) 305 (0x054E0 + ((_i - 16) * 8)))
305#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 306#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
306 (0x054E4 + ((_i - 16) * 8))) 307 (0x054E4 + ((_i - 16) * 8)))
307#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) 308#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
308#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) 309#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
309#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) 310#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
358#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) 359#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
359#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) 360#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
360#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) 361#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
361#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine 362#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
362 * Filter - RW */
363#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) 363#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
364 364
365struct e1000_hw; 365struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26
27 23
28/* Linux PRO/1000 Ethernet Driver main header file */ 24/* Linux PRO/1000 Ethernet Driver main header file */
29 25
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
198 unsigned int bytecount; 194 unsigned int bytecount;
199 u16 gso_segs; 195 u16 gso_segs;
200 __be16 protocol; 196 __be16 protocol;
197
201 DEFINE_DMA_UNMAP_ADDR(dma); 198 DEFINE_DMA_UNMAP_ADDR(dma);
202 DEFINE_DMA_UNMAP_LEN(len); 199 DEFINE_DMA_UNMAP_LEN(len);
203 u32 tx_flags; 200 u32 tx_flags;
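
The DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() fields around the blank line added above compile to real storage only on configurations that need unmap state. A hedged usage sketch with the matching kernel accessors (dev, len and dma_addr are assumed context, not from this patch):

        /* Record mapping state at map time... */
        dma_unmap_addr_set(tx_buffer, dma, dma_addr);
        dma_unmap_len_set(tx_buffer, len, len);

        /* ...and read it back at completion time. */
        dma_unmap_single(dev, dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
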
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..c737d1f40838 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27/* ethtool support for igb */ 24/* ethtool support for igb */
28 25
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
144 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 141 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
145 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; 142 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
146 u32 status; 143 u32 status;
144 u32 speed;
147 145
148 status = rd32(E1000_STATUS); 146 status = rd32(E1000_STATUS);
149 if (hw->phy.media_type == e1000_media_type_copper) { 147 if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
218 if (status & E1000_STATUS_LU) { 216 if (status & E1000_STATUS_LU) {
219 if ((status & E1000_STATUS_2P5_SKU) && 217 if ((status & E1000_STATUS_2P5_SKU) &&
220 !(status & E1000_STATUS_2P5_SKU_OVER)) { 218 !(status & E1000_STATUS_2P5_SKU_OVER)) {
221 ecmd->speed = SPEED_2500; 219 speed = SPEED_2500;
222 } else if (status & E1000_STATUS_SPEED_1000) { 220 } else if (status & E1000_STATUS_SPEED_1000) {
223 ecmd->speed = SPEED_1000; 221 speed = SPEED_1000;
224 } else if (status & E1000_STATUS_SPEED_100) { 222 } else if (status & E1000_STATUS_SPEED_100) {
225 ecmd->speed = SPEED_100; 223 speed = SPEED_100;
226 } else { 224 } else {
227 ecmd->speed = SPEED_10; 225 speed = SPEED_10;
228 } 226 }
229 if ((status & E1000_STATUS_FD) || 227 if ((status & E1000_STATUS_FD) ||
230 hw->phy.media_type != e1000_media_type_copper) 228 hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
232 else 230 else
233 ecmd->duplex = DUPLEX_HALF; 231 ecmd->duplex = DUPLEX_HALF;
234 } else { 232 } else {
235 ecmd->speed = -1; 233 speed = SPEED_UNKNOWN;
236 ecmd->duplex = -1; 234 ecmd->duplex = DUPLEX_UNKNOWN;
237 } 235 }
236 ethtool_cmd_speed_set(ecmd, speed);
238 if ((hw->phy.media_type == e1000_media_type_fiber) || 237 if ((hw->phy.media_type == e1000_media_type_fiber) ||
239 hw->mac.autoneg) 238 hw->mac.autoneg)
240 ecmd->autoneg = AUTONEG_ENABLE; 239 ecmd->autoneg = AUTONEG_ENABLE;
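
The hunk above stops assigning ecmd->speed directly and routes the value through ethtool_cmd_speed_set(), which spreads a 32-bit speed across the legacy 16-bit speed/speed_hi pair, and it replaces the old -1 sentinels with SPEED_UNKNOWN/DUPLEX_UNKNOWN. The helper is roughly (paraphrasing include/uapi/linux/ethtool.h):

        static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
                                                 __u32 speed)
        {
                ep->speed = (__u16)speed;
                ep->speed_hi = (__u16)(speed >> 16);
        }
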
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
286 } 285 }
287 286
288 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 287 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
289 msleep(1); 288 usleep_range(1000, 2000);
290 289
291 if (ecmd->autoneg == AUTONEG_ENABLE) { 290 if (ecmd->autoneg == AUTONEG_ENABLE) {
292 hw->mac.autoneg = 1; 291 hw->mac.autoneg = 1;
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
399 adapter->fc_autoneg = pause->autoneg; 398 adapter->fc_autoneg = pause->autoneg;
400 399
401 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 400 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
402 msleep(1); 401 usleep_range(1000, 2000);
403 402
404 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 403 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
405 hw->fc.requested_mode = e1000_fc_default; 404 hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
886 } 885 }
887 886
888 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 887 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
889 msleep(1); 888 usleep_range(1000, 2000);
890 889
891 if (!netif_running(adapter->netdev)) { 890 if (!netif_running(adapter->netdev)) {
892 for (i = 0; i < adapter->num_tx_queues; i++) 891 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
1060 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1059 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1061 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1060 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1062 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1061 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1063 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 1062 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1064 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 1063 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1065 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1064 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1066 { E1000_RA, 0, 16, TABLE64_TEST_LO, 1065 { E1000_RA, 0, 16, TABLE64_TEST_LO,
1067 0xFFFFFFFF, 0xFFFFFFFF }, 1066 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
1103 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1102 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1104 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1103 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1105 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1104 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1106 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 1105 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1107 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 1106 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1108 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1107 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1109 { E1000_RA, 0, 16, TABLE64_TEST_LO, 1108 { E1000_RA, 0, 16, TABLE64_TEST_LO,
1110 0xFFFFFFFF, 0xFFFFFFFF }, 1109 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
1132 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1131 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1133 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 1132 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1134 /* Enable all RX queues before testing. */ 1133 /* Enable all RX queues before testing. */
1135 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 1134 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
1136 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 1135 E1000_RXDCTL_QUEUE_ENABLE },
1136 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
1137 E1000_RXDCTL_QUEUE_ENABLE },
1137 /* RDH is read-only for 82576, only test RDT. */ 1138 /* RDH is read-only for 82576, only test RDT. */
1138 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1139 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1139 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1140 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
1149 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1150 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1150 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 1151 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1151 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1152 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1152 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 1153 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1153 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 1154 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1154 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1155 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1155 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1156 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1156 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 1157 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
1157 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1158 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1158 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 1159 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
1159 { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1160 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1160 { 0, 0, 0, 0 } 1161 { 0, 0, 0, 0 }
1161}; 1162};
1162 1163
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
1170 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1171 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1171 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1172 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1172 /* Enable all four RX queues before testing. */ 1173 /* Enable all four RX queues before testing. */
1173 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 1174 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
1175 E1000_RXDCTL_QUEUE_ENABLE },
1174 /* RDH is read-only for 82575, only test RDT. */ 1176 /* RDH is read-only for 82575, only test RDT. */
1175 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1177 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1176 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 1178 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1196{ 1198{
1197 struct e1000_hw *hw = &adapter->hw; 1199 struct e1000_hw *hw = &adapter->hw;
1198 u32 pat, val; 1200 u32 pat, val;
1199 static const u32 _test[] = 1201 static const u32 _test[] = {
1200 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1202 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1201 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1203 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1202 wr32(reg, (_test[pat] & write)); 1204 wr32(reg, (_test[pat] & write));
1203 val = rd32(reg) & mask; 1205 val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1206 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", 1208 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1207 reg, val, (_test[pat] & write & mask)); 1209 reg, val, (_test[pat] & write & mask));
1208 *data = reg; 1210 *data = reg;
1209 return 1; 1211 return true;
1210 } 1212 }
1211 } 1213 }
1212 1214
1213 return 0; 1215 return false;
1214} 1216}
1215 1217
1216static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 1218static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
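reg_pattern_test() and reg_set_and_check() are declared bool, so the returns become true/false instead of 1/0; the machine code is the same, but the intent is explicit and static checkers stop warning. A standalone sketch of the idiom:

	#include <stdbool.h>
	#include <stdio.h>

	/* A function declared bool should return bool values. */
	static bool reg_matches(unsigned int val, unsigned int mask,
				unsigned int expect)
	{
		return (val & mask) == expect;	/* true/false, not 1/0 */
	}

	int main(void)
	{
		printf("%d\n", reg_matches(0xf0, 0xff, 0xf0));	/* prints 1 */
		return 0;
	}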
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
1218{ 1220{
1219 struct e1000_hw *hw = &adapter->hw; 1221 struct e1000_hw *hw = &adapter->hw;
1220 u32 val; 1222 u32 val;
1223
1221 wr32(reg, write & mask); 1224 wr32(reg, write & mask);
1222 val = rd32(reg); 1225 val = rd32(reg);
1223 if ((write & mask) != (val & mask)) { 1226 if ((write & mask) != (val & mask)) {
1224 dev_err(&adapter->pdev->dev, 1227 dev_err(&adapter->pdev->dev,
1225 "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, 1228 "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1226 (val & mask), (write & mask)); 1229 reg, (val & mask), (write & mask));
1227 *data = reg; 1230 *data = reg;
1228 return 1; 1231 return true;
1229 } 1232 }
1230 1233
1231 return 0; 1234 return false;
1232} 1235}
1233 1236
1234#define REG_PATTERN_TEST(reg, mask, write) \ 1237#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1387 /* Hook up test interrupt handler just for this test */ 1390 /* Hook up test interrupt handler just for this test */
1388 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1391 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1389 if (request_irq(adapter->msix_entries[0].vector, 1392 if (request_irq(adapter->msix_entries[0].vector,
1390 igb_test_intr, 0, netdev->name, adapter)) { 1393 igb_test_intr, 0, netdev->name, adapter)) {
1391 *data = 1; 1394 *data = 1;
1392 return -1; 1395 return -1;
1393 } 1396 }
1394 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { 1397 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1395 shared_int = false; 1398 shared_int = false;
1396 if (request_irq(irq, 1399 if (request_irq(irq,
1397 igb_test_intr, 0, netdev->name, adapter)) { 1400 igb_test_intr, 0, netdev->name, adapter)) {
1398 *data = 1; 1401 *data = 1;
1399 return -1; 1402 return -1;
1400 } 1403 }
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1412 /* Disable all the interrupts */ 1415 /* Disable all the interrupts */
1413 wr32(E1000_IMC, ~0); 1416 wr32(E1000_IMC, ~0);
1414 wrfl(); 1417 wrfl();
1415 msleep(10); 1418 usleep_range(10000, 11000);
1416 1419
1417 /* Define all writable bits for ICS */ 1420 /* Define all writable bits for ICS */
1418 switch (hw->mac.type) { 1421 switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1459 wr32(E1000_IMC, mask); 1462 wr32(E1000_IMC, mask);
1460 wr32(E1000_ICS, mask); 1463 wr32(E1000_ICS, mask);
1461 wrfl(); 1464 wrfl();
1462 msleep(10); 1465 usleep_range(10000, 11000);
1463 1466
1464 if (adapter->test_icr & mask) { 1467 if (adapter->test_icr & mask) {
1465 *data = 3; 1468 *data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1481 wr32(E1000_IMS, mask); 1484 wr32(E1000_IMS, mask);
1482 wr32(E1000_ICS, mask); 1485 wr32(E1000_ICS, mask);
1483 wrfl(); 1486 wrfl();
1484 msleep(10); 1487 usleep_range(10000, 11000);
1485 1488
1486 if (!(adapter->test_icr & mask)) { 1489 if (!(adapter->test_icr & mask)) {
1487 *data = 4; 1490 *data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1503 wr32(E1000_IMC, ~mask); 1506 wr32(E1000_IMC, ~mask);
1504 wr32(E1000_ICS, ~mask); 1507 wr32(E1000_ICS, ~mask);
1505 wrfl(); 1508 wrfl();
1506 msleep(10); 1509 usleep_range(10000, 11000);
1507 1510
1508 if (adapter->test_icr & mask) { 1511 if (adapter->test_icr & mask) {
1509 *data = 5; 1512 *data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1515 /* Disable all the interrupts */ 1518 /* Disable all the interrupts */
1516 wr32(E1000_IMC, ~0); 1519 wr32(E1000_IMC, ~0);
1517 wrfl(); 1520 wrfl();
1518 msleep(10); 1521 usleep_range(10000, 11000);
1519 1522
1520 /* Unhook test interrupt handler */ 1523 /* Unhook test interrupt handler */
1521 if (adapter->flags & IGB_FLAG_HAS_MSIX) 1524 if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1664 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || 1667 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1665 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || 1668 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1666 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || 1669 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
1667 (hw->device_id == E1000_DEV_ID_I354_SGMII)) { 1670 (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
1668 1671 (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
1669 /* Enable DH89xxCC MPHY for near end loopback */ 1672 /* Enable DH89xxCC MPHY for near end loopback */
1670 reg = rd32(E1000_MPHY_ADDR_CTL); 1673 reg = rd32(E1000_MPHY_ADDR_CTL);
1671 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | 1674 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1949 *data = 0; 1952 *data = 0;
1950 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1953 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1951 int i = 0; 1954 int i = 0;
1955
1952 hw->mac.serdes_has_link = false; 1956 hw->mac.serdes_has_link = false;
1953 1957
1954 /* On some blade server designs, link establishment 1958 /* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2413 switch (cmd->flow_type) { 2417 switch (cmd->flow_type) {
2414 case TCP_V4_FLOW: 2418 case TCP_V4_FLOW:
2415 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2419 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2420 /* Fall through */
2416 case UDP_V4_FLOW: 2421 case UDP_V4_FLOW:
2417 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) 2422 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
2418 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2423 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2424 /* Fall through */
2419 case SCTP_V4_FLOW: 2425 case SCTP_V4_FLOW:
2420 case AH_ESP_V4_FLOW: 2426 case AH_ESP_V4_FLOW:
2421 case AH_V4_FLOW: 2427 case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2425 break; 2431 break;
2426 case TCP_V6_FLOW: 2432 case TCP_V6_FLOW:
2427 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2433 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2434 /* Fall through */
2428 case UDP_V6_FLOW: 2435 case UDP_V6_FLOW:
2429 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) 2436 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
2430 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2437 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2438 /* Fall through */
2431 case SCTP_V6_FLOW: 2439 case SCTP_V6_FLOW:
2432 case AH_ESP_V6_FLOW: 2440 case AH_ESP_V6_FLOW:
2433 case AH_V6_FLOW: 2441 case AH_V6_FLOW:
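The added /* Fall through */ comments mark these cascades as intentional: the TCP case sets the L4 hash bits and then deliberately inherits the UDP and common cases below it. A runnable sketch of the same pattern, with illustrative bit values:

	#include <stdio.h>

	static unsigned int hash_fields(int flow)
	{
		unsigned int data = 0;

		switch (flow) {
		case 2:			/* TCP-like: L4 bits always on */
			data |= 0x3;
			/* Fall through */
		case 1:			/* UDP-like: shared bits */
			data |= 0x4;
			/* Fall through */
		default:		/* common L3 fields */
			data |= 0x8;
		}
		return data;
	}

	int main(void)
	{
		printf("%#x\n", hash_fields(2));	/* prints 0xf */
		return 0;
	}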
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
2730{ 2738{
2731 struct igb_adapter *adapter = netdev_priv(netdev); 2739 struct igb_adapter *adapter = netdev_priv(netdev);
2732 struct e1000_hw *hw = &adapter->hw; 2740 struct e1000_hw *hw = &adapter->hw;
2733 u32 status = E1000_SUCCESS; 2741 u32 status = 0;
2734 u16 sff8472_rev, addr_mode; 2742 u16 sff8472_rev, addr_mode;
2735 bool page_swap = false; 2743 bool page_swap = false;
2736 2744
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
2740 2748
2741 /* Check whether we support SFF-8472 or not */ 2749 /* Check whether we support SFF-8472 or not */
2742 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); 2750 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
2743 if (status != E1000_SUCCESS) 2751 if (status)
2744 return -EIO; 2752 return -EIO;
2745 2753
2746 /* addressing mode is not supported */ 2754 /* addressing mode is not supported */
2747 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); 2755 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
2748 if (status != E1000_SUCCESS) 2756 if (status)
2749 return -EIO; 2757 return -EIO;
2750 2758
2751 /* addressing mode is not supported */ 2759 /* addressing mode is not supported */
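Dropping the driver-private E1000_SUCCESS constant aligns with the kernel-wide convention that zero means success, so the error checks shrink to a bare truth test. The resulting idiom, as a fragment:

	s32 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
	if (status)	/* any non-zero return is an error */
		return -EIO;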
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
2772{ 2780{
2773 struct igb_adapter *adapter = netdev_priv(netdev); 2781 struct igb_adapter *adapter = netdev_priv(netdev);
2774 struct e1000_hw *hw = &adapter->hw; 2782 struct e1000_hw *hw = &adapter->hw;
2775 u32 status = E1000_SUCCESS; 2783 u32 status = 0;
2776 u16 *dataword; 2784 u16 *dataword;
2777 u16 first_word, last_word; 2785 u16 first_word, last_word;
2778 int i = 0; 2786 int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
2791 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 2799 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2792 for (i = 0; i < last_word - first_word + 1; i++) { 2800 for (i = 0; i < last_word - first_word + 1; i++) {
2793 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); 2801 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
2794 if (status != E1000_SUCCESS) { 2802 if (status) {
2795 /* Error occurred while reading module */ 2803 /* Error occurred while reading module */
2796 kfree(dataword); 2804 kfree(dataword);
2797 return -EIO; 2805 return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
2824 return IGB_RETA_SIZE; 2832 return IGB_RETA_SIZE;
2825} 2833}
2826 2834
2827static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) 2835static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
2828{ 2836{
2829 struct igb_adapter *adapter = netdev_priv(netdev); 2837 struct igb_adapter *adapter = netdev_priv(netdev);
2830 int i; 2838 int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
2870 } 2878 }
2871} 2879}
2872 2880
2873static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) 2881static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
2882 const u8 *key)
2874{ 2883{
2875 struct igb_adapter *adapter = netdev_priv(netdev); 2884 struct igb_adapter *adapter = netdev_priv(netdev);
2876 struct e1000_hw *hw = &adapter->hw; 2885 struct e1000_hw *hw = &adapter->hw;
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
3019 .get_module_info = igb_get_module_info, 3028 .get_module_info = igb_get_module_info,
3020 .get_module_eeprom = igb_get_module_eeprom, 3029 .get_module_eeprom = igb_get_module_eeprom,
3021 .get_rxfh_indir_size = igb_get_rxfh_indir_size, 3030 .get_rxfh_indir_size = igb_get_rxfh_indir_size,
3022 .get_rxfh_indir = igb_get_rxfh_indir, 3031 .get_rxfh = igb_get_rxfh,
3023 .set_rxfh_indir = igb_set_rxfh_indir, 3032 .set_rxfh = igb_set_rxfh,
3024 .get_channels = igb_get_channels, 3033 .get_channels = igb_get_channels,
3025 .set_channels = igb_set_channels, 3034 .set_channels = igb_set_channels,
3026 .begin = igb_ethtool_begin, 3035 .begin = igb_ethtool_begin,
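The .get_rxfh_indir/.set_rxfh_indir pair is folded into .get_rxfh/.set_rxfh, which carry both the RSS indirection table and the hash key in one call. A sketch of the combined callback shape, assuming the 3.16-era ethtool_ops signatures; fill_indir_table() is a hypothetical helper:

	/* Tolerate NULL arguments: the core may request only one piece. */
	static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
	{
		if (indir)
			fill_indir_table(dev, indir);
		/* no driver-configurable hash key here, so @key is ignored */
		return 0;
	}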
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
3029 3038
3030void igb_set_ethtool_ops(struct net_device *netdev) 3039void igb_set_ethtool_ops(struct net_device *netdev)
3031{ 3040{
3032 SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); 3041 netdev->ethtool_ops = &igb_ethtool_ops;
3033} 3042}
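SET_ETHTOOL_OPS() was removed tree-wide in this kernel cycle; it was only a wrapper around a pointer assignment, so the open-coded form is equivalent:

	/* Before: SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); */
	netdev->ethtool_ops = &igb_ethtool_ops;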
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#include "igb.h" 24#include "igb.h"
28#include "e1000_82575.h" 25#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..f145adbb55ac 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 25
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
75 [board_82575] = &e1000_82575_info, 72 [board_82575] = &e1000_82575_info,
76}; 73};
77 74
78static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 75static const struct pci_device_id igb_pci_tbl[] = {
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, 76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
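DEFINE_PCI_DEVICE_TABLE is deprecated because it hides the table's type and constness behind a macro; the open-coded form is exactly what the macro expanded to. A minimal sketch (the device ID is illustrative only):

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_VDEVICE(INTEL, 0x1533) },	/* illustrative device ID */
		{ 0, }				/* zeroed sentinel entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);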
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
117 114
118MODULE_DEVICE_TABLE(pci, igb_pci_tbl); 115MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
119 116
120void igb_reset(struct igb_adapter *);
121static int igb_setup_all_tx_resources(struct igb_adapter *); 117static int igb_setup_all_tx_resources(struct igb_adapter *);
122static int igb_setup_all_rx_resources(struct igb_adapter *); 118static int igb_setup_all_rx_resources(struct igb_adapter *);
123static void igb_free_all_tx_resources(struct igb_adapter *); 119static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
141static void igb_watchdog_task(struct work_struct *); 137static void igb_watchdog_task(struct work_struct *);
142static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); 138static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
143static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, 139static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
144 struct rtnl_link_stats64 *stats); 140 struct rtnl_link_stats64 *stats);
145static int igb_change_mtu(struct net_device *, int); 141static int igb_change_mtu(struct net_device *, int);
146static int igb_set_mac(struct net_device *, void *); 142static int igb_set_mac(struct net_device *, void *);
147static void igb_set_uta(struct igb_adapter *adapter); 143static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
159static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 155static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
160static void igb_tx_timeout(struct net_device *); 156static void igb_tx_timeout(struct net_device *);
161static void igb_reset_task(struct work_struct *); 157static void igb_reset_task(struct work_struct *);
162static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); 158static void igb_vlan_mode(struct net_device *netdev,
159 netdev_features_t features);
163static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); 160static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
164static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); 161static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
165static void igb_restore_vlan(struct igb_adapter *); 162static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
172static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); 169static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
173static int igb_ndo_set_vf_vlan(struct net_device *netdev, 170static int igb_ndo_set_vf_vlan(struct net_device *netdev,
174 int vf, u16 vlan, u8 qos); 171 int vf, u16 vlan, u8 qos);
175static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 172static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
176static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, 173static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
177 bool setting); 174 bool setting);
178static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 175static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
215static void igb_netpoll(struct net_device *); 212static void igb_netpoll(struct net_device *);
216#endif 213#endif
217#ifdef CONFIG_PCI_IOV 214#ifdef CONFIG_PCI_IOV
218static unsigned int max_vfs = 0; 215static unsigned int max_vfs;
219module_param(max_vfs, uint, 0); 216module_param(max_vfs, uint, 0);
220MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " 217MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
221 "per physical function");
222#endif /* CONFIG_PCI_IOV */ 218#endif /* CONFIG_PCI_IOV */
223 219
224static pci_ers_result_t igb_io_error_detected(struct pci_dev *, 220static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
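Dropping "= 0" from max_vfs follows checkpatch: C guarantees that objects with static storage duration start out zero, so the explicit initializer is noise. The MODULE_PARM_DESC string is also unsplit so the full message can be found with grep. A standalone illustration of the zero-init rule:

	#include <stdio.h>

	static unsigned int max_vfs;	/* static storage: starts at zero */

	int main(void)
	{
		printf("%u\n", max_vfs);	/* prints 0 */
		return 0;
	}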
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
384 /* Print netdevice Info */ 380 /* Print netdevice Info */
385 if (netdev) { 381 if (netdev) {
386 dev_info(&adapter->pdev->dev, "Net device Info\n"); 382 dev_info(&adapter->pdev->dev, "Net device Info\n");
387 pr_info("Device Name state trans_start " 383 pr_info("Device Name state trans_start last_rx\n");
388 "last_rx\n");
389 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, 384 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
390 netdev->state, netdev->trans_start, netdev->last_rx); 385 netdev->state, netdev->trans_start, netdev->last_rx);
391 } 386 }
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
438 pr_info("------------------------------------\n"); 433 pr_info("------------------------------------\n");
439 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); 434 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
440 pr_info("------------------------------------\n"); 435 pr_info("------------------------------------\n");
441 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " 436 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
442 "[bi->dma ] leng ntw timestamp "
443 "bi->skb\n");
444 437
445 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 438 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
446 const char *next_desc; 439 const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
458 else 451 else
459 next_desc = ""; 452 next_desc = "";
460 453
461 pr_info("T [0x%03X] %016llX %016llX %016llX" 454 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
462 " %04X %p %016llX %p%s\n", i, 455 i, le64_to_cpu(u0->a),
463 le64_to_cpu(u0->a),
464 le64_to_cpu(u0->b), 456 le64_to_cpu(u0->b),
465 (u64)dma_unmap_addr(buffer_info, dma), 457 (u64)dma_unmap_addr(buffer_info, dma),
466 dma_unmap_len(buffer_info, len), 458 dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
519 pr_info("------------------------------------\n"); 511 pr_info("------------------------------------\n");
520 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); 512 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
521 pr_info("------------------------------------\n"); 513 pr_info("------------------------------------\n");
522 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " 514 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
523 "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); 515 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
524 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
525 "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
526 516
527 for (i = 0; i < rx_ring->count; i++) { 517 for (i = 0; i < rx_ring->count; i++) {
528 const char *next_desc; 518 const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
584 struct e1000_hw *hw = &adapter->hw; 574 struct e1000_hw *hw = &adapter->hw;
585 s32 i2cctl = rd32(E1000_I2CPARAMS); 575 s32 i2cctl = rd32(E1000_I2CPARAMS);
586 576
587 return ((i2cctl & E1000_I2C_DATA_IN) != 0); 577 return !!(i2cctl & E1000_I2C_DATA_IN);
588} 578}
589 579
590/** 580/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
648 struct e1000_hw *hw = &adapter->hw; 638 struct e1000_hw *hw = &adapter->hw;
649 s32 i2cctl = rd32(E1000_I2CPARAMS); 639 s32 i2cctl = rd32(E1000_I2CPARAMS);
650 640
651 return ((i2cctl & E1000_I2C_CLK_IN) != 0); 641 return !!(i2cctl & E1000_I2C_CLK_IN);
652} 642}
653 643
654static const struct i2c_algo_bit_data igb_i2c_algo = { 644static const struct i2c_algo_bit_data igb_i2c_algo = {
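Returning !!(i2cctl & E1000_I2C_DATA_IN) instead of a "!= 0" comparison is the usual kernel spelling for collapsing a bit test to exactly 0 or 1. A standalone illustration:

	#include <stdio.h>

	static int bit_set(unsigned int reg, unsigned int mask)
	{
		return !!(reg & mask);	/* exactly 0 or 1 */
	}

	int main(void)
	{
		printf("%d %d\n", bit_set(0x80, 0x80), bit_set(0, 0x80));
		/* prints "1 0", not "128 0" */
		return 0;
	}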
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
681static int __init igb_init_module(void) 671static int __init igb_init_module(void)
682{ 672{
683 int ret; 673 int ret;
674
684 pr_info("%s - version %s\n", 675 pr_info("%s - version %s\n",
685 igb_driver_string, igb_driver_version); 676 igb_driver_string, igb_driver_version);
686
687 pr_info("%s\n", igb_copyright); 677 pr_info("%s\n", igb_copyright);
688 678
689#ifdef CONFIG_IGB_DCA 679#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
736 adapter->rx_ring[i]->reg_idx = rbase_offset + 726 adapter->rx_ring[i]->reg_idx = rbase_offset +
737 Q_IDX_82576(i); 727 Q_IDX_82576(i);
738 } 728 }
729 /* Fall through */
739 case e1000_82575: 730 case e1000_82575:
740 case e1000_82580: 731 case e1000_82580:
741 case e1000_i350: 732 case e1000_i350:
742 case e1000_i354: 733 case e1000_i354:
743 case e1000_i210: 734 case e1000_i210:
744 case e1000_i211: 735 case e1000_i211:
736 /* Fall through */
745 default: 737 default:
746 for (; i < adapter->num_rx_queues; i++) 738 for (; i < adapter->num_rx_queues; i++)
747 adapter->rx_ring[i]->reg_idx = rbase_offset + i; 739 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1292 if (adapter->hw.mac.type >= e1000_82576) 1284 if (adapter->hw.mac.type >= e1000_82576)
1293 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 1285 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1294 1286
1295 /* 1287 /* On i350, i354, i210, and i211, loopback VLAN packets
1296 * On i350, i354, i210, and i211, loopback VLAN packets
1297 * have the tag byte-swapped. 1288 * have the tag byte-swapped.
1298 */ 1289 */
1299 if (adapter->hw.mac.type >= e1000_i350) 1290 if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1345 for (; v_idx < q_vectors; v_idx++) { 1336 for (; v_idx < q_vectors; v_idx++) {
1346 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 1337 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1347 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 1338 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1339
1348 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, 1340 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1349 tqpv, txr_idx, rqpv, rxr_idx); 1341 tqpv, txr_idx, rqpv, rxr_idx);
1350 1342
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1484 */ 1476 */
1485 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1477 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1486 u32 regval = rd32(E1000_EIAM); 1478 u32 regval = rd32(E1000_EIAM);
1479
1487 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 1480 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1488 wr32(E1000_EIMC, adapter->eims_enable_mask); 1481 wr32(E1000_EIMC, adapter->eims_enable_mask);
1489 regval = rd32(E1000_EIAC); 1482 regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1495 wrfl(); 1488 wrfl();
1496 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1489 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1497 int i; 1490 int i;
1491
1498 for (i = 0; i < adapter->num_q_vectors; i++) 1492 for (i = 0; i < adapter->num_q_vectors; i++)
1499 synchronize_irq(adapter->msix_entries[i].vector); 1493 synchronize_irq(adapter->msix_entries[i].vector);
1500 } else { 1494 } else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
1513 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 1507 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1514 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; 1508 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1515 u32 regval = rd32(E1000_EIAC); 1509 u32 regval = rd32(E1000_EIAC);
1510
1516 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 1511 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1517 regval = rd32(E1000_EIAM); 1512 regval = rd32(E1000_EIAM);
1518 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 1513 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
1745 /* notify VFs that reset has been completed */ 1740 /* notify VFs that reset has been completed */
1746 if (adapter->vfs_allocated_count) { 1741 if (adapter->vfs_allocated_count) {
1747 u32 reg_data = rd32(E1000_CTRL_EXT); 1742 u32 reg_data = rd32(E1000_CTRL_EXT);
1743
1748 reg_data |= E1000_CTRL_EXT_PFRSTD; 1744 reg_data |= E1000_CTRL_EXT_PFRSTD;
1749 wr32(E1000_CTRL_EXT, reg_data); 1745 wr32(E1000_CTRL_EXT, reg_data);
1750 } 1746 }
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
1787 wr32(E1000_TCTL, tctl); 1783 wr32(E1000_TCTL, tctl);
1788 /* flush both disables and wait for them to finish */ 1784 /* flush both disables and wait for them to finish */
1789 wrfl(); 1785 wrfl();
1790 msleep(10); 1786 usleep_range(10000, 11000);
1791 1787
1792 igb_irq_disable(adapter); 1788 igb_irq_disable(adapter);
1793 1789
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1827{ 1823{
1828 WARN_ON(in_interrupt()); 1824 WARN_ON(in_interrupt());
1829 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 1825 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1830 msleep(1); 1826 usleep_range(1000, 2000);
1831 igb_down(adapter); 1827 igb_down(adapter);
1832 igb_up(adapter); 1828 igb_up(adapter);
1833 clear_bit(__IGB_RESETTING, &adapter->state); 1829 clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
1960 /* disable receive for all VFs and wait one second */ 1956 /* disable receive for all VFs and wait one second */
1961 if (adapter->vfs_allocated_count) { 1957 if (adapter->vfs_allocated_count) {
1962 int i; 1958 int i;
1959
1963 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1960 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1964 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; 1961 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1965 1962
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
2087 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, 2084 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2088 .ndo_set_vf_mac = igb_ndo_set_vf_mac, 2085 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2089 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, 2086 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2090 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, 2087 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2091 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, 2088 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2092 .ndo_get_vf_config = igb_ndo_get_vf_config, 2089 .ndo_get_vf_config = igb_ndo_get_vf_config,
2093#ifdef CONFIG_NET_POLL_CONTROLLER 2090#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
2142 } 2139 }
2143 break; 2140 break;
2144 } 2141 }
2145 return;
2146} 2142}
2147 2143
2148/** 2144/**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
2203 **/ 2199 **/
2204static s32 igb_init_i2c(struct igb_adapter *adapter) 2200static s32 igb_init_i2c(struct igb_adapter *adapter)
2205{ 2201{
2206 s32 status = E1000_SUCCESS; 2202 s32 status = 0;
2207 2203
2208 /* I2C interface supported on i350 devices */ 2204 /* I2C interface supported on i350 devices */
2209 if (adapter->hw.mac.type != e1000_i350) 2205 if (adapter->hw.mac.type != e1000_i350)
2210 return E1000_SUCCESS; 2206 return 0;
2211 2207
2212 /* Initialize the i2c bus which is controlled by the registers. 2208 /* Initialize the i2c bus which is controlled by the registers.
2213 * This bus will use the i2c_algo_bit structure that implements 2209 * This bus will use the i2c_algo_bit structure that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2437 /* get firmware version for ethtool -i */ 2433 /* get firmware version for ethtool -i */
2438 igb_set_fw_version(adapter); 2434 igb_set_fw_version(adapter);
2439 2435
2436 /* configure RXPBSIZE and TXPBSIZE */
2437 if (hw->mac.type == e1000_i210) {
2438 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
2439 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
2440 }
2441
2440 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2442 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2441 (unsigned long) adapter); 2443 (unsigned long) adapter);
2442 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2444 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2529 } 2531 }
2530 2532
2531 /* let the f/w know that the h/w is now under the control of the 2533 /* let the f/w know that the h/w is now under the control of the
2532 * driver. */ 2534 * driver.
2535 */
2533 igb_get_hw_control(adapter); 2536 igb_get_hw_control(adapter);
2534 2537
2535 strcpy(netdev->name, "eth%d"); 2538 strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
3077 /* notify VFs that reset has been completed */ 3080 /* notify VFs that reset has been completed */
3078 if (adapter->vfs_allocated_count) { 3081 if (adapter->vfs_allocated_count) {
3079 u32 reg_data = rd32(E1000_CTRL_EXT); 3082 u32 reg_data = rd32(E1000_CTRL_EXT);
3083
3080 reg_data |= E1000_CTRL_EXT_PFRSTD; 3084 reg_data |= E1000_CTRL_EXT_PFRSTD;
3081 wr32(E1000_CTRL_EXT, reg_data); 3085 wr32(E1000_CTRL_EXT, reg_data);
3082 } 3086 }
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
3248 * Configure a transmit ring after a reset. 3252 * Configure a transmit ring after a reset.
3249 **/ 3253 **/
3250void igb_configure_tx_ring(struct igb_adapter *adapter, 3254void igb_configure_tx_ring(struct igb_adapter *adapter,
3251 struct igb_ring *ring) 3255 struct igb_ring *ring)
3252{ 3256{
3253 struct e1000_hw *hw = &adapter->hw; 3257 struct e1000_hw *hw = &adapter->hw;
3254 u32 txdctl = 0; 3258 u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3389 3393
3390 if (adapter->rss_indir_tbl_init != num_rx_queues) { 3394 if (adapter->rss_indir_tbl_init != num_rx_queues) {
3391 for (j = 0; j < IGB_RETA_SIZE; j++) 3395 for (j = 0; j < IGB_RETA_SIZE; j++)
3392 adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE; 3396 adapter->rss_indir_tbl[j] =
3397 (j * num_rx_queues) / IGB_RETA_SIZE;
3393 adapter->rss_indir_tbl_init = num_rx_queues; 3398 adapter->rss_indir_tbl_init = num_rx_queues;
3394 } 3399 }
3395 igb_write_rss_indir_tbl(adapter); 3400 igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3430 if (hw->mac.type > e1000_82575) { 3435 if (hw->mac.type > e1000_82575) {
3431 /* Set the default pool for the PF's first queue */ 3436 /* Set the default pool for the PF's first queue */
3432 u32 vtctl = rd32(E1000_VT_CTL); 3437 u32 vtctl = rd32(E1000_VT_CTL);
3438
3433 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 3439 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3434 E1000_VT_CTL_DISABLE_DEF_POOL); 3440 E1000_VT_CTL_DISABLE_DEF_POOL);
3435 vtctl |= adapter->vfs_allocated_count << 3441 vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3511} 3517}
3512 3518
3513static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, 3519static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3514 int vfn) 3520 int vfn)
3515{ 3521{
3516 struct e1000_hw *hw = &adapter->hw; 3522 struct e1000_hw *hw = &adapter->hw;
3517 u32 vmolr; 3523 u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
4058 switch (hw->mac.type) { 4064 switch (hw->mac.type) {
4059 case e1000_82576: 4065 case e1000_82576:
4060 case e1000_i350: 4066 case e1000_i350:
4061 if (!(wvbr = rd32(E1000_WVBR))) 4067 wvbr = rd32(E1000_WVBR);
4068 if (!wvbr)
4062 return; 4069 return;
4063 break; 4070 break;
4064 default: 4071 default:
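Pulling the rd32() out of the if condition satisfies checkpatch's "do not use assignment in if condition" rule and keeps the register read visible. The resulting shape, as a fragment:

	wvbr = rd32(E1000_WVBR);	/* read first... */
	if (!wvbr)			/* ...then test */
		return;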
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
4077 if (!adapter->wvbr) 4084 if (!adapter->wvbr)
4078 return; 4085 return;
4079 4086
4080 for(j = 0; j < adapter->vfs_allocated_count; j++) { 4087 for (j = 0; j < adapter->vfs_allocated_count; j++) {
4081 if (adapter->wvbr & (1 << j) || 4088 if (adapter->wvbr & (1 << j) ||
4082 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { 4089 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
4083 dev_warn(&adapter->pdev->dev, 4090 dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
4209 4216
4210 if (!netif_carrier_ok(netdev)) { 4217 if (!netif_carrier_ok(netdev)) {
4211 u32 ctrl; 4218 u32 ctrl;
4219
4212 hw->mac.ops.get_speed_and_duplex(hw, 4220 hw->mac.ops.get_speed_and_duplex(hw,
4213 &adapter->link_speed, 4221 &adapter->link_speed,
4214 &adapter->link_duplex); 4222 &adapter->link_duplex);
4215 4223
4216 ctrl = rd32(E1000_CTRL); 4224 ctrl = rd32(E1000_CTRL);
4217 /* Links status message must follow this format */ 4225 /* Links status message must follow this format */
4218 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " 4226 netdev_info(netdev,
4219 "Duplex, Flow Control: %s\n", 4227 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4220 netdev->name, 4228 netdev->name,
4221 adapter->link_speed, 4229 adapter->link_speed,
4222 adapter->link_duplex == FULL_DUPLEX ? 4230 adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
4242 4250
4243 /* check for thermal sensor event */ 4251 /* check for thermal sensor event */
4244 if (igb_thermal_sensor_event(hw, 4252 if (igb_thermal_sensor_event(hw,
4245 E1000_THSTAT_LINK_THROTTLE)) { 4253 E1000_THSTAT_LINK_THROTTLE))
4246 netdev_info(netdev, "The network adapter link " 4254 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
4247 "speed was downshifted because it "
4248 "overheated\n");
4249 }
4250 4255
4251 /* adjust timeout factor according to speed/duplex */ 4256 /* adjust timeout factor according to speed/duplex */
4252 adapter->tx_timeout_factor = 1; 4257 adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
4277 /* check for thermal sensor event */ 4282 /* check for thermal sensor event */
4278 if (igb_thermal_sensor_event(hw, 4283 if (igb_thermal_sensor_event(hw,
4279 E1000_THSTAT_PWR_DOWN)) { 4284 E1000_THSTAT_PWR_DOWN)) {
4280 netdev_err(netdev, "The network adapter was " 4285 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
4281 "stopped because it overheated\n");
4282 } 4286 }
4283 4287
4284 /* Links status message must follow this format */ 4288 /* Links status message must follow this format */
4285 printk(KERN_INFO "igb: %s NIC Link is Down\n", 4289 netdev_info(netdev, "igb: %s NIC Link is Down\n",
4286 netdev->name); 4290 netdev->name);
4287 netif_carrier_off(netdev); 4291 netif_carrier_off(netdev);
4288 4292
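The link-status messages move from raw printk(KERN_INFO ...) to netdev_info(), which ties the message to the right device and prefixes it with driver, bus address, and interface name automatically, so in principle the hand-rolled "igb: %s" and netdev->name arguments become redundant. A fragment, with the log output shown as an assumption of the usual dev_printk format:

	netdev_info(netdev, "NIC Link is Down\n");
	/* logs roughly "igb 0000:01:00.0 eth0: NIC Link is Down";
	 * the exact prefix depends on the dev_printk format in use. */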
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
4344 /* Cause software interrupt to ensure Rx ring is cleaned */ 4348 /* Cause software interrupt to ensure Rx ring is cleaned */
4345 if (adapter->flags & IGB_FLAG_HAS_MSIX) { 4349 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
4346 u32 eics = 0; 4350 u32 eics = 0;
4351
4347 for (i = 0; i < adapter->num_q_vectors; i++) 4352 for (i = 0; i < adapter->num_q_vectors; i++)
4348 eics |= adapter->q_vector[i]->eims_value; 4353 eics |= adapter->q_vector[i]->eims_value;
4349 wr32(E1000_EICS, eics); 4354 wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
4483 case low_latency: /* 50 usec aka 20000 ints/s */ 4488 case low_latency: /* 50 usec aka 20000 ints/s */
4484 if (bytes > 10000) { 4489 if (bytes > 10000) {
4485 /* this if handles the TSO accounting */ 4490 /* this if handles the TSO accounting */
4486 if (bytes/packets > 8000) { 4491 if (bytes/packets > 8000)
4487 itrval = bulk_latency; 4492 itrval = bulk_latency;
4488 } else if ((packets < 10) || ((bytes/packets) > 1200)) { 4493 else if ((packets < 10) || ((bytes/packets) > 1200))
4489 itrval = bulk_latency; 4494 itrval = bulk_latency;
4490 } else if ((packets > 35)) { 4495 else if ((packets > 35))
4491 itrval = lowest_latency; 4496 itrval = lowest_latency;
4492 }
4493 } else if (bytes/packets > 2000) { 4497 } else if (bytes/packets > 2000) {
4494 itrval = bulk_latency; 4498 itrval = bulk_latency;
4495 } else if (packets <= 2 && bytes < 512) { 4499 } else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4675 return; 4679 return;
4676 } else { 4680 } else {
4677 u8 l4_hdr = 0; 4681 u8 l4_hdr = 0;
4682
4678 switch (first->protocol) { 4683 switch (first->protocol) {
4679 case htons(ETH_P_IP): 4684 case htons(ETH_P_IP):
4680 vlan_macip_lens |= skb_network_header_len(skb); 4685 vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4962 */ 4967 */
4963 if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { 4968 if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
4964 unsigned short f; 4969 unsigned short f;
4970
4965 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 4971 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4966 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 4972 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4967 } else { 4973 } else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
5140 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 5146 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5141 5147
5142 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 5148 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
5143 msleep(1); 5149 usleep_range(1000, 2000);
5144 5150
5145 /* igb_down has a dependency on max_frame_size */ 5151 /* igb_down has a dependency on max_frame_size */
5146 adapter->max_frame_size = max_frame; 5152 adapter->max_frame_size = max_frame;
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5621 vmolr |= E1000_VMOLR_MPME; 5627 vmolr |= E1000_VMOLR_MPME;
5622 } else if (vf_data->num_vf_mc_hashes) { 5628 } else if (vf_data->num_vf_mc_hashes) {
5623 int j; 5629 int j;
5630
5624 vmolr |= E1000_VMOLR_ROMPE; 5631 vmolr |= E1000_VMOLR_ROMPE;
5625 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 5632 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5626 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 5633 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5672 5679
5673 for (i = 0; i < adapter->vfs_allocated_count; i++) { 5680 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5674 u32 vmolr = rd32(E1000_VMOLR(i)); 5681 u32 vmolr = rd32(E1000_VMOLR(i));
5682
5675 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 5683 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5676 5684
5677 vf_data = &adapter->vf_data[i]; 5685 vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5770 5778
5771 if (!adapter->vf_data[vf].vlans_enabled) { 5779 if (!adapter->vf_data[vf].vlans_enabled) {
5772 u32 size; 5780 u32 size;
5781
5773 reg = rd32(E1000_VMOLR(vf)); 5782 reg = rd32(E1000_VMOLR(vf));
5774 size = reg & E1000_VMOLR_RLPML_MASK; 5783 size = reg & E1000_VMOLR_RLPML_MASK;
5775 size += 4; 5784 size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5798 adapter->vf_data[vf].vlans_enabled--; 5807 adapter->vf_data[vf].vlans_enabled--;
5799 if (!adapter->vf_data[vf].vlans_enabled) { 5808 if (!adapter->vf_data[vf].vlans_enabled) {
5800 u32 size; 5809 u32 size;
5810
5801 reg = rd32(E1000_VMOLR(vf)); 5811 reg = rd32(E1000_VMOLR(vf));
5802 size = reg & E1000_VMOLR_RLPML_MASK; 5812 size = reg & E1000_VMOLR_RLPML_MASK;
5803 size -= 4; 5813 size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5902 */ 5912 */
5903 if (!add && (adapter->netdev->flags & IFF_PROMISC)) { 5913 if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
5904 u32 vlvf, bits; 5914 u32 vlvf, bits;
5905
5906 int regndx = igb_find_vlvf_entry(adapter, vid); 5915 int regndx = igb_find_vlvf_entry(adapter, vid);
5916
5907 if (regndx < 0) 5917 if (regndx < 0)
5908 goto out; 5918 goto out;
5909 /* See if any other pools are set for this VLAN filter 5919 /* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6494 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 6504 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6495 6505
6496 /* transfer page from old buffer to new buffer */ 6506 /* transfer page from old buffer to new buffer */
6497 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer)); 6507 *new_buff = *old_buff;
6498 6508
6499 /* sync the buffer for use by the device */ 6509 /* sync the buffer for use by the device */
6500 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 6510 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
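Replacing memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer)) with a structure assignment lets the compiler type-check both operands and pick the copy strategy; a size or type mistake becomes a compile error instead of silent corruption. A standalone sketch with a stand-in struct:

	struct rx_buffer {		/* stand-in for struct igb_rx_buffer */
		unsigned long dma;
		void *page;
		unsigned int page_offset;
	};

	static void reuse_buffer(struct rx_buffer *new_buff,
				 const struct rx_buffer *old_buff)
	{
		*new_buff = *old_buff;	/* type-checked, sized by compiler */
	}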
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6963 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6973 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6964 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6974 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6965 u16 vid; 6975 u16 vid;
6976
6966 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && 6977 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6967 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 6978 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6968 vid = be16_to_cpu(rx_desc->wb.upper.vlan); 6979 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7051 if (cleaned_count) 7062 if (cleaned_count)
7052 igb_alloc_rx_buffers(rx_ring, cleaned_count); 7063 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7053 7064
7054 return (total_packets < budget); 7065 return total_packets < budget;
7055} 7066}
7056 7067
7057static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 7068static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7172 break; 7183 break;
7173 case SIOCGMIIREG: 7184 case SIOCGMIIREG:
7174 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 7185 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
7175 &data->val_out)) 7186 &data->val_out))
7176 return -EIO; 7187 return -EIO;
7177 break; 7188 break;
7178 case SIOCSMIIREG: 7189 case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7873 } 7884 }
7874} 7885}
7875 7886
7876static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 7887static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
7888 int min_tx_rate, int max_tx_rate)
7877{ 7889{
7878 struct igb_adapter *adapter = netdev_priv(netdev); 7890 struct igb_adapter *adapter = netdev_priv(netdev);
7879 struct e1000_hw *hw = &adapter->hw; 7891 struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7882 if (hw->mac.type != e1000_82576) 7894 if (hw->mac.type != e1000_82576)
7883 return -EOPNOTSUPP; 7895 return -EOPNOTSUPP;
7884 7896
7897 if (min_tx_rate)
7898 return -EINVAL;
7899
7885 actual_link_speed = igb_link_mbps(adapter->link_speed); 7900 actual_link_speed = igb_link_mbps(adapter->link_speed);
7886 if ((vf >= adapter->vfs_allocated_count) || 7901 if ((vf >= adapter->vfs_allocated_count) ||
7887 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || 7902 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7888 (tx_rate < 0) || (tx_rate > actual_link_speed)) 7903 (max_tx_rate < 0) ||
7904 (max_tx_rate > actual_link_speed))
7889 return -EINVAL; 7905 return -EINVAL;
7890 7906
7891 adapter->vf_rate_link_speed = actual_link_speed; 7907 adapter->vf_rate_link_speed = actual_link_speed;
7892 adapter->vf_data[vf].tx_rate = (u16)tx_rate; 7908 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
7893 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); 7909 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
7894 7910
7895 return 0; 7911 return 0;
7896} 7912}
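ndo_set_vf_tx_rate (a single ceiling) became ndo_set_vf_rate with explicit minimum and maximum rates; since this hardware can only cap transmit bandwidth, a non-zero minimum is rejected and max_tx_rate takes the role of the old tx_rate. A sketch of the callback shape, assuming the 3.16-era signature; apply_tx_cap() is a hypothetical helper:

	static int example_set_vf_rate(struct net_device *dev, int vf,
				       int min_tx_rate, int max_tx_rate)
	{
		if (min_tx_rate)
			return -EINVAL;	/* no bandwidth floor support */
		return apply_tx_cap(dev, vf, max_tx_rate);
	}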
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
     wr32(reg_offset, reg_val);
 
     adapter->vf_data[vf].spoofchk_enabled = setting;
-    return E1000_SUCCESS;
+    return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
         return -EINVAL;
     ivi->vf = vf;
     memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-    ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+    ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+    ivi->min_tx_rate = 0;
     ivi->vlan = adapter->vf_data[vf].pf_vlan;
     ivi->qos = adapter->vf_data[vf].pf_qos;
     ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
         reg = rd32(E1000_DTXCTL);
         reg |= E1000_DTXCTL_VLAN_ADDED;
         wr32(E1000_DTXCTL, reg);
+        /* Fall through */
     case e1000_82580:
         /* enable replication vlan tag stripping */
         reg = rd32(E1000_RPLOLR);
         reg |= E1000_RPLOLR_STRVLAN;
         wr32(E1000_RPLOLR, reg);
+        /* Fall through */
     case e1000_i350:
         /* none of the above registers are supported by i350 */
         break;
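The added /* Fall through */ comments only document what the switch already did: each MAC generation deliberately continues into the next case, so an 82576 also takes the 82580 path and so on down to i350. Annotating the intent keeps static checkers quiet and, with later compilers, satisfies -Wimplicit-fallthrough. The shape in miniature (illustrative, not driver code):

    switch (mac_type) {
    case MAC_OLDEST:
        setup_quirk_a();        /* only the oldest part needs this */
        /* Fall through */
    case MAC_MIDDLE:
        setup_quirk_b();        /* shared by the two older parts */
        /* Fall through */
    case MAC_NEWEST:
        break;                  /* newest part needs none of it */
    }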
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
        } /* endif adapter->dmac is not disabled */
    } else if (hw->mac.type == e1000_82580) {
        u32 reg = rd32(E1000_PCIEMISC);
+
        wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
        wr32(E1000_DMACR, 0);
    }
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 
    swfw_mask = E1000_SWFW_PHY0_SM;
 
-    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
-        != E1000_SUCCESS)
+    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
        return E1000_ERR_SWFW_SYNC;
 
    status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
        return E1000_ERR_I2C;
    else {
        *data = status;
-        return E1000_SUCCESS;
+        return 0;
    }
 }
 
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
    if (!this_client)
        return E1000_ERR_I2C;
 
-    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
        return E1000_ERR_SWFW_SYNC;
    status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
    hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
    if (status)
        return E1000_ERR_I2C;
    else
-        return E1000_SUCCESS;
+        return 0;
 
 }
 
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49365f7..794c139f0cc0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
    return 0;
 }
 
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
-                          struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+                                  struct ptp_clock_request *rq, int on)
 {
    return -EOPNOTSUPP;
 }
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
    return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
        -EFAULT : 0;
 }
+
 /**
- * igb_ptp_set_ts_config - control hardware time stamping
- * @netdev:
- * @ifreq:
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
  * disable it when requested, although it shouldn't case any overhead
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
  * type has to be specified. Matching the kind of event packet is
  * not supported, with the exception of "all V2 events regardless of
  * level 2 or 4".
- **/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+                                      struct hwtstamp_config *config)
 {
-    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
-    struct hwtstamp_config *config = &adapter->tstamp_config;
    u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
    u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
    u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
    bool is_l2 = false;
    u32 regval;
 
-    if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
-        return -EFAULT;
-
    /* reserved for future extensions */
    if (config->flags)
        return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
    regval = rd32(E1000_RXSTMPL);
    regval = rd32(E1000_RXSTMPH);
 
-    return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+    return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev:
+ * @ifreq:
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+    struct igb_adapter *adapter = netdev_priv(netdev);
+    struct hwtstamp_config config;
+    int err;
+
+    if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+        return -EFAULT;
+
+    err = igb_ptp_set_timestamp_mode(adapter, &config);
+    if (err)
+        return err;
+
+    /* save these settings for future reference */
+    memcpy(&adapter->tstamp_config, &config,
+           sizeof(adapter->tstamp_config));
+
+    return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
        -EFAULT : 0;
 }
 
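The refactor leaves igb_ptp_set_ts_config() as a thin copy-in/copy-out wrapper and moves the register programming into igb_ptp_set_timestamp_mode(), which igb_ptp_reset() can now replay on the stashed config. The userspace contract is unchanged; a minimal caller of the ioctl looks roughly like this (the socket and interface name are placeholders, and the driver may rewrite cfg to reflect what it actually enabled):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int enable_rx_timestamps(int sock_fd, const char *ifname)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_OFF;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);   /* 0 on success */
    }

sock_fd can be any socket that accepts interface ioctls, e.g. socket(AF_INET, SOCK_DGRAM, 0).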
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
        adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
        adapter->ptp_caps.settime = igb_ptp_settime_82576;
-        adapter->ptp_caps.enable = igb_ptp_enable;
+        adapter->ptp_caps.enable = igb_ptp_feature_enable;
        adapter->cc.read = igb_ptp_read_82576;
        adapter->cc.mask = CLOCKSOURCE_MASK(64);
        adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
        adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
        adapter->ptp_caps.settime = igb_ptp_settime_82576;
-        adapter->ptp_caps.enable = igb_ptp_enable;
+        adapter->ptp_caps.enable = igb_ptp_feature_enable;
        adapter->cc.read = igb_ptp_read_82580;
        adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
        adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
        adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
        adapter->ptp_caps.settime = igb_ptp_settime_i210;
-        adapter->ptp_caps.enable = igb_ptp_enable;
+        adapter->ptp_caps.enable = igb_ptp_feature_enable;
        /* Enable the timer functions by clearing bit 31. */
        wr32(E1000_TSAUXC, 0x0);
        break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
        wr32(E1000_IMS, E1000_IMS_TS);
    }
 
+    adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+    adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
    adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                            &adapter->pdev->dev);
    if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
        return;
 
    /* reset the tstamp_config */
-    memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+    igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
 
    switch (adapter->hw.mac.type) {
    case e1000_82576:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f4..2178f87e9f61 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
        else
            ecmd->duplex = DUPLEX_HALF;
    } else {
-        ethtool_cmd_speed_set(ecmd, -1);
-        ecmd->duplex = -1;
+        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+        ecmd->duplex = DUPLEX_UNKNOWN;
    }
 
    ecmd->autoneg = AUTONEG_DISABLE;
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
 static void igbvf_get_pauseparam(struct net_device *netdev,
                                  struct ethtool_pauseparam *pause)
 {
-    return;
 }
 
 static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
 {
-    SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+    netdev->ethtool_ops = &igbvf_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e36..b311e9e710d2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        ethtool_cmd_speed_set(ecmd, SPEED_10000);
        ecmd->duplex = DUPLEX_FULL;
    } else {
-        ethtool_cmd_speed_set(ecmd, -1);
-        ecmd->duplex = -1;
+        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+        ecmd->duplex = DUPLEX_UNKNOWN;
    }
 
    ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
 {
-    SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+    netdev->ethtool_ops = &ixgb_ethtool_ops;
 }
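SET_ETHTOOL_OPS() was only ever a wrapper around this assignment; its definition was essentially:

    #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

With the macro removed tree-wide this cycle, drivers store the ops pointer directly, as both ethtool hunks above now do. The empty igbvf_get_pauseparam() similarly loses its bare return;, which is redundant at the end of a void function.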
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..ac9f2148cdc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
 struct vf_macvlans {
    struct list_head l;
    int vf;
-    int rar_entry;
    bool free;
    bool is_macvlan;
    u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
    for (pos = (head).ring; pos != NULL; pos = pos->next)
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
-                               ? 8 : 1)
+                               ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 
 /* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
 #define MAX_MSIX_VECTORS_82598 18
 #define MAX_Q_VECTORS_82598 16
 
+struct ixgbe_mac_addr {
+    u8 addr[ETH_ALEN];
+    u16 queue;
+    u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 
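state is a bitmask, so an entry can be DEFAULT and IN_USE at once while MODIFIED marks it dirty; a sync pass then only touches the RAR slots that actually changed. A rough sketch of such a walker (foo_write_rar()/foo_clear_rar() are invented stand-ins for the RAR accessors; ixgbe's real logic lives in ixgbe_main.c and may differ):

    static void foo_sync_mac_table(struct ixgbe_mac_addr *table, int entries)
    {
        int i;

        for (i = 0; i < entries; i++) {
            if (!(table[i].state & IXGBE_MAC_STATE_MODIFIED))
                continue;               /* unchanged since last sync */
            if (table[i].state & IXGBE_MAC_STATE_IN_USE)
                foo_write_rar(i, table[i].addr, table[i].queue);
            else
                foo_clear_rar(i);       /* slot was freed */
            table[i].state &= ~IXGBE_MAC_STATE_MODIFIED;
        }
    }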
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
 
    u32 timer_event_accumulator;
    u32 vferr_refcount;
+    struct ixgbe_mac_addr *mac_table;
    struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
    struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                         u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+                         u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+                         u8 *addr, u16 queue);
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                                   struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 }
 
 void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..15609331ec17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
 #define IXGBE_82598_RX_PB_SIZE 512
 
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+                                         ixgbe_link_speed speed,
+                                         bool autoneg_wait_to_complete);
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                       u8 *eeprom_data);
+                                       u8 *eeprom_data);
 
 /**
  * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
        phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
        phy->ops.check_link = &ixgbe_check_phy_link_tnx;
        phy->ops.get_firmware_version =
-            &ixgbe_get_phy_firmware_version_tnx;
+            &ixgbe_get_phy_firmware_version_tnx;
        break;
    case ixgbe_phy_nl:
        phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
        /* Check to see if SFP+ module is supported */
        ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-                                                      &list_offset,
-                                                      &data_offset);
+                                                      &list_offset,
+                                                      &data_offset);
        if (ret_val != 0) {
            ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
            goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 * Determines the link capabilities by reading the AUTOC register.
 **/
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
 {
    s32 status = 0;
    u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
    int i;
    bool link_up;
 
-    /*
-     * Validate the water mark configuration for packet buffer 0. Zero
-     * water marks indicate that the packet buffer was not configured
-     * and the watermarks for packet buffer 0 should always be configured.
-     */
-    if (!hw->fc.low_water ||
-        !hw->fc.high_water[0] ||
-        !hw->fc.pause_time) {
-        hw_dbg(hw, "Invalid water mark configuration\n");
+    /* Validate the water mark configuration */
+    if (!hw->fc.pause_time) {
        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
        goto out;
    }
 
+    /* Low water mark of zero causes XOFF floods */
+    for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+        if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+            hw->fc.high_water[i]) {
+            if (!hw->fc.low_water[i] ||
+                hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                hw_dbg(hw, "Invalid water mark configuration\n");
+                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                goto out;
+            }
+        }
+    }
+
    /*
     * On 82598 having Rx FC on causes resets while doing 1G
     * so if it's on turn it off once we know link_speed. For
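With low_water now an array, the sanity check moves from "packet buffer 0 must be configured" to a per-traffic-class rule: whenever a class has Tx pause armed (a non-zero high water mark), its low water mark must be non-zero and strictly below the high mark, or the XON threshold could never fire and the link would flood XOFF frames. Restated as a standalone predicate (illustrative only, mirroring the hunk above):

    /* true if the per-class water marks cannot produce XOFF floods */
    static bool fc_watermarks_valid(const struct ixgbe_fc_info *fc)
    {
        int tc;

        if (!fc->pause_time)
            return false;
        for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
            if (!((fc->current_mode & ixgbe_fc_tx_pause) &&
                  fc->high_water[tc]))
                continue;       /* class is not using Tx pause */
            if (!fc->low_water[tc] ||
                fc->low_water[tc] >= fc->high_water[tc])
                return false;
        }
        return true;
    }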
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
    IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
-    fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
    /* Set up and enable Rx high/low water mark thresholds, enable XON. */
    for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
        if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
            hw->fc.high_water[i]) {
+            fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
            fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
            IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
            IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
 * Restarts the link. Performs autonegotiation if needed.
 **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+                                      bool autoneg_wait_to_complete)
 {
    u32 autoc_reg;
    u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
 * Reads the links register to determine if link is up and the current speed
 **/
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed, bool *link_up,
-                                      bool link_up_wait_to_complete)
+                                      ixgbe_link_speed *speed, bool *link_up,
+                                      bool link_up_wait_to_complete)
 {
    u32 links_reg;
    u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
        hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
        hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
        hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
-                             &adapt_comp_reg);
+                             &adapt_comp_reg);
        if (link_up_wait_to_complete) {
            for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                }
                msleep(100);
                hw->phy.ops.read_reg(hw, 0xC79F,
-                                     MDIO_MMD_PMAPMD,
-                                     &link_reg);
+                                     MDIO_MMD_PMAPMD,
+                                     &link_reg);
                hw->phy.ops.read_reg(hw, 0xC00C,
-                                     MDIO_MMD_PMAPMD,
-                                     &adapt_comp_reg);
+                                     MDIO_MMD_PMAPMD,
+                                     &adapt_comp_reg);
            }
        } else {
            if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 
    /* Set KX4/KX support according to speed requested */
    else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
-             link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+             link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
        autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
        if (speed & IXGBE_LINK_SPEED_10GB_FULL)
            autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete)
+                                         ixgbe_link_speed speed,
+                                         bool autoneg_wait_to_complete)
 {
    s32 status;
 
    /* Setup the PHY according to input speed */
    status = hw->phy.ops.setup_link_speed(hw, speed,
-                                          autoneg_wait_to_complete);
+                                          autoneg_wait_to_complete);
    /* Set up MAC */
    ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
    if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
        /* Enable Tx Atlas so packets can be transmitted again */
        hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                     &analog_val);
+                                     &analog_val);
        analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
        hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                      analog_val);
+                                      analog_val);
 
        hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                     &analog_val);
+                                     &analog_val);
        analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
        hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                      analog_val);
+                                      analog_val);
 
        hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                     &analog_val);
+                                     &analog_val);
        analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
        hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                      analog_val);
+                                      analog_val);
 
        hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                     &analog_val);
+                                     &analog_val);
        analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
        hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                      analog_val);
+                                      analog_val);
    }
 
    /* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
    for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
        for (offset = 0; offset < hw->mac.vft_size; offset++)
            IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-                            0);
+                            0);
 
    return 0;
 }
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
    u32 atlas_ctl;
 
    IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-                    IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+                    IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
    IXGBE_WRITE_FLUSH(hw);
    udelay(10);
    atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
    /* Setup Tx packet buffer sizes */
    for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
        IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
-    return;
 }
 
 static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd1ba8e..bc7c924240a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                                 ixgbe_link_speed speed,
-                                                 bool autoneg_wait_to_complete);
+                                                 ixgbe_link_speed speed,
+                                                 bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                           ixgbe_link_speed speed,
-                                           bool autoneg_wait_to_complete);
+                                           ixgbe_link_speed speed,
+                                           bool autoneg_wait_to_complete);
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete);
+                                      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed speed,
-                                      bool autoneg_wait_to_complete);
+                                      ixgbe_link_speed speed,
+                                      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+                                         ixgbe_link_speed speed,
+                                         bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
-                                     u8 dev_addr, u8 *data);
+                                     u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
    if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
        !ixgbe_mng_enabled(hw)) {
        mac->ops.disable_tx_laser =
-            &ixgbe_disable_tx_laser_multispeed_fiber;
+            &ixgbe_disable_tx_laser_multispeed_fiber;
        mac->ops.enable_tx_laser =
-            &ixgbe_enable_tx_laser_multispeed_fiber;
+            &ixgbe_enable_tx_laser_multispeed_fiber;
        mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
    } else {
        mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
        hw->phy.ops.reset = NULL;
 
        ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                      &data_offset);
+                                                      &data_offset);
        if (ret_val != 0)
            goto setup_sfp_out;
 
        /* PHY config will finish before releasing the semaphore */
        ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                IXGBE_GSSR_MAC_CSR_SM);
+                                                IXGBE_GSSR_MAC_CSR_SM);
        if (ret_val != 0) {
            ret_val = IXGBE_ERR_SWFW_SYNC;
            goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
        phy->ops.check_link = &ixgbe_check_phy_link_tnx;
        phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
        phy->ops.get_firmware_version =
-            &ixgbe_get_phy_firmware_version_tnx;
+            &ixgbe_get_phy_firmware_version_tnx;
        break;
    default:
        break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 * Determines the link capabilities by reading the AUTOC register.
 **/
 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
 {
    s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
 * Restarts the link. Performs autonegotiation if needed.
 **/
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+                                      bool autoneg_wait_to_complete)
 {
    u32 autoc_reg;
    u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 * Set the link speed in the AUTOC register and restarts link.
 **/
 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                                 ixgbe_link_speed speed,
-                                                 bool autoneg_wait_to_complete)
+                                                 ixgbe_link_speed speed,
+                                                 bool autoneg_wait_to_complete)
 {
    s32 status = 0;
    ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
     */
    if (speedcnt > 1)
        status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-                                                       highest_link_speed,
-                                                       autoneg_wait_to_complete);
+                                                       highest_link_speed,
+                                                       autoneg_wait_to_complete);
 
 out:
    /* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
            autoc |= IXGBE_AUTOC_KX_SUPP;
    } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-               (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-                link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+               (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+                link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
        /* Switch from 1G SFI to 10G SFI if requested */
        if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
            (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
            autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
        }
    } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-               (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+               (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
        /* Switch from 10G SFI to 1G SFI if requested */
        if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
            (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
            }
            if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
                status =
-                    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
                hw_dbg(hw, "Autoneg did not complete.\n");
            }
        }
@@ -1074,14 +1074,14 @@ out:
 * Restarts link on PHY and MAC based on settings passed in.
 **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete)
+                                         ixgbe_link_speed speed,
+                                         bool autoneg_wait_to_complete)
 {
    s32 status;
 
    /* Setup the PHY according to input speed */
    status = hw->phy.ops.setup_link_speed(hw, speed,
-                                          autoneg_wait_to_complete);
+                                          autoneg_wait_to_complete);
    /* Set up MAC */
    ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 
@@ -1224,7 +1224,7 @@ mac_reset_top:
            (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
            autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
            autoc2 |= (hw->mac.orig_autoc2 &
-                       IXGBE_AUTOC2_UPPER_MASK);
+                       IXGBE_AUTOC2_UPPER_MASK);
            IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
        }
    }
@@ -1246,7 +1246,7 @@ mac_reset_top:
    /* Add the SAN MAC address to the RAR only if it's a valid address */
    if (is_valid_ether_addr(hw->mac.san_addr)) {
        hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                            hw->mac.san_addr, 0, IXGBE_RAH_AV);
+                            hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
        /* Save the SAN MAC RAR index */
        hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
 
    /* Store the alternative WWNN/WWPN prefix */
    hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-                               &hw->mac.wwpn_prefix);
+                               &hw->mac.wwpn_prefix);
 
 reset_hw_out:
    return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 {
    int i;
    u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
    fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
 
    /*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
        udelay(10);
    }
    if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-        hw_dbg(hw, "Flow Director previous command isn't complete, "
-               "aborting table re-initialization.\n");
+        hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
        return IXGBE_ERR_FDIR_REINIT_FAILED;
    }
 
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
     * - write 0 to bit 8 of FDIRCMD register
     */
    IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-                     IXGBE_FDIRCMD_CLEARHT));
+                    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+                     IXGBE_FDIRCMD_CLEARHT));
    IXGBE_WRITE_FLUSH(hw);
    IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-                     ~IXGBE_FDIRCMD_CLEARHT));
+                    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+                     ~IXGBE_FDIRCMD_CLEARHT));
    IXGBE_WRITE_FLUSH(hw);
    /*
     * Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
    /* Poll init-done after we write FDIRCTRL register */
    for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
        if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-            IXGBE_FDIRCTRL_INIT_DONE)
+            IXGBE_FDIRCTRL_INIT_DONE)
            break;
        usleep_range(1000, 2000);
    }
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
    IXGBE_WRITE_FLUSH(hw);
    for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
        if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-            IXGBE_FDIRCTRL_INIT_DONE)
+            IXGBE_FDIRCTRL_INIT_DONE)
            break;
        usleep_range(1000, 2000);
    }
@@ -1453,7 +1453,7 @@ do { \
        bucket_hash ^= hi_hash_dword >> n; \
    else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
        sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
 
 /**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
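Dropping the semicolon after } while (0) is the whole point of these macro hunks: the do/while wrapper exists so that a multi-statement macro behaves as a single statement, with the caller supplying the terminating semicolon. Baked in, the extra semicolon becomes an empty statement that breaks brace-less if/else users:

    #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

    if (x > y)
        SWAP(x, y);     /* expands to exactly one statement */
    else                /* ...so this else still binds to the if */
        x = 0;

Had the macro ended in "while (0);", the expansion would leave a stray empty statement before the else and the code above would no longer compile.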
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
 * @queue: queue index to direct traffic to
 **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_hash_dword input,
-                                          union ixgbe_atr_hash_dword common,
-                                          u8 queue)
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
+                                          u8 queue)
 {
    u64 fdirhashcmd;
    u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 
    /* configure FDIRCMD register */
    fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-              IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+              IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
    fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
    fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
@@ -1579,7 +1579,7 @@ do { \
        bucket_hash ^= lo_hash_dword >> n; \
    if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
        bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
 
 /**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 {
    u32 mask = ntohs(input_mask->formatted.dst_port);
+
    mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
    mask |= ntohs(input_mask->formatted.src_port);
    mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
    u32 core_ctl;
 
    IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
-                    (reg << 8));
+                    (reg << 8));
    IXGBE_WRITE_FLUSH(hw);
    udelay(10);
    core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
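Several of the one-line + additions in these files (after the u32 fdirctrl and u32 mask declarations above) exist only to satisfy checkpatch's "missing blank line after declarations" rule, which wants the declaration block visually separated from the first statement:

    static u32 example(u32 x)
    {
        u32 mask = x & 0xffff;  /* declarations first... */

        mask <<= 16;            /* ...blank line, then code */
        return mask;
    }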
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..4e5385a2a465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-                                        u16 count);
+                                        u16 count);
 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
 **/
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
+    s32 ret_val;
    u32 ctrl_ext;
 
    /* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
    IXGBE_WRITE_FLUSH(hw);
 
    /* Setup flow control */
-    ixgbe_setup_fc(hw);
+    ret_val = ixgbe_setup_fc(hw);
+    if (!ret_val)
+        goto out;
 
    /* Clear adapter stopped flag */
    hw->adapter_stopped = false;
 
-    return 0;
+out:
+    return ret_val;
 }
 
 /**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
481 * Reads the part number string from the EEPROM. 485 * Reads the part number string from the EEPROM.
482 **/ 486 **/
483s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 487s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
484 u32 pba_num_size) 488 u32 pba_num_size)
485{ 489{
486 s32 ret_val; 490 s32 ret_val;
487 u16 data; 491 u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
814 eeprom->address_bits = 16; 818 eeprom->address_bits = 16;
815 else 819 else
816 eeprom->address_bits = 8; 820 eeprom->address_bits = 8;
817 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " 821 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
818 "%d\n", eeprom->type, eeprom->word_size, 822 eeprom->type, eeprom->word_size, eeprom->address_bits);
819 eeprom->address_bits);
820 } 823 }
821 824
822 return 0; 825 return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1388 } 1391 }
1389 1392
1390 if (i == timeout) { 1393 if (i == timeout) {
1391 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " 1394 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
1392 "not granted.\n");
1393 /* 1395 /*
1394 * this release is particularly important because our attempts 1396 * this release is particularly important because our attempts
1395 * above to get the semaphore may have succeeded, and if there 1397 * above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1434 * was not granted because we don't have access to the EEPROM 1436 * was not granted because we don't have access to the EEPROM
1435 */ 1437 */
1436 if (i >= timeout) { 1438 if (i >= timeout) {
1437 hw_dbg(hw, "SWESMBI Software EEPROM semaphore " 1439 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
1438 "not granted.\n");
1439 ixgbe_release_eeprom_semaphore(hw); 1440 ixgbe_release_eeprom_semaphore(hw);
1440 status = IXGBE_ERR_EEPROM; 1441 status = IXGBE_ERR_EEPROM;
1441 } 1442 }
1442 } else { 1443 } else {
1443 hw_dbg(hw, "Software semaphore SMBI between device drivers " 1444 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1444 "not granted.\n");
1445 } 1445 }
1446 1446
1447 return status; 1447 return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1483 */ 1483 */
1484 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1484 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1485 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1485 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1486 IXGBE_EEPROM_OPCODE_BITS); 1486 IXGBE_EEPROM_OPCODE_BITS);
1487 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1487 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1488 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1488 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1489 break; 1489 break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1532 * @count: number of bits to shift out 1532 * @count: number of bits to shift out
1533 **/ 1533 **/
1534static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 1534static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1535 u16 count) 1535 u16 count)
1536{ 1536{
1537 u32 eec; 1537 u32 eec;
1538 u32 mask; 1538 u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1736 * caller does not need checksum_val, the value can be NULL. 1736 * caller does not need checksum_val, the value can be NULL.
1737 **/ 1737 **/
1738s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 1738s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1739 u16 *checksum_val) 1739 u16 *checksum_val)
1740{ 1740{
1741 s32 status; 1741 s32 status;
1742 u16 checksum; 1742 u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1809 * Puts an ethernet address into a receive address register. 1809 * Puts an ethernet address into a receive address register.
1810 **/ 1810 **/
1811s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 1811s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1812 u32 enable_addr) 1812 u32 enable_addr)
1813{ 1813{
1814 u32 rar_low, rar_high; 1814 u32 rar_low, rar_high;
1815 u32 rar_entries = hw->mac.num_rar_entries; 1815 u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
2053 2053
2054 if (hw->addr_ctrl.mta_in_use > 0) 2054 if (hw->addr_ctrl.mta_in_use > 0)
2055 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 2055 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2056 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2056 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2057 2057
2058 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 2058 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
2059 return 0; 2059 return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2071 2071
2072 if (a->mta_in_use > 0) 2072 if (a->mta_in_use > 0)
2073 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2073 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2074 hw->mac.mc_filter_type); 2074 hw->mac.mc_filter_type);
2075 2075
2076 return 0; 2076 return 0;
2077} 2077}
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2106 u32 fcrtl, fcrth; 2106 u32 fcrtl, fcrth;
2107 int i; 2107 int i;
2108 2108
2109 /* 2109 /* Validate the water mark configuration. */
2110 * Validate the water mark configuration for packet buffer 0. Zero 2110 if (!hw->fc.pause_time) {
2111 * water marks indicate that the packet buffer was not configured
2112 * and the watermarks for packet buffer 0 should always be configured.
2113 */
2114 if (!hw->fc.low_water ||
2115 !hw->fc.high_water[0] ||
2116 !hw->fc.pause_time) {
2117 hw_dbg(hw, "Invalid water mark configuration\n");
2118 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2111 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2119 goto out; 2112 goto out;
2120 } 2113 }
2121 2114
2115 /* Low water mark of zero causes XOFF floods */
2116 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2117 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2118 hw->fc.high_water[i]) {
2119 if (!hw->fc.low_water[i] ||
2120 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2121 hw_dbg(hw, "Invalid water mark configuration\n");
2122 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2123 goto out;
2124 }
2125 }
2126 }
2127
2122 /* Negotiate the fc mode to use */ 2128 /* Negotiate the fc mode to use */
2123 ixgbe_fc_autoneg(hw); 2129 ixgbe_fc_autoneg(hw);
2124 2130
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2181 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2187 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2182 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2188 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2183 2189
2184 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
2185
2186 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2190 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2187 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2191 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2188 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2192 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2189 hw->fc.high_water[i]) { 2193 hw->fc.high_water[i]) {
2194 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2190 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2195 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2191 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2196 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2192 } else { 2197 } else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2654 2659
2655 /* For informational purposes only */ 2660 /* For informational purposes only */
2656 if (i >= IXGBE_MAX_SECRX_POLL) 2661 if (i >= IXGBE_MAX_SECRX_POLL)
2657 hw_dbg(hw, "Rx unit being enabled before security " 2662 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
2658 "path fully disabled. Continuing with init.\n");
2659 2663
2660 return 0; 2664 return 0;
2661 2665
@@ -2782,7 +2786,7 @@ out:
2782 * get and set mac_addr routines. 2786 * get and set mac_addr routines.
2783 **/ 2787 **/
2784static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2788static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2785 u16 *san_mac_offset) 2789 u16 *san_mac_offset)
2786{ 2790{
2787 s32 ret_val; 2791 s32 ret_val;
2788 2792
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2828 hw->mac.ops.set_lan_id(hw); 2832 hw->mac.ops.set_lan_id(hw);
2829 /* apply the port offset to the address offset */ 2833 /* apply the port offset to the address offset */
2830 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2834 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2831 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2835 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2832 for (i = 0; i < 3; i++) { 2836 for (i = 0; i < 3; i++) {
2833 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2837 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2834 &san_mac_data); 2838 &san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3068 * Turn on/off specified VLAN in the VLAN filter table. 3072 * Turn on/off specified VLAN in the VLAN filter table.
3069 **/ 3073 **/
3070s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3074s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3071 bool vlan_on) 3075 bool vlan_on)
3072{ 3076{
3073 s32 regindex; 3077 s32 regindex;
3074 u32 bitindex; 3078 u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 			 * Ignore it. */
 			vfta_changed = false;
 		}
-	}
-	else
+	} else {
 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+	}
 	}
 
 	if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3292 * block to check the support for the alternative WWNN/WWPN prefix support. 3296 * block to check the support for the alternative WWNN/WWPN prefix support.
3293 **/ 3297 **/
3294s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3298s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3295 u16 *wwpn_prefix) 3299 u16 *wwpn_prefix)
3296{ 3300{
3297 u16 offset, caps; 3301 u16 offset, caps;
3298 u16 alt_san_mac_blk_offset; 3302 u16 alt_san_mac_blk_offset;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..2ae5d4b8fc93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
39s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); 39s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
40s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 40s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
41s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 41s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
42 u32 pba_num_size); 42 u32 pba_num_size);
43s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 43s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
44enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); 44enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
45enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); 45enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
61s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 61s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
62 u16 words, u16 *data); 62 u16 words, u16 *data);
63s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 63s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
64 u16 *data); 64 u16 *data);
65s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 65s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
66 u16 words, u16 *data); 66 u16 words, u16 *data);
67u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); 67u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
68s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 68s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
69 u16 *checksum_val); 69 u16 *checksum_val);
70s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 70s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
71 71
72s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 72s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
73 u32 enable_addr); 73 u32 enable_addr);
74s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); 74s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
75s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 75s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
76s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 76s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
92s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); 92s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
93s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); 93s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
94s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, 94s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
95 u32 vind, bool vlan_on); 95 u32 vind, bool vlan_on);
96s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); 96s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
97s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 97s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
98 ixgbe_link_speed *speed, 98 ixgbe_link_speed *speed,
99 bool *link_up, bool link_up_wait_to_complete); 99 bool *link_up, bool link_up_wait_to_complete);
100s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 100s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
101 u16 *wwpn_prefix); 101 u16 *wwpn_prefix);
102 102
103s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); 103s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
104s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); 104s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
 	return unlikely(!addr);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 }
 #define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
 
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-	u32 value;
-
-	if (ixgbe_removed(reg_addr))
-		return IXGBE_FAILED_READ_REG;
-	value = readl(reg_addr + reg);
-	if (unlikely(value == IXGBE_FAILED_READ_REG))
-		ixgbe_check_remove(hw, reg);
-	return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
 
 #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131b..a689ee0d4bed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
267 * Configure dcb settings and enable dcb mode. 267 * Configure dcb settings and enable dcb mode.
268 */ 268 */
269s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, 269s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
270 struct ixgbe_dcb_config *dcb_config) 270 struct ixgbe_dcb_config *dcb_config)
271{ 271{
272 s32 ret = 0; 272 s32 ret = 0;
273 u8 pfc_en; 273 u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
 	for (i = 0; i < MAX_USER_PRIORITY; i++)
 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
 			 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
-	return;
 }
 
 void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 			continue;
 		}
 
+		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 			max_tc = prio_tc[i];
 	}
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 
 		if (enabled) {
 			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 		} else {
 			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3db0774..90c370230e20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
31 31
32/* DCB register definitions */ 32/* DCB register definitions */
33#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, 33#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
34 * 1 WSP - Weighted Strict Priority 34 * 1 WSP - Weighted Strict Priority
35 */ 35 */
36#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, 36#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
37 * 1 WRR - Weighted Round Robin 37 * 1 WRR - Weighted Round Robin
38 */ 38 */
39#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ 39#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
40#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ 40#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
41#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ 41#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
42#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must 42#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
43 * clear! 43 * clear!
44 */ 44 */
45#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ 45#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
46 46
47/* Receive UP2TC mapping */ 47/* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
56#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ 56#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
57 57
58#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet 58#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
59 * buffers enable 59 * buffers enable
60 */ 60 */
61#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores 61#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
62 * (RSS) enable 62 * (RSS) enable
63 */ 63 */
64 64
65/* RTRPCS Bit Masks */ 65/* RTRPCS Bit Masks */
66#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ 66#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
81 81
82/* RTTPCS Bit Masks */ 82/* RTTPCS Bit Masks */
83#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, 83#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
84 * 1 SP - Strict Priority 84 * 1 SP - Strict Priority
85 */ 85 */
86#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ 86#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
87#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ 87#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
88#define IXGBE_RTTPCS_ARBD_SHIFT 22 88#define IXGBE_RTTPCS_ARBD_SHIFT 22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27f..5172b6b12c09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
192} 192}
193 193
194static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, 194static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
195 u8 prio, u8 bwg_id, u8 bw_pct, 195 u8 prio, u8 bwg_id, u8 bw_pct,
196 u8 up_map) 196 u8 up_map)
197{ 197{
198 struct ixgbe_adapter *adapter = netdev_priv(netdev); 198 struct ixgbe_adapter *adapter = netdev_priv(netdev);
199 199
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
210} 210}
211 211
212static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 212static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
213 u8 bw_pct) 213 u8 bw_pct)
214{ 214{
215 struct ixgbe_adapter *adapter = netdev_priv(netdev); 215 struct ixgbe_adapter *adapter = netdev_priv(netdev);
216 216
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
218} 218}
219 219
220static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 220static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
221 u8 prio, u8 bwg_id, u8 bw_pct, 221 u8 prio, u8 bwg_id, u8 bw_pct,
222 u8 up_map) 222 u8 up_map)
223{ 223{
224 struct ixgbe_adapter *adapter = netdev_priv(netdev); 224 struct ixgbe_adapter *adapter = netdev_priv(netdev);
225 225
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
236} 236}
237 237
238static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 238static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
239 u8 bw_pct) 239 u8 bw_pct)
240{ 240{
241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 241 struct ixgbe_adapter *adapter = netdev_priv(netdev);
242 242
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
244} 244}
245 245
246static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 246static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
247 u8 *prio, u8 *bwg_id, u8 *bw_pct, 247 u8 *prio, u8 *bwg_id, u8 *bw_pct,
248 u8 *up_map) 248 u8 *up_map)
249{ 249{
250 struct ixgbe_adapter *adapter = netdev_priv(netdev); 250 struct ixgbe_adapter *adapter = netdev_priv(netdev);
251 251
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
256} 256}
257 257
258static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 258static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
259 u8 *bw_pct) 259 u8 *bw_pct)
260{ 260{
261 struct ixgbe_adapter *adapter = netdev_priv(netdev); 261 struct ixgbe_adapter *adapter = netdev_priv(netdev);
262 262
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
264} 264}
265 265
266static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, 266static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
267 u8 *prio, u8 *bwg_id, u8 *bw_pct, 267 u8 *prio, u8 *bwg_id, u8 *bw_pct,
268 u8 *up_map) 268 u8 *up_map)
269{ 269{
270 struct ixgbe_adapter *adapter = netdev_priv(netdev); 270 struct ixgbe_adapter *adapter = netdev_priv(netdev);
271 271
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
276} 276}
277 277
278static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 278static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
279 u8 *bw_pct) 279 u8 *bw_pct)
280{ 280{
281 struct ixgbe_adapter *adapter = netdev_priv(netdev); 281 struct ixgbe_adapter *adapter = netdev_priv(netdev);
282 282
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
284} 284}
285 285
286static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, 286static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
287 u8 setting) 287 u8 setting)
288{ 288{
289 struct ixgbe_adapter *adapter = netdev_priv(netdev); 289 struct ixgbe_adapter *adapter = netdev_priv(netdev);
290 290
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
295} 295}
296 296
297static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 297static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
298 u8 *setting) 298 u8 *setting)
299{ 299{
300 struct ixgbe_adapter *adapter = netdev_priv(netdev); 300 struct ixgbe_adapter *adapter = netdev_priv(netdev);
301 301
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f450bf9..5e2c1e35e517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
 **/
 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
 {
-	if (adapter->ixgbe_dbg_adapter)
-		debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+	debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
 	adapter->ixgbe_dbg_adapter = NULL;
 }
 
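The guard removed here was redundant: debugfs_remove_recursive(), like the rest of the debugfs teardown API, simply returns when handed a NULL (or error) dentry pointer, so callers can remove and clear unconditionally. A sketch of the resulting idiom, with a hypothetical private struct:

	#include <linux/debugfs.h>

	struct example_priv {
		struct dentry *dbg_dir;
	};

	/* Sketch: unconditional debugfs teardown; the helper is NULL-safe. */
	static void example_dbg_exit(struct example_priv *priv)
	{
		debugfs_remove_recursive(priv->dbg_dir);
		priv->dbg_dir = NULL;
	}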
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14d082a..a452730a3278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
142 / sizeof(u64)) 142 / sizeof(u64))
143#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 143#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
144 IXGBE_PB_STATS_LEN + \ 144 IXGBE_PB_STATS_LEN + \
145 IXGBE_QUEUE_STATS_LEN) 145 IXGBE_QUEUE_STATS_LEN)
146 146
147static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 147static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
148 "Register test (offline)", "Eeprom test (offline)", 148 "Register test (offline)", "Eeprom test (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
152#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 152#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
153 153
154static int ixgbe_get_settings(struct net_device *netdev, 154static int ixgbe_get_settings(struct net_device *netdev,
155 struct ethtool_cmd *ecmd) 155 struct ethtool_cmd *ecmd)
156{ 156{
157 struct ixgbe_adapter *adapter = netdev_priv(netdev); 157 struct ixgbe_adapter *adapter = netdev_priv(netdev);
158 struct ixgbe_hw *hw = &adapter->hw; 158 struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
 	bool autoneg = false;
 	bool link_up;
 
-	/* SFP type is needed for get_link_capabilities */
-	if (hw->phy.media_type & (ixgbe_media_type_fiber |
-				  ixgbe_media_type_fiber_qsfp)) {
-		if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-			hw->phy.ops.identify_sfp(hw);
-	}
-
 	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
 
 	/* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		}
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	return 0;
 }
 
 static int ixgbe_set_settings(struct net_device *netdev,
 			      struct ethtool_cmd *ecmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
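SPEED_UNKNOWN and DUPLEX_UNKNOWN come from the ethtool UAPI header, and the switch is more than cosmetic: ethtool_cmd's duplex field is a u8, so the old -1 only reported the intended value by truncation, whereas DUPLEX_UNKNOWN is defined as 0xff (and SPEED_UNKNOWN as -1). A sketch of the no-link branch in a generic get_settings handler:

	#include <linux/ethtool.h>

	/* Sketch: report link-down state with the named constants instead
	 * of raw -1 literals. */
	static void example_report_no_link(struct ethtool_cmd *ecmd)
	{
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}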
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
368} 361}
369 362
370static void ixgbe_get_pauseparam(struct net_device *netdev, 363static void ixgbe_get_pauseparam(struct net_device *netdev,
371 struct ethtool_pauseparam *pause) 364 struct ethtool_pauseparam *pause)
372{ 365{
373 struct ixgbe_adapter *adapter = netdev_priv(netdev); 366 struct ixgbe_adapter *adapter = netdev_priv(netdev);
374 struct ixgbe_hw *hw = &adapter->hw; 367 struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
390} 383}
391 384
392static int ixgbe_set_pauseparam(struct net_device *netdev, 385static int ixgbe_set_pauseparam(struct net_device *netdev,
393 struct ethtool_pauseparam *pause) 386 struct ethtool_pauseparam *pause)
394{ 387{
395 struct ixgbe_adapter *adapter = netdev_priv(netdev); 388 struct ixgbe_adapter *adapter = netdev_priv(netdev);
396 struct ixgbe_hw *hw = &adapter->hw; 389 struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
450#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 443#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
451 444
452static void ixgbe_get_regs(struct net_device *netdev, 445static void ixgbe_get_regs(struct net_device *netdev,
453 struct ethtool_regs *regs, void *p) 446 struct ethtool_regs *regs, void *p)
454{ 447{
455 struct ixgbe_adapter *adapter = netdev_priv(netdev); 448 struct ixgbe_adapter *adapter = netdev_priv(netdev);
456 struct ixgbe_hw *hw = &adapter->hw; 449 struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
812} 805}
813 806
814static int ixgbe_get_eeprom(struct net_device *netdev, 807static int ixgbe_get_eeprom(struct net_device *netdev,
815 struct ethtool_eeprom *eeprom, u8 *bytes) 808 struct ethtool_eeprom *eeprom, u8 *bytes)
816{ 809{
817 struct ixgbe_adapter *adapter = netdev_priv(netdev); 810 struct ixgbe_adapter *adapter = netdev_priv(netdev);
818 struct ixgbe_hw *hw = &adapter->hw; 811 struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
918} 911}
919 912
920static void ixgbe_get_drvinfo(struct net_device *netdev, 913static void ixgbe_get_drvinfo(struct net_device *netdev,
921 struct ethtool_drvinfo *drvinfo) 914 struct ethtool_drvinfo *drvinfo)
922{ 915{
923 struct ixgbe_adapter *adapter = netdev_priv(netdev); 916 struct ixgbe_adapter *adapter = netdev_priv(netdev);
924 u32 nvm_track_id; 917 u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
940} 933}
941 934
942static void ixgbe_get_ringparam(struct net_device *netdev, 935static void ixgbe_get_ringparam(struct net_device *netdev,
943 struct ethtool_ringparam *ring) 936 struct ethtool_ringparam *ring)
944{ 937{
945 struct ixgbe_adapter *adapter = netdev_priv(netdev); 938 struct ixgbe_adapter *adapter = netdev_priv(netdev);
946 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 939 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
953} 946}
954 947
955static int ixgbe_set_ringparam(struct net_device *netdev, 948static int ixgbe_set_ringparam(struct net_device *netdev,
956 struct ethtool_ringparam *ring) 949 struct ethtool_ringparam *ring)
957{ 950{
958 struct ixgbe_adapter *adapter = netdev_priv(netdev); 951 struct ixgbe_adapter *adapter = netdev_priv(netdev);
959 struct ixgbe_ring *temp_ring; 952 struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1082} 1075}
1083 1076
1084static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1077static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1085 struct ethtool_stats *stats, u64 *data) 1078 struct ethtool_stats *stats, u64 *data)
1086{ 1079{
1087 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1080 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1088 struct rtnl_link_stats64 temp; 1081 struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1110 } 1103 }
1111 1104
1112 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1105 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1113 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1106 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1114 } 1107 }
1115 for (j = 0; j < netdev->num_tx_queues; j++) { 1108 for (j = 0; j < netdev->num_tx_queues; j++) {
1116 ring = adapter->tx_ring[j]; 1109 ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1180} 1173}
1181 1174
1182static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1175static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1183 u8 *data) 1176 u8 *data)
1184{ 1177{
1185 char *p = (char *)data; 1178 char *p = (char *)data;
1186 int i; 1179 int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
 		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
 		val = ixgbe_read_reg(&adapter->hw, reg);
 		if (val != (test_pattern[pat] & write & mask)) {
-			e_err(drv, "pattern test reg %04X failed: got "
-			      "0x%08X expected 0x%08X\n",
+			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 			      reg, val, (test_pattern[pat] & write & mask));
 			*data = reg;
 			ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
 	ixgbe_write_reg(&adapter->hw, reg, write & mask);
 	val = ixgbe_read_reg(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
-		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
-		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
 		*data = reg;
 		ixgbe_write_reg(&adapter->hw, reg, before);
 		return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
 	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
 	if (value != after) {
-		e_err(drv, "failed STATUS register test got: 0x%08X "
-		      "expected: 0x%08X\n", after, value);
+		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+		      after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1533 return -1; 1525 return -1;
1534 } 1526 }
1535 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1527 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1536 netdev->name, netdev)) { 1528 netdev->name, netdev)) {
1537 shared_int = false; 1529 shared_int = false;
1538 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1530 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1539 netdev->name, netdev)) { 1531 netdev->name, netdev)) {
1540 *data = 1; 1532 *data = 1;
1541 return -1; 1533 return -1;
1542 } 1534 }
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1563 */ 1555 */
1564 adapter->test_icr = 0; 1556 adapter->test_icr = 0;
1565 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1566 ~mask & 0x00007FFF); 1558 ~mask & 0x00007FFF);
1567 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1559 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1568 ~mask & 0x00007FFF); 1560 ~mask & 0x00007FFF);
1569 IXGBE_WRITE_FLUSH(&adapter->hw); 1561 IXGBE_WRITE_FLUSH(&adapter->hw);
1570 usleep_range(10000, 20000); 1562 usleep_range(10000, 20000);
1571 1563
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		IXGBE_WRITE_FLUSH(&adapter->hw);
 		usleep_range(10000, 20000);
 
-		if (!(adapter->test_icr &mask)) {
+		if (!(adapter->test_icr & mask)) {
 			*data = 4;
 			break;
 		}
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1602 */ 1594 */
1603 adapter->test_icr = 0; 1595 adapter->test_icr = 0;
1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1605 ~mask & 0x00007FFF); 1597 ~mask & 0x00007FFF);
1606 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1607 ~mask & 0x00007FFF); 1599 ~mask & 0x00007FFF);
1608 IXGBE_WRITE_FLUSH(&adapter->hw); 1600 IXGBE_WRITE_FLUSH(&adapter->hw);
1609 usleep_range(10000, 20000); 1601 usleep_range(10000, 20000);
1610 1602
@@ -1964,7 +1956,7 @@ out:
1964} 1956}
1965 1957
1966static void ixgbe_diag_test(struct net_device *netdev, 1958static void ixgbe_diag_test(struct net_device *netdev,
1967 struct ethtool_test *eth_test, u64 *data) 1959 struct ethtool_test *eth_test, u64 *data)
1968{ 1960{
1969 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1961 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1970 bool if_running = netif_running(netdev); 1962 bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		int i;
 		for (i = 0; i < adapter->num_vfs; i++) {
 			if (adapter->vfinfo[i].clear_to_send) {
-				netdev_warn(netdev, "%s",
-					    "offline diagnostic is not "
-					    "supported when VFs are "
-					    "present\n");
+				netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
 				data[0] = 1;
 				data[1] = 1;
 				data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		 * loopback diagnostic. */
 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 				      IXGBE_FLAG_VMDQ_ENABLED)) {
-			e_info(hw, "Skip MAC loopback diagnostic in VT "
-			       "mode\n");
+			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
 			data[3] = 0;
 			goto skip_loopback;
 		}
@@ -2078,7 +2066,7 @@ skip_ol_tests:
2078} 2066}
2079 2067
2080static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 2068static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2081 struct ethtool_wolinfo *wol) 2069 struct ethtool_wolinfo *wol)
2082{ 2070{
2083 struct ixgbe_hw *hw = &adapter->hw; 2071 struct ixgbe_hw *hw = &adapter->hw;
2084 int retval = 0; 2072 int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2094} 2082}
2095 2083
2096static void ixgbe_get_wol(struct net_device *netdev, 2084static void ixgbe_get_wol(struct net_device *netdev,
2097 struct ethtool_wolinfo *wol) 2085 struct ethtool_wolinfo *wol)
2098{ 2086{
2099 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2087 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2100 2088
2101 wol->supported = WAKE_UCAST | WAKE_MCAST | 2089 wol->supported = WAKE_UCAST | WAKE_MCAST |
2102 WAKE_BCAST | WAKE_MAGIC; 2090 WAKE_BCAST | WAKE_MAGIC;
2103 wol->wolopts = 0; 2091 wol->wolopts = 0;
2104 2092
2105 if (ixgbe_wol_exclusion(adapter, wol) || 2093 if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
2181} 2169}
2182 2170
2183static int ixgbe_get_coalesce(struct net_device *netdev, 2171static int ixgbe_get_coalesce(struct net_device *netdev,
2184 struct ethtool_coalesce *ec) 2172 struct ethtool_coalesce *ec)
2185{ 2173{
2186 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2174 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2187 2175
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
 	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
 		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-			e_info(probe, "rx-usecs value high enough "
-			       "to re-enable RSC\n");
+			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
 			return true;
 		}
 	/* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2236} 2223}
2237 2224
2238static int ixgbe_set_coalesce(struct net_device *netdev, 2225static int ixgbe_set_coalesce(struct net_device *netdev,
2239 struct ethtool_coalesce *ec) 2226 struct ethtool_coalesce *ec)
2240{ 2227{
2241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2228 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2242 struct ixgbe_q_vector *q_vector; 2229 struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case UDP_V4_FLOW:
 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case SCTP_V4_FLOW:
 	case AH_ESP_V4_FLOW:
 	case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 		break;
 	case TCP_V6_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case UDP_V6_FLOW:
 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case SCTP_V6_FLOW:
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
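The /* fallthrough */ comments only document behavior the code already relied on: each case accumulates hash-field bits and then drops into the next, so TCP gets the L4 port bits unconditionally, UDP gets them only when the matching RSS flag is set, and every flow type ends up with the IP-level bits. A condensed, self-contained sketch of the pattern with illustrative names and bit values:

	#include <stdbool.h>

	/* Sketch: deliberate case fallthrough accumulating flag bits. */
	static int rss_fields(int flow_type, bool udp_rss, unsigned int *data)
	{
		*data = 0;
		switch (flow_type) {
		case 0: /* TCP */
			*data |= 0x3;		/* L4 port bits */
			/* fallthrough */
		case 1: /* UDP */
			if (udp_rss)
				*data |= 0x3;
			/* fallthrough */
		case 2: /* other IP */
			*data |= 0xc;		/* IP src/dst bits */
			break;
		default:
			return -1;
		}
		return 0;
	}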
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
 
 	if ((flags2 & UDP_RSS_FLAGS) &&
 	    !(adapter->flags2 & UDP_RSS_FLAGS))
-		e_warn(drv, "enabling UDP RSS: fragmented packets"
-		       " may arrive out of order to the stack above\n");
+		e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
 
 	adapter->flags2 = flags2;
 
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+	netdev->ethtool_ops = &ixgbe_ethtool_ops;
 }
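This one-liner is part of a tree-wide cleanup in the same development cycle: SET_ETHTOOL_OPS() was nothing but a wrapper around this assignment, so the macro was removed and drivers now set netdev->ethtool_ops directly. The shape of the idiom, sketched with hypothetical names:

	/* Sketch: a driver publishes its ethtool ops with a plain assignment
	 * at probe/setup time; the ops table itself stays const. */
	static const struct ethtool_ops example_ethtool_ops = {
		/* .get_link = ethtool_op_get_link, ... */
	};

	static void example_set_ethtool_ops(struct net_device *netdev)
	{
		netdev->ethtool_ops = &example_ethtool_ops;
	}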
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
 	void *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
 	u8 up;
-#endif
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d392cc3d..2d9451e39686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	err = pci_enable_msi(adapter->pdev);
 	if (err) {
 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
-			     "Unable to allocate MSI interrupt, "
-			     "falling back to legacy. Error: %d\n", err);
+			     "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+			     err);
 		return;
 	}
 	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3ef8d71..f5aa3311ea28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
 	ixgbe_service_event_schedule(adapter);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 {
 	u32 value;
 
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 		ixgbe_remove_adapter(hw);
 }
 
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns : value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming any read that returns all ones by checking the
+ * status register value for all ones. This function avoids reading from
+ * the hardware if a removal was previously detected in which case it
+ * returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u32 value;
+
+	if (ixgbe_removed(reg_addr))
+		return IXGBE_FAILED_READ_REG;
+	value = readl(reg_addr + reg);
+	if (unlikely(value == IXGBE_FAILED_READ_REG))
+		ixgbe_check_remove(hw, reg);
+	return value;
+}
+
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
 {
 	u16 value;
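Un-inlining ixgbe_read_reg() keeps the surprise-removal logic in a single translation unit. The pattern itself is generic for PCIe devices: after a surprise removal every MMIO read returns all ones, so an all-ones result is treated as suspicious and double-checked before the caller trusts it. A device-neutral sketch, all names hypothetical:

	#define EX_FAILED_READ 0xFFFFFFFFu

	struct example_hw {
		u8 __iomem *base;	/* cleared once removal is detected */
	};

	static void example_check_remove(struct example_hw *hw, u32 reg);

	/* Sketch: guard MMIO reads against surprise device removal. */
	static u32 example_read_reg(struct example_hw *hw, u32 reg)
	{
		u8 __iomem *base = READ_ONCE(hw->base);
		u32 value;

		if (!base)
			return EX_FAILED_READ;	/* already gone */
		value = readl(base + reg);
		if (value == EX_FAILED_READ)
			example_check_remove(hw, reg); /* may clear hw->base */
		return value;
	}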
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
 }
 
-/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vlnctrl;
-
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vlnctrl;
-
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl |= IXGBE_VLNCTRL_VFE;
-	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
 /**
  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  * @adapter: driver data
  */
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *          0 on no addresses written
+ *          X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (!netif_running(netdev))
+		return 0;
+
+	if (hw->mac.ops.update_mc_addr_list)
+		hw->mac.ops.update_mc_addr_list(hw, netdev);
+	else
+		return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+	ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+	return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+			hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+					    adapter->mac_table[i].queue,
+					    IXGBE_RAH_AV);
+		else
+			hw->mac.ops.clear_rar(hw, i);
+
+		adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+	}
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+			if (adapter->mac_table[i].state &
+			    IXGBE_MAC_STATE_IN_USE)
+				hw->mac.ops.set_rar(hw, i,
+						adapter->mac_table[i].addr,
+						adapter->mac_table[i].queue,
+						IXGBE_RAH_AV);
+			else
+				hw->mac.ops.clear_rar(hw, i);
+
+			adapter->mac_table[i].state &=
+						~(IXGBE_MAC_STATE_MODIFIED);
+		}
+	}
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+		adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+		adapter->mac_table[i].queue = 0;
+	}
+	ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, count = 0;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state == 0)
+			count++;
+	}
+	return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+					 u8 *addr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+	adapter->mac_table[0].queue = VMDQ_P(0);
+	adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+				       IXGBE_MAC_STATE_IN_USE);
+	hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+			    adapter->mac_table[0].queue,
+			    IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+			continue;
+		adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+						IXGBE_MAC_STATE_IN_USE);
+		ether_addr_copy(adapter->mac_table[i].addr, addr);
+		adapter->mac_table[i].queue = queue;
+		ixgbe_sync_mac_table(adapter);
+		return i;
+	}
+	return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+	/* search table for addr, if found, set to 0 and sync */
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+		    adapter->mac_table[i].queue == queue) {
+			adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+			adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+			adapter->mac_table[i].queue = 0;
+			ixgbe_sync_mac_table(adapter);
+			return 0;
+		}
+	}
+	return -ENOMEM;
+}
+/**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
  *
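The table added above is a classic dirty-flag cache of the hardware RAR array: every mutation marks a slot IXGBE_MAC_STATE_MODIFIED, and ixgbe_sync_mac_table() writes or clears only the marked slots before dropping the flag, so a batch of changes costs one pass over the table. A sketch of how a caller drives it, using the functions above with pool 0 standing in for a real VMDq pool/VF index:

	/* Sketch: install a filter, later tear it down. ixgbe_add_mac_filter()
	 * returns the RAR index used (or -EINVAL/-ENOMEM when the table is
	 * full); the delete returns 0 on success, -ENOMEM if no entry
	 * matched. */
	static int example_toggle_filter(struct ixgbe_adapter *adapter, u8 *mac)
	{
		int idx = ixgbe_add_mac_filter(adapter, mac, 0);

		if (idx < 0)
			return idx;
		/* ... traffic flows to the address ... */
		return ixgbe_del_mac_filter(adapter, mac, 0);
	}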
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  *                0 on no addresses written
  *                X on writing X addresses to the RAR table
  **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	unsigned int rar_entries = hw->mac.num_rar_entries - 1;
 	int count = 0;
 
-	/* In SR-IOV/VMDQ modes significantly less RAR entries are available */
-	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-		rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
 	/* return ENOMEM indicating insufficient memory for addresses */
-	if (netdev_uc_count(netdev) > rar_entries)
+	if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
 		return -ENOMEM;
 
 	if (!netdev_uc_empty(netdev)) {
 		struct netdev_hw_addr *ha;
-		/* return error if we do not support writing to RAR table */
-		if (!hw->mac.ops.set_rar)
-			return -ENOMEM;
-
 		netdev_for_each_uc_addr(ha, netdev) {
-			if (!rar_entries)
-				break;
-			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-					    VMDQ_P(0), IXGBE_RAH_AV);
+			ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+			ixgbe_add_mac_filter(adapter, ha->addr, vfn);
 			count++;
 		}
 	}
-	/* write the addresses in reverse order to avoid write combining */
-	for (; rar_entries > 0 ; rar_entries--)
-		hw->mac.ops.clear_rar(hw, rar_entries);
-
 	return count;
 }
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+	u32 vlnctrl;
 	int count;
 
 	/* Check for Promiscuous and All Multicast modes */
-
 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
 	/* set all bits that we expect to always be set */
 	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
 	/* clear the bits we are changing the status of */
 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
 	if (netdev->flags & IFF_PROMISC) {
 		hw->addr_ctrl.user_set_promisc = true;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+		vmolr |= IXGBE_VMOLR_MPE;
 		/* Only disable hardware filter vlans in promiscuous mode
 		 * if SR-IOV and VMDQ are disabled - otherwise ensure
 		 * that hardware VLAN filters remain enabled.
 		 */
 		if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
 					IXGBE_FLAG_SRIOV_ENABLED)))
-			ixgbe_vlan_filter_disable(adapter);
-		else
-			ixgbe_vlan_filter_enable(adapter);
+			vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			vmolr |= IXGBE_VMOLR_MPE;
 		}
-		ixgbe_vlan_filter_enable(adapter);
+		vlnctrl |= IXGBE_VLNCTRL_VFE;
 		hw->addr_ctrl.user_set_promisc = false;
 	}
 
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 	 * sufficient space to store all the addresses then enable
 	 * unicast promiscuous mode
 	 */
-	count = ixgbe_write_uc_addr_list(netdev);
+	count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
 	if (count < 0) {
 		fctrl |= IXGBE_FCTRL_UPE;
 		vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 	 * then we should just turn on promiscuous mode so
 	 * that we can at least receive multicast traffic
 	 */
-	hw->mac.ops.update_mc_addr_list(hw, netdev);
-	vmolr |= IXGBE_VMOLR_ROMPE;
-
-	if (adapter->num_vfs)
-		ixgbe_restore_vf_multicasts(adapter);
+	count = ixgbe_write_mc_addr_list(netdev);
+	if (count < 0) {
+		fctrl |= IXGBE_FCTRL_MPE;
+		vmolr |= IXGBE_VMOLR_MPE;
+	} else if (count) {
+		vmolr |= IXGBE_VMOLR_ROMPE;
+	}
 
 	if (hw->mac.type != ixgbe_mac_82598EB) {
 		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 	/* NOTE: VLAN filtering is disabled by setting PROMISC */
 	}
 
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
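With the enable/disable helpers gone, ixgbe_set_rx_mode() now handles VLNCTRL the same way it already handled FCTRL: read once at the top, clear the bits being decided (VFE, CFIEN), set them according to the mode, and write the result back once at the bottom. A stripped-down sketch of that single read-modify-write shape; all names and register values here are illustrative, not the driver's, and the filtering condition is deliberately generic:

	#include <stdbool.h>

	#define EX_VLNCTRL	 0x05088	/* illustrative offset */
	#define EX_VLNCTRL_VFE	 (1u << 30)
	#define EX_VLNCTRL_CFIEN (1u << 29)

	/* Sketch: batch all VLAN filter decisions into one register update
	 * instead of several helper calls that each read and wrote VLNCTRL. */
	static void example_update_vlnctrl(u8 __iomem *base, bool promisc,
					   bool keep_filtering)
	{
		u32 vlnctrl = readl(base + EX_VLNCTRL);

		vlnctrl &= ~(EX_VLNCTRL_VFE | EX_VLNCTRL_CFIEN);
		if (!promisc || keep_filtering)
			vlnctrl |= EX_VLNCTRL_VFE;
		writel(vlnctrl, base + EX_VLNCTRL);
	}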
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
 	    (pb == ixgbe_fcoe_get_tc(adapter)))
 		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
 #endif
+
 	/* Calculate delay value for device */
 	switch (hw->mac.type) {
 	case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  * @adapter: board private structure to calculate for
  * @pb: packet buffer to calculate
  */
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
4153 /* Calculate max LAN frame size */ 4288 /* Calculate max LAN frame size */
4154 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 4289 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4155 4290
4291#ifdef IXGBE_FCOE
4292 /* FCoE traffic class uses FCOE jumbo frames */
4293 if ((dev->features & NETIF_F_FCOE_MTU) &&
4294 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4295 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4296 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4297#endif
4298
4156 /* Calculate delay value for device */ 4299 /* Calculate delay value for device */
4157 switch (hw->mac.type) { 4300 switch (hw->mac.type) {
4158 case ixgbe_mac_X540: 4301 case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4179 if (!num_tc) 4322 if (!num_tc)
4180 num_tc = 1; 4323 num_tc = 1;
4181 4324
4182 hw->fc.low_water = ixgbe_lpbthresh(adapter);
4183
4184 for (i = 0; i < num_tc; i++) { 4325 for (i = 0; i < num_tc; i++) {
4185 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); 4326 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4327 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4186 4328
4187 /* Low water marks must not be larger than high water marks */ 4329 /* Low water marks must not be larger than high water marks */
4188		if (hw->fc.low_water > hw->fc.high_water[i])
4189			hw->fc.low_water = 0;
4330		if (hw->fc.low_water[i] > hw->fc.high_water[i])
4331			hw->fc.low_water[i] = 0;
4190 } 4332 }
4333
4334 for (; i < MAX_TRAFFIC_CLASS; i++)
4335 hw->fc.high_water[i] = 0;
4191} 4336}
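The rewritten loop above moves from a single low water mark to one per traffic class, paired with its high mark, and explicitly zeroes the marks of the unused classes so stale thresholds cannot linger after a TC count change. A standalone sketch of that policy (the threshold math is a placeholder, not the ixgbe delay calculation):

#include <stdio.h>

#define MAX_TC 8	/* stands in for MAX_TRAFFIC_CLASS */

static int hpbthresh(int pb) { return 1000 + 10 * pb; }	/* placeholder */
static int lpbthresh(int pb) { return 900 + 10 * pb; }	/* placeholder */

int main(void)
{
	int high[MAX_TC], low[MAX_TC];
	int num_tc = 4, i;

	for (i = 0; i < num_tc; i++) {
		high[i] = hpbthresh(i);
		low[i] = lpbthresh(i);
		/* low water marks must not be larger than high water marks */
		if (low[i] > high[i])
			low[i] = 0;
	}
	for (; i < MAX_TC; i++)	/* disable marks for unused classes */
		high[i] = 0;

	printf("tc0: high=%d low=%d\n", high[0], low[0]);
	return 0;
}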
4192 4337
4193static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) 4338static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4249 vmolr |= IXGBE_VMOLR_ROMPE; 4394 vmolr |= IXGBE_VMOLR_ROMPE;
4250 hw->mac.ops.update_mc_addr_list(hw, dev); 4395 hw->mac.ops.update_mc_addr_list(hw, dev);
4251 } 4396 }
4252	ixgbe_write_uc_addr_list(adapter->netdev);
4397	ixgbe_write_uc_addr_list(adapter->netdev, pool);
4253 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4398 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4254} 4399}
4255 4400
4256static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4257 u8 *addr, u16 pool)
4258{
4259 struct ixgbe_hw *hw = &adapter->hw;
4260 unsigned int entry;
4261
4262 entry = hw->mac.num_rar_entries - pool;
4263 hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
4264}
4265
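The helper deleted above programmed receive-address (RAR) slots directly, indexed from the top of the table. The series replaces such direct writes with a driver-owned mac_table (allocated in ixgbe_sw_init below) that is modified in software and then synced to hardware, which is what ixgbe_flush_sw_mac_table and ixgbe_full_sync_mac_table elsewhere in this diff operate on. A toy model of that table, with invented state flags rather than the driver's real structures:

#include <stdio.h>
#include <string.h>

#define RAR_ENTRIES 8
#define ETH_ALEN 6
#define ST_IN_USE	0x1
#define ST_MODIFIED	0x2

struct mac_entry {
	unsigned char addr[ETH_ALEN];
	unsigned int state;
};

static struct mac_entry table[RAR_ENTRIES];

static int add_filter(const unsigned char *addr)
{
	for (int i = 0; i < RAR_ENTRIES; i++) {
		if (table[i].state & ST_IN_USE)
			continue;
		memcpy(table[i].addr, addr, ETH_ALEN);
		table[i].state = ST_IN_USE | ST_MODIFIED;
		return i;	/* slot index on success */
	}
	return -1;		/* table full */
}

static void sync_table(void)
{
	for (int i = 0; i < RAR_ENTRIES; i++) {
		if (!(table[i].state & ST_MODIFIED))
			continue;
		/* a real driver would program or clear RAR[i] here */
		table[i].state &= ~ST_MODIFIED;
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

	printf("added at slot %d\n", add_filter(mac));
	sync_table();
	return 0;
}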
4266static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 4401static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4267{ 4402{
4268 struct ixgbe_adapter *adapter = vadapter->real_adapter; 4403 struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4521 case ixgbe_phy_qsfp_active_unknown: 4656 case ixgbe_phy_qsfp_active_unknown:
4522 case ixgbe_phy_qsfp_intel: 4657 case ixgbe_phy_qsfp_intel:
4523 case ixgbe_phy_qsfp_unknown: 4658 case ixgbe_phy_qsfp_unknown:
4659 /* ixgbe_phy_none is set when no SFP module is present */
4660 case ixgbe_phy_none:
4524 return true; 4661 return true;
4525 case ixgbe_phy_nl: 4662 case ixgbe_phy_nl:
4526 if (hw->mac.type == ixgbe_mac_82598EB) 4663 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
4742void ixgbe_reset(struct ixgbe_adapter *adapter) 4879void ixgbe_reset(struct ixgbe_adapter *adapter)
4743{ 4880{
4744 struct ixgbe_hw *hw = &adapter->hw; 4881 struct ixgbe_hw *hw = &adapter->hw;
4882 struct net_device *netdev = adapter->netdev;
4745 int err; 4883 int err;
4884 u8 old_addr[ETH_ALEN];
4746 4885
4747 if (ixgbe_removed(hw->hw_addr)) 4886 if (ixgbe_removed(hw->hw_addr))
4748 return; 4887 return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4778 } 4917 }
4779 4918
4780 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 4919 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4781
4782	/* reprogram the RAR[0] in case user changed it. */
4783	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
4920	/* do not flush user set addresses */
4921	memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
4922	ixgbe_flush_sw_mac_table(adapter);
4923 ixgbe_mac_set_default_filter(adapter, old_addr);
4784 4924
4785 /* update SAN MAC vmdq pool selection */ 4925 /* update SAN MAC vmdq pool selection */
4786 if (hw->mac.san_mac_rar_index) 4926 if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5026#endif /* CONFIG_IXGBE_DCB */ 5166#endif /* CONFIG_IXGBE_DCB */
5027#endif /* IXGBE_FCOE */ 5167#endif /* IXGBE_FCOE */
5028 5168
5169 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5170 hw->mac.num_rar_entries,
5171 GFP_ATOMIC);
5172
5029 /* Set MAC specific capability flags and exceptions */ 5173 /* Set MAC specific capability flags and exceptions */
5030 switch (hw->mac.type) { 5174 switch (hw->mac.type) {
5031 case ixgbe_mac_82598EB: 5175 case ixgbe_mac_82598EB:
@@ -5517,6 +5661,17 @@ err_setup_tx:
5517 return err; 5661 return err;
5518} 5662}
5519 5663
5664static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5665{
5666 ixgbe_ptp_suspend(adapter);
5667
5668 ixgbe_down(adapter);
5669 ixgbe_free_irq(adapter);
5670
5671 ixgbe_free_all_tx_resources(adapter);
5672 ixgbe_free_all_rx_resources(adapter);
5673}
5674
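ixgbe_close_suspend above factors out the teardown steps that ixgbe_close and __ixgbe_shutdown previously duplicated; the two hunks that follow convert both callers. A compact model of the refactor, with stub bodies standing in for the real teardown:

#include <stdio.h>

static void close_suspend(void)
{
	/* shared teardown: suspend PTP, down the device, free IRQs/rings */
	printf("common teardown\n");
}

static void dev_close(void)
{
	close_suspend();
	printf("close-only work (e.g. flow director exit)\n");
}

static void dev_suspend(void)
{
	close_suspend();	/* suspend skips the close-only steps */
}

int main(void)
{
	dev_close();
	dev_suspend();
	return 0;
}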
5520/** 5675/**
5521 * ixgbe_close - Disables a network interface 5676 * ixgbe_close - Disables a network interface
5522 * @netdev: network interface device structure 5677 * @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
5534 5689
5535 ixgbe_ptp_stop(adapter); 5690 ixgbe_ptp_stop(adapter);
5536 5691
5537	ixgbe_down(adapter);
5538	ixgbe_free_irq(adapter);
5692	ixgbe_close_suspend(adapter);
5539 5693
5540 ixgbe_fdir_filter_exit(adapter); 5694 ixgbe_fdir_filter_exit(adapter);
5541 5695
5542 ixgbe_free_all_tx_resources(adapter);
5543 ixgbe_free_all_rx_resources(adapter);
5544
5545 ixgbe_release_hw_control(adapter); 5696 ixgbe_release_hw_control(adapter);
5546 5697
5547 return 0; 5698 return 0;
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5608 netif_device_detach(netdev); 5759 netif_device_detach(netdev);
5609 5760
5610 rtnl_lock(); 5761 rtnl_lock();
5611	if (netif_running(netdev)) {
5612		ixgbe_down(adapter);
5613		ixgbe_free_irq(adapter);
5614		ixgbe_free_all_tx_resources(adapter);
5615		ixgbe_free_all_rx_resources(adapter);
5616	}
5762	if (netif_running(netdev))
5763		ixgbe_close_suspend(adapter);
5617 rtnl_unlock(); 5764 rtnl_unlock();
5618 5765
5619 ixgbe_clear_interrupt_scheme(adapter); 5766 ixgbe_clear_interrupt_scheme(adapter);
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5945 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6092 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5946 for (i = 0; i < adapter->num_tx_queues; i++) 6093 for (i = 0; i < adapter->num_tx_queues; i++)
5947 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6094 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5948 &(adapter->tx_ring[i]->state)); 6095 &(adapter->tx_ring[i]->state));
5949 /* re-enable flow director interrupts */ 6096 /* re-enable flow director interrupts */
5950 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 6097 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5951 } else { 6098 } else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
7172 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7319 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7173 struct ixgbe_hw *hw = &adapter->hw; 7320 struct ixgbe_hw *hw = &adapter->hw;
7174 struct sockaddr *addr = p; 7321 struct sockaddr *addr = p;
7322 int ret;
7175 7323
7176 if (!is_valid_ether_addr(addr->sa_data)) 7324 if (!is_valid_ether_addr(addr->sa_data))
7177 return -EADDRNOTAVAIL; 7325 return -EADDRNOTAVAIL;
7178 7326
7327 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7179 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 7328 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7180 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 7329 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7181 7330
7182	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
7183
7184	return 0;
7331	ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7332	return ret > 0 ? 0 : ret;
7185} 7333}
7186 7334
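The rewritten ixgbe_set_mac above relies on a convention this sketch assumes from the `ret > 0 ? 0 : ret` line: the add helper appears to return a non-negative table index on success and a negative errno on failure, so a positive index is collapsed to 0 for the ndo return value. Illustrated standalone:

#include <stdio.h>

/* assumed convention: slot index (>= 0) on success, negative errno on failure */
static int add_mac_filter(int free_slot)
{
	return free_slot >= 0 ? free_slot : -12;	/* -ENOMEM-style error */
}

static int set_mac(int free_slot)
{
	int ret = add_mac_filter(free_slot);

	return ret > 0 ? 0 : ret;	/* callers expect 0 or an errno */
}

int main(void)
{
	printf("%d %d\n", set_mac(3), set_mac(-1));
	return 0;
}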
7187static int 7335static int
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7783 .ndo_do_ioctl = ixgbe_ioctl, 7931 .ndo_do_ioctl = ixgbe_ioctl,
7784 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, 7932 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
7785 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 7933 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
7786	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
7934	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
7787 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, 7935 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
7788 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7936 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
7789 .ndo_get_stats64 = ixgbe_get_stats64, 7937 .ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
8187 goto err_sw_init; 8335 goto err_sw_init;
8188 } 8336 }
8189 8337
8338 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8339
8190 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8340 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8191 (unsigned long) adapter); 8341 (unsigned long) adapter);
8192 8342
@@ -8242,7 +8392,7 @@ skip_sriov:
8242 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 8392 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8243 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", 8393 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8244 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 8394 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8245 part_str); 8395 part_str);
8246 else 8396 else
8247 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 8397 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8248 hw->mac.type, hw->phy.type, part_str); 8398 hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
8304 8454
8305 ixgbe_dbg_adapter_init(adapter); 8455 ixgbe_dbg_adapter_init(adapter);
8306 8456
8307	/* Need link setup for MNG FW, else wait for IXGBE_UP */
8308	if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
8457	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
8458	if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
8309 hw->mac.ops.setup_link(hw, 8459 hw->mac.ops.setup_link(hw,
8310 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, 8460 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8311 true); 8461 true);
@@ -8319,6 +8469,7 @@ err_sw_init:
8319 ixgbe_disable_sriov(adapter); 8469 ixgbe_disable_sriov(adapter);
8320 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 8470 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
8321 iounmap(adapter->io_addr); 8471 iounmap(adapter->io_addr);
8472 kfree(adapter->mac_table);
8322err_ioremap: 8473err_ioremap:
8323 free_netdev(netdev); 8474 free_netdev(netdev);
8324err_alloc_etherdev: 8475err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8392 8543
8393 e_dev_info("complete\n"); 8544 e_dev_info("complete\n");
8394 8545
8546 kfree(adapter->mac_table);
8395 free_netdev(netdev); 8547 free_netdev(netdev);
8396 8548
8397 pci_disable_pcie_error_reporting(pdev); 8549 pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2b891b..1918e0abf734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
223 * received an ack to that message within delay * timeout period 223 * received an ack to that message within delay * timeout period
224 **/ 224 **/
225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
226 u16 mbx_id) 226 u16 mbx_id)
227{ 227{
228 struct ixgbe_mbx_info *mbx = &hw->mbx; 228 struct ixgbe_mbx_info *mbx = &hw->mbx;
229 s32 ret_val = IXGBE_ERR_MBX; 229 s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
269 u32 vf_bit = vf_number % 16; 269 u32 vf_bit = vf_number % 16;
270 270
271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
272 index)) { 272 index)) {
273 ret_val = 0; 273 ret_val = 0;
274 hw->mbx.stats.reqs++; 274 hw->mbx.stats.reqs++;
275 } 275 }
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
291 u32 vf_bit = vf_number % 16; 291 u32 vf_bit = vf_number % 16;
292 292
293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
294 index)) { 294 index)) {
295 ret_val = 0; 295 ret_val = 0;
296 hw->mbx.stats.acks++; 296 hw->mbx.stats.acks++;
297 } 297 }
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
366 * returns SUCCESS if it successfully copied message into the buffer 366 * returns SUCCESS if it successfully copied message into the buffer
367 **/ 367 **/
368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
369 u16 vf_number) 369 u16 vf_number)
370{ 370{
371 s32 ret_val; 371 s32 ret_val;
372 u16 i; 372 u16 i;
@@ -407,7 +407,7 @@ out_no_write:
407 * a message due to a VF request so no polling for message is needed. 407 * a message due to a VF request so no polling for message is needed.
408 **/ 408 **/
409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
410 u16 vf_number) 410 u16 vf_number)
411{ 411{
412 s32 ret_val; 412 s32 ret_val;
413 u16 i; 413 u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad69ed0e..a5cb755de3a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
54 * Message ACK's are the value or'd with 0xF0000000 54 * Message ACK's are the value or'd with 0xF0000000
55 */ 55 */
56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
57 * this are the ACK */ 57 * this are the ACK */
58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
59 * this are the NACK */ 59 * this are the NACK */
60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
61 clear to send requests */ 61 clear to send requests */
62#define IXGBE_VT_MSGINFO_SHIFT 16 62#define IXGBE_VT_MSGINFO_SHIFT 16
63/* bits 23:16 are used for exra info for certain messages */ 63/* bits 23:16 are used for exra info for certain messages */
64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e28a04..ff68b7a9deff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { 67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
68 ixgbe_get_phy_id(hw); 68 ixgbe_get_phy_id(hw);
69 hw->phy.type = 69 hw->phy.type =
70 ixgbe_get_phy_type_from_id(hw->phy.id); 70 ixgbe_get_phy_type_from_id(hw->phy.id);
71 71
72 if (hw->phy.type == ixgbe_phy_unknown) { 72 if (hw->phy.type == ixgbe_phy_unknown) {
73 hw->phy.ops.read_reg(hw, 73 hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
136 u16 phy_id_low = 0; 136 u16 phy_id_low = 0;
137 137
138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, 138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
139 &phy_id_high); 139 &phy_id_high);
140 140
141 if (status == 0) { 141 if (status == 0) {
142 hw->phy.id = (u32)(phy_id_high << 16); 142 hw->phy.id = (u32)(phy_id_high << 16);
143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, 143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
144 &phy_id_low); 144 &phy_id_low);
145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
147 } 147 }
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
318 * @phy_data: Pointer to read data from PHY register 318 * @phy_data: Pointer to read data from PHY register
319 **/ 319 **/
320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
321 u32 device_type, u16 *phy_data) 321 u32 device_type, u16 *phy_data)
322{ 322{
323 s32 status; 323 s32 status;
324 u16 gssr; 324 u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
421 * @phy_data: Data to write to the PHY register 421 * @phy_data: Data to write to the PHY register
422 **/ 422 **/
423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
424 u32 device_type, u16 phy_data) 424 u32 device_type, u16 phy_data)
425{ 425{
426 s32 status; 426 s32 status;
427 u16 gssr; 427 u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
548 * @speed: new link speed 548 * @speed: new link speed
549 **/ 549 **/
550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
551 ixgbe_link_speed speed, 551 ixgbe_link_speed speed,
552 bool autoneg_wait_to_complete) 552 bool autoneg_wait_to_complete)
553{ 553{
554 554
555 /* 555 /*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
582 * Determines the link capabilities by reading the AUTOC register. 582 * Determines the link capabilities by reading the AUTOC register.
583 */ 583 */
584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
585 ixgbe_link_speed *speed, 585 ixgbe_link_speed *speed,
586 bool *autoneg) 586 bool *autoneg)
587{ 587{
588 s32 status = IXGBE_ERR_LINK_SETUP; 588 s32 status = IXGBE_ERR_LINK_SETUP;
589 u16 speed_ability; 589 u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
592 *autoneg = true; 592 *autoneg = true;
593 593
594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, 594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
595 &speed_ability); 595 &speed_ability);
596 596
597 if (status == 0) { 597 if (status == 0) {
598 if (speed_ability & MDIO_SPEED_10G) 598 if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
806 806
807 /* reset the PHY and poll for completion */ 807 /* reset the PHY and poll for completion */
808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
809 (phy_data | MDIO_CTRL1_RESET)); 809 (phy_data | MDIO_CTRL1_RESET));
810 810
811 for (i = 0; i < 100; i++) { 811 for (i = 0; i < 100; i++) {
812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
813 &phy_data); 813 &phy_data);
814 if ((phy_data & MDIO_CTRL1_RESET) == 0) 814 if ((phy_data & MDIO_CTRL1_RESET) == 0)
815 break; 815 break;
816 usleep_range(10000, 20000); 816 usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
824 824
825 /* Get init offsets */ 825 /* Get init offsets */
826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
827 &data_offset); 827 &data_offset);
828 if (ret_val != 0) 828 if (ret_val != 0)
829 goto out; 829 goto out;
830 830
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
838 if (ret_val) 838 if (ret_val)
839 goto err_eeprom; 839 goto err_eeprom;
840 control = (eword & IXGBE_CONTROL_MASK_NL) >> 840 control = (eword & IXGBE_CONTROL_MASK_NL) >>
841 IXGBE_CONTROL_SHIFT_NL; 841 IXGBE_CONTROL_SHIFT_NL;
842 edata = eword & IXGBE_DATA_MASK_NL; 842 edata = eword & IXGBE_DATA_MASK_NL;
843 switch (control) { 843 switch (control) {
844 case IXGBE_DELAY_NL: 844 case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
859 if (ret_val) 859 if (ret_val)
860 goto err_eeprom; 860 goto err_eeprom;
861 hw->phy.ops.write_reg(hw, phy_offset, 861 hw->phy.ops.write_reg(hw, phy_offset,
862 MDIO_MMD_PMAPMD, eword); 862 MDIO_MMD_PMAPMD, eword);
863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
864 phy_offset); 864 phy_offset);
865 data_offset++; 865 data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1011 if (hw->bus.lan_id == 0) 1011 if (hw->bus.lan_id == 0)
1012 hw->phy.sfp_type = 1012 hw->phy.sfp_type =
1013 ixgbe_sfp_type_da_cu_core0; 1013 ixgbe_sfp_type_da_cu_core0;
1014 else 1014 else
1015 hw->phy.sfp_type = 1015 hw->phy.sfp_type =
1016 ixgbe_sfp_type_da_cu_core1; 1016 ixgbe_sfp_type_da_cu_core1;
1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { 1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1018 hw->phy.ops.read_i2c_eeprom( 1018 hw->phy.ops.read_i2c_eeprom(
1019 hw, IXGBE_SFF_CABLE_SPEC_COMP, 1019 hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1035 IXGBE_SFF_10GBASELR_CAPABLE)) { 1035 IXGBE_SFF_10GBASELR_CAPABLE)) {
1036 if (hw->bus.lan_id == 0) 1036 if (hw->bus.lan_id == 0)
1037 hw->phy.sfp_type = 1037 hw->phy.sfp_type =
1038 ixgbe_sfp_type_srlr_core0; 1038 ixgbe_sfp_type_srlr_core0;
1039 else 1039 else
1040 hw->phy.sfp_type = 1040 hw->phy.sfp_type =
1041 ixgbe_sfp_type_srlr_core1; 1041 ixgbe_sfp_type_srlr_core1;
1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { 1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1043 if (hw->bus.lan_id == 0) 1043 if (hw->bus.lan_id == 0)
1044 hw->phy.sfp_type = 1044 hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1087 goto err_read_i2c_eeprom; 1087 goto err_read_i2c_eeprom;
1088 1088
1089 status = hw->phy.ops.read_i2c_eeprom(hw, 1089 status = hw->phy.ops.read_i2c_eeprom(hw,
1090 IXGBE_SFF_VENDOR_OUI_BYTE1, 1090 IXGBE_SFF_VENDOR_OUI_BYTE1,
1091 &oui_bytes[1]); 1091 &oui_bytes[1]);
1092 1092
1093 if (status != 0) 1093 if (status != 0)
1094 goto err_read_i2c_eeprom; 1094 goto err_read_i2c_eeprom;
1095 1095
1096 status = hw->phy.ops.read_i2c_eeprom(hw, 1096 status = hw->phy.ops.read_i2c_eeprom(hw,
1097 IXGBE_SFF_VENDOR_OUI_BYTE2, 1097 IXGBE_SFF_VENDOR_OUI_BYTE2,
1098 &oui_bytes[2]); 1098 &oui_bytes[2]);
1099 1099
1100 if (status != 0) 1100 if (status != 0)
1101 goto err_read_i2c_eeprom; 1101 goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
1403 * so it returns the offsets to the phy init sequence block. 1403 * so it returns the offsets to the phy init sequence block.
1404 **/ 1404 **/
1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1406 u16 *list_offset, 1406 u16 *list_offset,
1407 u16 *data_offset) 1407 u16 *data_offset)
1408{ 1408{
1409 u16 sfp_id; 1409 u16 sfp_id;
1410 u16 sfp_type = hw->phy.sfp_type; 1410 u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
1493 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1493 * Performs byte read operation to SFP module's EEPROM over I2C interface.
1494 **/ 1494 **/
1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1496 u8 *eeprom_data) 1496 u8 *eeprom_data)
1497{ 1497{
1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset, 1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1499 IXGBE_I2C_EEPROM_DEV_ADDR, 1499 IXGBE_I2C_EEPROM_DEV_ADDR,
1500 eeprom_data); 1500 eeprom_data);
1501} 1501}
1502 1502
1503/** 1503/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1525 * Performs byte write operation to SFP module's EEPROM over I2C interface. 1525 * Performs byte write operation to SFP module's EEPROM over I2C interface.
1526 **/ 1526 **/
1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1528 u8 eeprom_data) 1528 u8 eeprom_data)
1529{ 1529{
1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset, 1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1531 IXGBE_I2C_EEPROM_DEV_ADDR, 1531 IXGBE_I2C_EEPROM_DEV_ADDR,
1532 eeprom_data); 1532 eeprom_data);
1533} 1533}
1534 1534
1535/** 1535/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1542 * a specified device address. 1542 * a specified device address.
1543 **/ 1543 **/
1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1545 u8 dev_addr, u8 *data) 1545 u8 dev_addr, u8 *data)
1546{ 1546{
1547 s32 status = 0; 1547 s32 status = 0;
1548 u32 max_retry = 10; 1548 u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
1631 * a specified device address. 1631 * a specified device address.
1632 **/ 1632 **/
1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1634 u8 dev_addr, u8 data) 1634 u8 dev_addr, u8 data)
1635{ 1635{
1636 s32 status = 0; 1636 s32 status = 0;
1637 u32 max_retry = 1; 1637 u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2046 2046
2047 /* Check that the LASI temp alarm status was triggered */ 2047 /* Check that the LASI temp alarm status was triggered */
2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2049 MDIO_MMD_PMAPMD, &phy_data); 2049 MDIO_MMD_PMAPMD, &phy_data);
2050 2050
2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2052 goto out; 2052 goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f751c2..54071ed17e3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
117 u32 device_type, u16 *phy_data); 117 u32 device_type, u16 *phy_data);
118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
119 u32 device_type, u16 phy_data); 119 u32 device_type, u16 phy_data);
120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
121 u32 device_type, u16 *phy_data); 121 u32 device_type, u16 *phy_data);
122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
123 u32 device_type, u16 phy_data); 123 u32 device_type, u16 phy_data);
124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
126 ixgbe_link_speed speed, 126 ixgbe_link_speed speed,
127 bool autoneg_wait_to_complete); 127 bool autoneg_wait_to_complete);
128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
129 ixgbe_link_speed *speed, 129 ixgbe_link_speed *speed,
130 bool *autoneg); 130 bool *autoneg);
131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); 131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
132 132
133/* PHY specific */ 133/* PHY specific */
134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
135 ixgbe_link_speed *speed, 135 ixgbe_link_speed *speed,
136 bool *link_up); 136 bool *link_up);
137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); 137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
139 u16 *firmware_version); 139 u16 *firmware_version);
140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
141 u16 *firmware_version); 141 u16 *firmware_version);
142 142
143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
147 u16 *list_offset, 147 u16 *list_offset,
148 u16 *data_offset); 148 u16 *data_offset);
149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
151 u8 dev_addr, u8 *data); 151 u8 dev_addr, u8 *data);
152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
153 u8 dev_addr, u8 data); 153 u8 dev_addr, u8 data);
154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
155 u8 *eeprom_data); 155 u8 *eeprom_data);
156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, 156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
157 u8 *sff8472_data); 157 u8 *sff8472_data);
158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
159 u8 eeprom_data); 159 u8 eeprom_data);
160#endif /* _IXGBE_PHY_H_ */ 160#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae683457..68f87ecb8a76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28#include "ixgbe.h" 28#include "ixgbe.h"
29#include <linux/export.h>
30#include <linux/ptp_classify.h> 29#include <linux/ptp_classify.h>
31 30
32/* 31/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
334} 333}
335 334
336/** 335/**
337 * ixgbe_ptp_enable
336 * ixgbe_ptp_feature_enable
338 * @ptp: the ptp clock structure 337 * @ptp: the ptp clock structure
339 * @rq: the requested feature to change 338 * @rq: the requested feature to change
340 * @on: whether to enable or disable the feature 339 * @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
342 * enable (or disable) ancillary features of the phc subsystem. 341 * enable (or disable) ancillary features of the phc subsystem.
343 * our driver only supports the PPS feature on the X540 342 * our driver only supports the PPS feature on the X540
344 */ 343 */
345static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
346			    struct ptp_clock_request *rq, int on)
344static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
345				    struct ptp_clock_request *rq, int on)
347{ 346{
348 struct ixgbe_adapter *adapter = 347 struct ixgbe_adapter *adapter =
349 container_of(ptp, struct ixgbe_adapter, ptp_caps); 348 container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
570} 569}
571 570
572/** 571/**
573 * ixgbe_ptp_set_ts_config - control hardware time stamping
574 * @adapter: pointer to adapter struct
575 * @ifreq: ioctl data
572 * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
573 * @adapter: the private ixgbe adapter structure
574 * @config: the hwtstamp configuration requested
576 * 575 *
577 * Outgoing time stamping can be enabled and disabled. Play nice and 576 * Outgoing time stamping can be enabled and disabled. Play nice and
578 * disable it when requested, although it shouldn't cause any overhead 577 * disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
590 * packets, regardless of the type specified in the register, only use V2 589 * packets, regardless of the type specified in the register, only use V2
591 * Event mode. This more accurately tells the user what the hardware is going 590 * Event mode. This more accurately tells the user what the hardware is going
592 * to do anyways. 591 * to do anyways.
592 *
593 * Note: this may modify the hwtstamp configuration towards a more general
594 * mode, if required to support the specifically requested mode.
593 */ 595 */
594int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
596static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
597					 struct hwtstamp_config *config)
595{ 598{
596 struct ixgbe_hw *hw = &adapter->hw; 599 struct ixgbe_hw *hw = &adapter->hw;
597 struct hwtstamp_config config;
598 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; 600 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
599 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; 601 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
600 u32 tsync_rx_mtrl = PTP_EV_PORT << 16; 602 u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
601 bool is_l2 = false; 603 bool is_l2 = false;
602 u32 regval; 604 u32 regval;
603 605
604 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
605 return -EFAULT;
606
607 /* reserved for future extensions */ 606 /* reserved for future extensions */
608	if (config.flags)
607	if (config->flags)
609 return -EINVAL; 608 return -EINVAL;
610 609
611	switch (config.tx_type) {
610	switch (config->tx_type) {
612 case HWTSTAMP_TX_OFF: 611 case HWTSTAMP_TX_OFF:
613 tsync_tx_ctl = 0; 612 tsync_tx_ctl = 0;
614 case HWTSTAMP_TX_ON: 613 case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
617 return -ERANGE; 616 return -ERANGE;
618 } 617 }
619 618
620	switch (config.rx_filter) {
619	switch (config->rx_filter) {
621 case HWTSTAMP_FILTER_NONE: 620 case HWTSTAMP_FILTER_NONE:
622 tsync_rx_ctl = 0; 621 tsync_rx_ctl = 0;
623 tsync_rx_mtrl = 0; 622 tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
641 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 640 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
642 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 641 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
643 is_l2 = true; 642 is_l2 = true;
644		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
643		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645 break; 644 break;
646 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 645 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
647 case HWTSTAMP_FILTER_ALL: 646 case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
652 * Delay_Req messages and hardware does not support 651 * Delay_Req messages and hardware does not support
653 * timestamping all packets => return error 652 * timestamping all packets => return error
654 */ 653 */
655		config.rx_filter = HWTSTAMP_FILTER_NONE;
654		config->rx_filter = HWTSTAMP_FILTER_NONE;
656 return -ERANGE; 655 return -ERANGE;
657 } 656 }
658 657
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
671 else 670 else
672 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 671 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
673 672
674
675 /* enable/disable TX */ 673 /* enable/disable TX */
676 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 674 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
677 regval &= ~IXGBE_TSYNCTXCTL_ENABLED; 675 regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
693 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); 691 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
694 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 692 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
695 693
694 return 0;
695}
696
697/**
698 * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
699 * @adapter: pointer to adapter struct
700 * @ifreq: ioctl data
701 *
702 * Set hardware to requested mode. If unsupported, return an error with no
703 * changes. Otherwise, store the mode for future reference.
704 */
705int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
706{
707 struct hwtstamp_config config;
708 int err;
709
710 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
711 return -EFAULT;
712
713 err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
714 if (err)
715 return err;
716
696 /* save these settings for future reference */ 717 /* save these settings for future reference */
697 memcpy(&adapter->tstamp_config, &config, 718 memcpy(&adapter->tstamp_config, &config,
698 sizeof(adapter->tstamp_config)); 719 sizeof(adapter->tstamp_config));
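The split shown above separates mode validation and hardware programming (ixgbe_ptp_set_timestamp_mode) from the ioctl wrapper (ixgbe_ptp_set_ts_config), so the stored configuration can be replayed after a reset. A reduced model of the pattern, with a cut-down hwtstamp_config and errno constants inlined:

#include <stdio.h>

struct hwtstamp_config {
	int flags;
	int tx_type;
	int rx_filter;
};

static struct hwtstamp_config saved;

static int set_timestamp_mode(struct hwtstamp_config *cfg)
{
	if (cfg->flags)
		return -22;	/* -EINVAL: reserved bits must be zero */
	/* the real code may also widen rx_filter to a more general mode */
	return 0;
}

static int set_ts_config(struct hwtstamp_config *cfg)
{
	int err = set_timestamp_mode(cfg);

	if (err)
		return err;	/* unsupported: leave the hardware untouched */
	saved = *cfg;		/* persist only after a successful apply */
	return 0;
}

static void ptp_reset(void)
{
	set_timestamp_mode(&saved);	/* restore the mode lost in a MAC reset */
}

int main(void)
{
	struct hwtstamp_config c = { 0, 1, 0 };

	printf("apply=%d\n", set_ts_config(&c));
	ptp_reset();
	return 0;
}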
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
790 * ixgbe_ptp_reset 811 * ixgbe_ptp_reset
791 * @adapter: the ixgbe private board structure 812 * @adapter: the ixgbe private board structure
792 * 813 *
793 * When the MAC resets, all timesync features are reset. This function should be
794 * called to re-enable the PTP clock structure. It will re-init the timecounter
795 * structure based on the kernel time as well as setup the cycle counter data.
814 * When the MAC resets, all the hardware bits for timesync are reset. This
815 * function is used to re-enable the device for PTP based on current settings.
816 * We do lose the current clock time, so just reset the cyclecounter to the
817 * system real clock time.
818 *
819 * This function will maintain hwtstamp_config settings, and resets the SDP
820 * output if it was enabled.
796 */ 821 */
797void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) 822void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
798{ 823{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
804 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); 829 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
805 IXGBE_WRITE_FLUSH(hw); 830 IXGBE_WRITE_FLUSH(hw);
806 831
807	/* Reset the saved tstamp_config */
808	memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
832	/* reset the hardware timestamping mode */
833	ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
809 834
810 ixgbe_ptp_start_cyclecounter(adapter); 835 ixgbe_ptp_start_cyclecounter(adapter);
811 836
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
825} 850}
826 851
827/** 852/**
828 * ixgbe_ptp_init
853 * ixgbe_ptp_create_clock
829 * @adapter: the ixgbe private adapter structure 854 * @adapter: the ixgbe private adapter structure
830 * 855 *
831 * This function performs the required steps for enabling ptp
832 * support. If ptp support has already been loaded it simply calls the
833 * cyclecounter init routine and exits.
856 * This function performs setup of the user entry point function table and
857 * initializes the PTP clock device, which is used to access the clock-like
858 * features of the PTP core. It will be called by ixgbe_ptp_init, only if
859 * there isn't already a clock device (such as after a suspend/resume cycle,
860 * where the clock device wasn't destroyed).
834 */ 861 */
835void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
862static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
836{ 863{
837 struct net_device *netdev = adapter->netdev; 864 struct net_device *netdev = adapter->netdev;
865 long err;
866
867 /* do nothing if we already have a clock device */
868 if (!IS_ERR_OR_NULL(adapter->ptp_clock))
869 return 0;
838 870
839 switch (adapter->hw.mac.type) { 871 switch (adapter->hw.mac.type) {
840 case ixgbe_mac_X540: 872 case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
851 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 883 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
852 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 884 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
853 adapter->ptp_caps.settime = ixgbe_ptp_settime; 885 adapter->ptp_caps.settime = ixgbe_ptp_settime;
854	adapter->ptp_caps.enable = ixgbe_ptp_enable;
886	adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
855 break; 887 break;
856 case ixgbe_mac_82599EB: 888 case ixgbe_mac_82599EB:
857 snprintf(adapter->ptp_caps.name, 889 snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
867 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 899 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
868 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 900 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
869 adapter->ptp_caps.settime = ixgbe_ptp_settime; 901 adapter->ptp_caps.settime = ixgbe_ptp_settime;
870	adapter->ptp_caps.enable = ixgbe_ptp_enable;
902	adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
871 break; 903 break;
872 default: 904 default:
873 adapter->ptp_clock = NULL; 905 adapter->ptp_clock = NULL;
874		return;
906		return -EOPNOTSUPP;
875 } 907 }
876 908
877 spin_lock_init(&adapter->tmreg_lock);
878 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
879
880 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 909 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
881 &adapter->pdev->dev); 910 &adapter->pdev->dev);
882 if (IS_ERR(adapter->ptp_clock)) { 911 if (IS_ERR(adapter->ptp_clock)) {
912 err = PTR_ERR(adapter->ptp_clock);
883 adapter->ptp_clock = NULL; 913 adapter->ptp_clock = NULL;
884 e_dev_err("ptp_clock_register failed\n"); 914 e_dev_err("ptp_clock_register failed\n");
915 return err;
885 } else 916 } else
886 e_dev_info("registered PHC device on %s\n", netdev->name); 917 e_dev_info("registered PHC device on %s\n", netdev->name);
887 918
919 /* set default timestamp mode to disabled here. We do this in
920 * create_clock instead of init, because we don't want to override the
921 * previous settings during a resume cycle.
922 */
923 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
924 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
925
926 return 0;
927}
928
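The early return above makes clock creation idempotent: after a suspend/resume cycle the clock device still exists, so ixgbe_ptp_create_clock reuses it instead of registering a second one. Modeled with a plain pointer in place of the IS_ERR_OR_NULL() check:

#include <stdio.h>

static void *ptp_clock;
static int registrations;

static int create_clock(void)
{
	if (ptp_clock)		/* stands in for !IS_ERR_OR_NULL() */
		return 0;	/* device survived suspend: nothing to do */
	ptp_clock = &registrations;
	registrations++;
	return 0;
}

int main(void)
{
	create_clock();
	create_clock();		/* second call is a no-op */
	printf("registrations=%d\n", registrations);
	return 0;
}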
929/**
930 * ixgbe_ptp_init
931 * @adapter: the ixgbe private adapter structure
932 *
933 * This function performs the required steps for enabling PTP
934 * support. If PTP support has already been loaded it simply calls the
935 * cyclecounter init routine and exits.
936 */
937void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
938{
939 /* initialize the spin lock first since we can't control when a user
940 * will call the entry functions once we have initialized the clock
941 * device
942 */
943 spin_lock_init(&adapter->tmreg_lock);
944
945 /* obtain a PTP device, or re-use an existing device */
946 if (ixgbe_ptp_create_clock(adapter))
947 return;
948
949 /* we have a clock so we can initialize work now */
950 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
951
952 /* reset the PTP related hardware bits */
888 ixgbe_ptp_reset(adapter); 953 ixgbe_ptp_reset(adapter);
889 954
890 /* enter the IXGBE_PTP_RUNNING state */ 955 /* enter the IXGBE_PTP_RUNNING state */
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
894} 959}
895 960
896/** 961/**
897 * ixgbe_ptp_stop - disable ptp device and stop the overflow check
898 * @adapter: pointer to adapter struct
962 * ixgbe_ptp_suspend - stop PTP work items
963 * @adapter: pointer to adapter struct
899 * 964 *
900 * this function stops the ptp support, and cancels the delayed work.
965 * this function suspends PTP activity, and prevents more PTP work from being
966 * generated, but does not destroy the PTP clock device.
901 */ 967 */
902void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
968void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
903{ 969{
904 /* Leave the IXGBE_PTP_RUNNING state. */ 970 /* Leave the IXGBE_PTP_RUNNING state. */
905 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 971 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
906 return; 972 return;
907 973
908	/* stop the PPS signal */
909	adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
910	ixgbe_ptp_setup_sdp(adapter);
974	/* since this might be called in suspend, we don't clear the state,
975	 * but simply reset the auxiliary PPS signal control register
976	 */
977 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
911 978
979 /* ensure that we cancel any pending PTP Tx work item in progress */
912 cancel_work_sync(&adapter->ptp_tx_work); 980 cancel_work_sync(&adapter->ptp_tx_work);
913 if (adapter->ptp_tx_skb) { 981 if (adapter->ptp_tx_skb) {
914 dev_kfree_skb_any(adapter->ptp_tx_skb); 982 dev_kfree_skb_any(adapter->ptp_tx_skb);
915 adapter->ptp_tx_skb = NULL; 983 adapter->ptp_tx_skb = NULL;
916 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 984 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
917 } 985 }
986}
987
988/**
989 * ixgbe_ptp_stop - close the PTP device
990 * @adapter: pointer to adapter struct
991 *
992 * completely destroy the PTP device, should only be called when the device is
993 * being fully closed.
994 */
995void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
996{
997 /* first, suspend PTP activity */
998 ixgbe_ptp_suspend(adapter);
918 999
1000 /* disable the PTP clock device */
919 if (adapter->ptp_clock) { 1001 if (adapter->ptp_clock) {
920 ptp_clock_unregister(adapter->ptp_clock); 1002 ptp_clock_unregister(adapter->ptp_clock);
921 adapter->ptp_clock = NULL; 1003 adapter->ptp_clock = NULL;
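The suspend/stop pair above splits lifetime management: ixgbe_ptp_suspend quiesces work items but keeps the clock device registered, while ixgbe_ptp_stop is suspend plus unregistration and is only used on a full close. A stub model of that layering:

#include <stdio.h>

static int clock_registered = 1;

static void ptp_suspend(void)
{
	printf("cancel pending Tx timestamp work\n");	/* clock stays alive */
}

static void ptp_stop(void)
{
	ptp_suspend();
	if (clock_registered) {
		clock_registered = 0;
		printf("unregister clock device\n");
	}
}

int main(void)
{
	ptp_stop();
	return 0;
}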
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..16b3a1cd9db6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
72 for (i = 0; i < num_vf_macvlans; i++) { 72 for (i = 0; i < num_vf_macvlans; i++) {
73 mv_list->vf = -1; 73 mv_list->vf = -1;
74 mv_list->free = true; 74 mv_list->free = true;
75 mv_list->rar_entry = hw->mac.num_rar_entries -
76 (i + adapter->num_vfs + 1);
77 list_add(&mv_list->l, &adapter->vf_mvs.l); 75 list_add(&mv_list->l, &adapter->vf_mvs.l);
78 mv_list++; 76 mv_list++;
79 } 77 }
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
327 u32 vector_bit; 325 u32 vector_bit;
328 u32 vector_reg; 326 u32 vector_reg;
329 u32 mta_reg; 327 u32 mta_reg;
328 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
330 329
331 /* only so many hash values supported */ 330 /* only so many hash values supported */
332 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 331 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
353 mta_reg |= (1 << vector_bit); 352 mta_reg |= (1 << vector_bit);
354 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 353 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
355 } 354 }
355 vmolr |= IXGBE_VMOLR_ROMPE;
356 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
356 357
357 return 0; 358 return 0;
358} 359}
359 360
360static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
361#ifdef CONFIG_PCI_IOV
361{
362 struct ixgbe_hw *hw = &adapter->hw;
363 struct list_head *pos;
364 struct vf_macvlans *entry;
365
366 list_for_each(pos, &adapter->vf_mvs.l) {
367 entry = list_entry(pos, struct vf_macvlans, l);
368 if (!entry->free)
369 hw->mac.ops.set_rar(hw, entry->rar_entry,
370 entry->vf_macvlan,
371 entry->vf, IXGBE_RAH_AV);
372 }
373}
374
375void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) 362void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
376{ 363{
377 struct ixgbe_hw *hw = &adapter->hw; 364 struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
382 u32 mta_reg; 369 u32 mta_reg;
383 370
384 for (i = 0; i < adapter->num_vfs; i++) { 371 for (i = 0; i < adapter->num_vfs; i++) {
372 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
385 vfinfo = &adapter->vfinfo[i]; 373 vfinfo = &adapter->vfinfo[i];
386 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { 374 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
387 hw->addr_ctrl.mta_in_use++; 375 hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
391 mta_reg |= (1 << vector_bit); 379 mta_reg |= (1 << vector_bit);
392 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 380 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
393 } 381 }
382
383 if (vfinfo->num_vf_mc_hashes)
384 vmolr |= IXGBE_VMOLR_ROMPE;
385 else
386 vmolr &= ~IXGBE_VMOLR_ROMPE;
387 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
394 } 388 }
395 389
396 /* Restore any VF macvlans */ 390 /* Restore any VF macvlans */
397	ixgbe_restore_vf_macvlans(adapter);
391	ixgbe_full_sync_mac_table(adapter);
398} 392}
393#endif
399 394
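The new VMOLR handling above sets the ROMPE (accept multicast that hits the hash table) bit per VF only when that VF actually registered hash entries, and clears it otherwise. Sketched on a plain bitmask with an illustrative bit value, not the real register layout:

#include <stdio.h>

#define ROMPE 0x1u	/* illustrative bit, not the hardware value */

static unsigned int restore_vmolr(unsigned int vmolr, int num_mc_hashes)
{
	if (num_mc_hashes)
		vmolr |= ROMPE;
	else
		vmolr &= ~ROMPE;
	return vmolr;
}

int main(void)
{
	printf("%#x %#x\n", restore_vmolr(0, 3), restore_vmolr(ROMPE, 0));
	return 0;
}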
400static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 395static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
401 u32 vf) 396 u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
495static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 490static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
496{ 491{
497 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 492 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
498	vmolr |= (IXGBE_VMOLR_ROMPE |
499		  IXGBE_VMOLR_BAM);
493	vmolr |= IXGBE_VMOLR_BAM;
500 if (aupe) 494 if (aupe)
501 vmolr |= IXGBE_VMOLR_AUPE; 495 vmolr |= IXGBE_VMOLR_AUPE;
502 else 496 else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
514{ 508{
515 struct ixgbe_hw *hw = &adapter->hw; 509 struct ixgbe_hw *hw = &adapter->hw;
516 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 510 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
517 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
518 u8 num_tcs = netdev_get_num_tc(adapter->netdev); 511 u8 num_tcs = netdev_get_num_tc(adapter->netdev);
519 512
520 /* add PF assigned VLAN or VLAN 0 */ 513 /* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
544 /* Flush and reset the mta with the new values */ 537 /* Flush and reset the mta with the new values */
545 ixgbe_set_rx_mode(adapter->netdev); 538 ixgbe_set_rx_mode(adapter->netdev);
546 539
547	hw->mac.ops.clear_rar(hw, rar_entry);
540	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
548 541
549 /* reset VF api back to unknown */ 542 /* reset VF api back to unknown */
550 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; 543 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
553static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 546static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
554 int vf, unsigned char *mac_addr) 547 int vf, unsigned char *mac_addr)
555{ 548{
556	struct ixgbe_hw *hw = &adapter->hw;
557	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
558
549	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
559 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 550 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
560	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
551	ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
561 552
562 return 0; 553 return 0;
563} 554}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
565static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, 556static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
566 int vf, int index, unsigned char *mac_addr) 557 int vf, int index, unsigned char *mac_addr)
567{ 558{
568 struct ixgbe_hw *hw = &adapter->hw;
569 struct list_head *pos; 559 struct list_head *pos;
570 struct vf_macvlans *entry; 560 struct vf_macvlans *entry;
571 561
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
576 entry->vf = -1; 566 entry->vf = -1;
577 entry->free = true; 567 entry->free = true;
578 entry->is_macvlan = false; 568 entry->is_macvlan = false;
579			hw->mac.ops.clear_rar(hw, entry->rar_entry);
569			ixgbe_del_mac_filter(adapter,
570 entry->vf_macvlan, vf);
580 } 571 }
581 } 572 }
582 } 573 }
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
612 entry->vf = vf; 603 entry->vf = vf;
613 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); 604 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
614 605
615	hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
606	ixgbe_add_mac_filter(adapter, mac_addr, vf);
616 607
617 return 0; 608 return 0;
618} 609}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1138 adapter->vfinfo[vf].vlan_count--; 1129 adapter->vfinfo[vf].vlan_count--;
1139 adapter->vfinfo[vf].pf_vlan = 0; 1130 adapter->vfinfo[vf].pf_vlan = 0;
1140 adapter->vfinfo[vf].pf_qos = 0; 1131 adapter->vfinfo[vf].pf_qos = 0;
1141 } 1132 }
1142out: 1133out:
1143 return err; 1134 return err;
1144} 1135}
1145 1136
1146static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) 1137static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1231 } 1222 }
1232} 1223}
1233 1224
1234int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
1225int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1226 int max_tx_rate)
1235{ 1227{
1236 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1228 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1237 int link_speed; 1229 int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
1249 if (link_speed != 10000) 1241 if (link_speed != 10000)
1250 return -EINVAL; 1242 return -EINVAL;
1251 1243
1244 if (min_tx_rate)
1245 return -EINVAL;
1246
1252 /* rate limit cannot be less than 10Mbs or greater than link speed */ 1247 /* rate limit cannot be less than 10Mbs or greater than link speed */
1253 if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) 1248 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1254 return -EINVAL; 1249 return -EINVAL;
1255 1250
1256 /* store values */ 1251 /* store values */
1257 adapter->vf_rate_link_speed = link_speed; 1252 adapter->vf_rate_link_speed = link_speed;
1258 adapter->vfinfo[vf].tx_rate = tx_rate; 1253 adapter->vfinfo[vf].tx_rate = max_tx_rate;
1259 1254
1260 /* update hardware configuration */ 1255 /* update hardware configuration */
1261 ixgbe_set_vf_rate_limit(adapter, vf); 1256 ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1297 return -EINVAL; 1292 return -EINVAL;
1298 ivi->vf = vf; 1293 ivi->vf = vf;
1299 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 1294 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1300 ivi->tx_rate = adapter->vfinfo[vf].tx_rate; 1295 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1296 ivi->min_tx_rate = 0;
1301 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 1297 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1302 ivi->qos = adapter->vfinfo[vf].pf_qos; 1298 ivi->qos = adapter->vfinfo[vf].pf_qos;
1303 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; 1299 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
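
The ixgbe_sriov.c hunks above make two coordinated changes: VF MAC and MAC-VLAN addresses now go through the driver's software filter list (ixgbe_add_mac_filter()/ixgbe_del_mac_filter()) instead of programming RAR registers directly, and ixgbe_ndo_set_vf_bw() adopts the new two-argument rate API. Since this hardware only enforces a ceiling, a nonzero minimum rate must be rejected outright rather than silently ignored. A minimal sketch of that validation, with illustrative names (not the driver's code verbatim):

static int example_validate_vf_rates(int link_speed_mbps,
                                     int min_tx_rate, int max_tx_rate)
{
        if (min_tx_rate)                /* no HW support for guarantees */
                return -EINVAL;

        /* max rate must lie between 10 Mb/s and the link speed;
         * 0 means "no limit"
         */
        if (max_tx_rate &&
            (max_tx_rate <= 10 || max_tx_rate > link_speed_mbps))
                return -EINVAL;

        return 0;
}
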
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..32c26d586c01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
34 */ 34 */
35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) 35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
36 36
37#ifdef CONFIG_PCI_IOV
37void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 38void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
39#endif
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 40void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); 41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
40void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
42int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); 44int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
43int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, 45int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
44 u8 qos); 46 u8 qos);
45int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 47int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
48 int max_tx_rate);
46int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); 49int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
47int ixgbe_ndo_get_vf_config(struct net_device *netdev, 50int ixgbe_ndo_get_vf_config(struct net_device *netdev,
48 int vf, struct ifla_vf_info *ivi); 51 int vf, struct ifla_vf_info *ivi);
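
The header hunk guards the ixgbe_restore_vf_multicasts() prototype with CONFIG_PCI_IOV, so the declaration is only visible when its definition is built. The patch adds only the guarded declaration; when callers can also be compiled without SR-IOV, the usual companion idiom (not part of this hunk, shown as a sketch) is an empty inline stub:

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#else
static inline void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
}
#endif
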
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..9a89f98b35f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
160#define IXGBE_MAX_EITR 0x00000FF8 160#define IXGBE_MAX_EITR 0x00000FF8
161#define IXGBE_MIN_EITR 8 161#define IXGBE_MIN_EITR 8
162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ 162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
163 (0x012300 + (((_i) - 24) * 4))) 163 (0x012300 + (((_i) - 24) * 4)))
164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
165#define IXGBE_EITR_LLI_MOD 0x00008000 165#define IXGBE_EITR_LLI_MOD 0x00008000
166#define IXGBE_EITR_CNT_WDIS 0x80000000 166#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
213 * 64-127: 0x0D014 + (n-64)*0x40 213 * 64-127: 0x0D014 + (n-64)*0x40
214 */ 214 */
215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
217 (0x0D014 + (((_i) - 64) * 0x40)))) 217 (0x0D014 + (((_i) - 64) * 0x40))))
218/* 218/*
219 * Rx DCA Control Register: 219 * Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
222 * 64-127: 0x0D00C + (n-64)*0x40 222 * 64-127: 0x0D00C + (n-64)*0x40
223 */ 223 */
224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
226 (0x0D00C + (((_i) - 64) * 0x40)))) 226 (0x0D00C + (((_i) - 64) * 0x40))))
227#define IXGBE_RDRXCTL 0x02F00 227#define IXGBE_RDRXCTL 0x02F00
228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
229 /* 8 of these 0x03C00 - 0x03C1C */ 229 /* 8 of these 0x03C00 - 0x03C1C */
230#define IXGBE_RXCTRL 0x03000 230#define IXGBE_RXCTRL 0x03000
231#define IXGBE_DROPEN 0x03D04 231#define IXGBE_DROPEN 0x03D04
232#define IXGBE_RXPBSIZE_SHIFT 10 232#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
239/* Multicast Table Array - 128 entries */ 239/* Multicast Table Array - 128 entries */
240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
242 (0x0A200 + ((_i) * 8))) 242 (0x0A200 + ((_i) * 8)))
243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
244 (0x0A204 + ((_i) * 8))) 244 (0x0A204 + ((_i) * 8)))
245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) 245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) 246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
247/* Packet split receive type */ 247/* Packet split receive type */
248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ 248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
249 (0x0EA00 + ((_i) * 4))) 249 (0x0EA00 + ((_i) * 4)))
250/* array of 4096 1-bit vlan filters */ 250/* array of 4096 1-bit vlan filters */
251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
252/*array of 4096 4-bit vlan vmdq indices */ 252/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
696 696
697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) 697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ 698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
699 (0x08600 + ((_i) * 4))) 699 (0x08600 + ((_i) * 4)))
700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) 700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
701 701
702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ 822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
823 IXGBE_GCR_EXT_VT_MODE_64) 823 IXGBE_GCR_EXT_VT_MODE_64)
824 824
825/* Time Sync Registers */ 825/* Time Sync Registers */
826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
1397 1397
1398#define IXGBE_EIMS_ENABLE_MASK ( \ 1398#define IXGBE_EIMS_ENABLE_MASK ( \
1399 IXGBE_EIMS_RTX_QUEUE | \ 1399 IXGBE_EIMS_RTX_QUEUE | \
1400 IXGBE_EIMS_LSC | \ 1400 IXGBE_EIMS_LSC | \
1401 IXGBE_EIMS_TCP_TIMER | \ 1401 IXGBE_EIMS_TCP_TIMER | \
1402 IXGBE_EIMS_OTHER) 1402 IXGBE_EIMS_OTHER)
1403 1403
1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
2161 2161
2162/* Masks to determine if packets should be dropped due to frame errors */ 2162/* Masks to determine if packets should be dropped due to frame errors */
2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
2164 IXGBE_RXD_ERR_CE | \ 2164 IXGBE_RXD_ERR_CE | \
2165 IXGBE_RXD_ERR_LE | \ 2165 IXGBE_RXD_ERR_LE | \
2166 IXGBE_RXD_ERR_PE | \ 2166 IXGBE_RXD_ERR_PE | \
2167 IXGBE_RXD_ERR_OSE | \ 2167 IXGBE_RXD_ERR_OSE | \
2168 IXGBE_RXD_ERR_USE) 2168 IXGBE_RXD_ERR_USE)
2169 2169
2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ 2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
2171 IXGBE_RXDADV_ERR_CE | \ 2171 IXGBE_RXDADV_ERR_CE | \
2172 IXGBE_RXDADV_ERR_LE | \ 2172 IXGBE_RXDADV_ERR_LE | \
2173 IXGBE_RXDADV_ERR_PE | \ 2173 IXGBE_RXDADV_ERR_PE | \
2174 IXGBE_RXDADV_ERR_OSE | \ 2174 IXGBE_RXDADV_ERR_OSE | \
2175 IXGBE_RXDADV_ERR_USE) 2175 IXGBE_RXDADV_ERR_USE)
2176 2176
2177/* Multicast bit mask */ 2177/* Multicast bit mask */
2178#define IXGBE_MCSTCTRL_MFE 0x4 2178#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ 2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
2396 IXGBE_ADVTXD_POPTS_SHIFT) 2396 IXGBE_ADVTXD_POPTS_SHIFT)
2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
2398 IXGBE_ADVTXD_POPTS_SHIFT) 2398 IXGBE_ADVTXD_POPTS_SHIFT)
2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
2438 IXGBE_LINK_SPEED_10GB_FULL) 2438 IXGBE_LINK_SPEED_10GB_FULL)
2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
2440 IXGBE_LINK_SPEED_1GB_FULL | \ 2440 IXGBE_LINK_SPEED_1GB_FULL | \
2441 IXGBE_LINK_SPEED_10GB_FULL) 2441 IXGBE_LINK_SPEED_10GB_FULL)
2442 2442
2443 2443
2444/* Physical layer type */ 2444/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
2746/* Flow control parameters */ 2746/* Flow control parameters */
2747struct ixgbe_fc_info { 2747struct ixgbe_fc_info {
2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ 2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
2749 u32 low_water; /* Flow Control Low-water */ 2749 u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
2750 u16 pause_time; /* Flow Control Pause timer */ 2750 u16 pause_time; /* Flow Control Pause timer */
2751 bool send_xon; /* Flow control send XON */ 2751 bool send_xon; /* Flow control send XON */
2752 bool strict_ieee; /* Strict IEEE mode */ 2752 bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
2840 2840
2841/* iterator type for walking multicast address lists */ 2841/* iterator type for walking multicast address lists */
2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, 2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
2843 u32 *vmdq); 2843 u32 *vmdq);
2844 2844
2845/* Function pointer table */ 2845/* Function pointer table */
2846struct ixgbe_eeprom_operations { 2846struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
2890 bool *); 2890 bool *);
2891 2891
2892 /* Packet Buffer Manipulation */ 2892 /* Packet Buffer Manipulation */
2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); 2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
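
The ixgbe_type.h changes are mostly re-indentation of continuation lines, but the queue-indexed macros they touch are worth unpacking: each folds two or three register banks with different strides into a single expression. A small sketch, with the SRRCTL constants taken straight from the definitions above:

#define EX_SRRCTL(i) (((i) <= 15) ? (0x02100 + ((i) * 4)) : \
                      (((i) < 64) ? (0x01014 + ((i) * 0x40)) : \
                       (0x0D014 + (((i) - 64) * 0x40))))

/* EX_SRRCTL(0)  == 0x02100   queues  0-15, 4-byte stride  */
/* EX_SRRCTL(16) == 0x01414   queues 16-63, 0x40 stride    */
/* EX_SRRCTL(64) == 0x0D014   queues 64+,   0x40 stride    */
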
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a5974b85c..40dd798e1290 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
81 bool autoneg_wait_to_complete) 81 bool autoneg_wait_to_complete)
82{ 82{
83 return hw->phy.ops.setup_link_speed(hw, speed, 83 return hw->phy.ops.setup_link_speed(hw, speed,
84 autoneg_wait_to_complete); 84 autoneg_wait_to_complete);
85} 85}
86 86
87/** 87/**
@@ -155,7 +155,7 @@ mac_reset_top:
155 /* Add the SAN MAC address to the RAR only if it's a valid address */ 155 /* Add the SAN MAC address to the RAR only if it's a valid address */
156 if (is_valid_ether_addr(hw->mac.san_addr)) { 156 if (is_valid_ether_addr(hw->mac.san_addr)) {
157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
158 hw->mac.san_addr, 0, IXGBE_RAH_AV); 158 hw->mac.san_addr, 0, IXGBE_RAH_AV);
159 159
160 /* Save the SAN MAC RAR index */ 160 /* Save the SAN MAC RAR index */
161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
166 166
167 /* Store the alternative WWNN/WWPN prefix */ 167 /* Store the alternative WWNN/WWPN prefix */
168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
169 &hw->mac.wwpn_prefix); 169 &hw->mac.wwpn_prefix);
170 170
171reset_hw_out: 171reset_hw_out:
172 return status; 172 return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
237 237
238 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 238 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
240 IXGBE_EEC_SIZE_SHIFT); 240 IXGBE_EEC_SIZE_SHIFT);
241 eeprom->word_size = 1 << (eeprom_size + 241 eeprom->word_size = 1 << (eeprom_size +
242 IXGBE_EEPROM_WORD_SIZE_SHIFT); 242 IXGBE_EEPROM_WORD_SIZE_SHIFT);
243 243
244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
245 eeprom->type, eeprom->word_size); 245 eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
712 udelay(50); 712 udelay(50);
713 } 713 }
714 } else { 714 } else {
715 hw_dbg(hw, "Software semaphore SMBI between device drivers " 715 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
716 "not granted.\n");
717 } 716 }
718 717
719 return status; 718 return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
813 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 812 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
814 .get_media_type = &ixgbe_get_media_type_X540, 813 .get_media_type = &ixgbe_get_media_type_X540,
815 .get_supported_physical_layer = 814 .get_supported_physical_layer =
816 &ixgbe_get_supported_physical_layer_X540, 815 &ixgbe_get_supported_physical_layer_X540,
817 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 816 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
818 .get_mac_addr = &ixgbe_get_mac_addr_generic, 817 .get_mac_addr = &ixgbe_get_mac_addr_generic,
819 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 818 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb60f065..d420f124633f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
135 ethtool_cmd_speed_set(ecmd, speed); 135 ethtool_cmd_speed_set(ecmd, speed);
136 ecmd->duplex = DUPLEX_FULL; 136 ecmd->duplex = DUPLEX_FULL;
137 } else { 137 } else {
138 ethtool_cmd_speed_set(ecmd, -1); 138 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
139 ecmd->duplex = -1; 139 ecmd->duplex = DUPLEX_UNKNOWN;
140 } 140 }
141 141
142 return 0; 142 return 0;
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
813 813
814void ixgbevf_set_ethtool_ops(struct net_device *netdev) 814void ixgbevf_set_ethtool_ops(struct net_device *netdev)
815{ 815{
816 SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); 816 netdev->ethtool_ops = &ixgbevf_ethtool_ops;
817} 817}
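
Two tree-wide cleanups land in the ixgbevf ethtool code: the raw -1 sentinels become the named SPEED_UNKNOWN/DUPLEX_UNKNOWN constants, and the SET_ETHTOOL_OPS() wrapper gives way to a plain assignment of netdev->ethtool_ops (the macro was on its way out of the kernel at this time). A condensed sketch of the resulting reporting pattern, with an illustrative function signature:

static int example_get_settings(struct net_device *netdev,
                                struct ethtool_cmd *ecmd,
                                bool link_up, u32 speed)
{
        if (link_up) {
                ethtool_cmd_speed_set(ecmd, speed);
                ecmd->duplex = DUPLEX_FULL;
        } else {
                /* named constants instead of raw -1 */
                ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
                ecmd->duplex = DUPLEX_UNKNOWN;
        }
        return 0;
}
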
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b06305..75467f83772c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86 86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 88MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
89MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION); 90MODULE_VERSION(DRV_VERSION);
91 91
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74c22d9..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/in.h> 43#include <linux/in.h>
44#include <linux/ip.h> 44#include <linux/ip.h>
45#include <net/tso.h>
45#include <linux/tcp.h> 46#include <linux/tcp.h>
46#include <linux/udp.h> 47#include <linux/udp.h>
47#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
179 * Misc definitions. 180 * Misc definitions.
180 */ 181 */
181#define DEFAULT_RX_QUEUE_SIZE 128 182#define DEFAULT_RX_QUEUE_SIZE 128
182#define DEFAULT_TX_QUEUE_SIZE 256 183#define DEFAULT_TX_QUEUE_SIZE 512
183#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 184#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
184 185
186#define TSO_HEADER_SIZE 128
185 187
188/* Max number of allowed TCP segments for software TSO */
189#define MV643XX_MAX_TSO_SEGS 100
190#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
191
192#define IS_TSO_HEADER(txq, addr) \
193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
186/* 195/*
187 * RX/TX descriptors. 196 * RX/TX descriptors.
188 */ 197 */
@@ -250,6 +259,7 @@ struct tx_desc {
250#define GEN_TCP_UDP_CHECKSUM 0x00020000 259#define GEN_TCP_UDP_CHECKSUM 0x00020000
251#define UDP_FRAME 0x00010000 260#define UDP_FRAME 0x00010000
252#define MAC_HDR_EXTRA_4_BYTES 0x00008000 261#define MAC_HDR_EXTRA_4_BYTES 0x00008000
262#define GEN_TCP_UDP_CHK_FULL 0x00000400
253#define MAC_HDR_EXTRA_8_BYTES 0x00000200 263#define MAC_HDR_EXTRA_8_BYTES 0x00000200
254 264
255#define TX_IHL_SHIFT 11 265#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
345 int tx_curr_desc; 355 int tx_curr_desc;
346 int tx_used_desc; 356 int tx_used_desc;
347 357
358 int tx_stop_threshold;
359 int tx_wake_threshold;
360
361 char *tso_hdrs;
362 dma_addr_t tso_hdrs_dma;
363
348 struct tx_desc *tx_desc_area; 364 struct tx_desc *tx_desc_area;
349 dma_addr_t tx_desc_dma; 365 dma_addr_t tx_desc_dma;
350 int tx_desc_area_size; 366 int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
491 507
492 if (netif_tx_queue_stopped(nq)) { 508 if (netif_tx_queue_stopped(nq)) {
493 __netif_tx_lock(nq, smp_processor_id()); 509 __netif_tx_lock(nq, smp_processor_id());
494 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 510 if (txq->tx_desc_count <= txq->tx_wake_threshold)
495 netif_tx_wake_queue(nq); 511 netif_tx_wake_queue(nq);
496 __netif_tx_unlock(nq); 512 __netif_tx_unlock(nq);
497 } 513 }
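
The reworked txq_maybe_wake() is one half of a new two-threshold flow-control scheme: transmit stops the queue once tx_desc_count reaches tx_stop_threshold, and completion wakes it only after the count has drained to tx_wake_threshold (half the stop level), avoiding rapid stop/wake bouncing near the boundary. A hedged sketch of the relationship, with illustrative names:

static void example_tx_flow_control(struct netdev_queue *nq, int desc_count,
                                    int ring_size, int max_descs_per_skb)
{
        int stop = ring_size - max_descs_per_skb;   /* tx_stop_threshold */
        int wake = stop / 2;                        /* tx_wake_threshold */

        if (desc_count >= stop)
                netif_tx_stop_queue(nq);
        else if (netif_tx_queue_stopped(nq) && desc_count <= wake)
                netif_tx_wake_queue(nq);
}
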
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
661 return 0; 677 return 0;
662} 678}
663 679
680static inline __be16 sum16_as_be(__sum16 sum)
681{
682 return (__force __be16)sum;
683}
684
685static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
686 u16 *l4i_chk, u32 *command, int length)
687{
688 int ret;
689 u32 cmd = 0;
690
691 if (skb->ip_summed == CHECKSUM_PARTIAL) {
692 int hdr_len;
693 int tag_bytes;
694
695 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
696 skb->protocol != htons(ETH_P_8021Q));
697
698 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
699 tag_bytes = hdr_len - ETH_HLEN;
700
701 if (length - hdr_len > mp->shared->tx_csum_limit ||
702 unlikely(tag_bytes & ~12)) {
703 ret = skb_checksum_help(skb);
704 if (!ret)
705 goto no_csum;
706 return ret;
707 }
708
709 if (tag_bytes & 4)
710 cmd |= MAC_HDR_EXTRA_4_BYTES;
711 if (tag_bytes & 8)
712 cmd |= MAC_HDR_EXTRA_8_BYTES;
713
714 cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
715 GEN_IP_V4_CHECKSUM |
716 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
717
718 /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
719 * it seems we don't need to pass the initial checksum. */
720 switch (ip_hdr(skb)->protocol) {
721 case IPPROTO_UDP:
722 cmd |= UDP_FRAME;
723 *l4i_chk = 0;
724 break;
725 case IPPROTO_TCP:
726 *l4i_chk = 0;
727 break;
728 default:
729 WARN(1, "protocol not supported");
730 }
731 } else {
732no_csum:
733 /* Errata BTS #50, IHL must be 5 if no HW checksum */
734 cmd |= 5 << TX_IHL_SHIFT;
735 }
736 *command = cmd;
737 return 0;
738}
739
740static inline int
741txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
742 struct sk_buff *skb, char *data, int length,
743 bool last_tcp, bool is_last)
744{
745 int tx_index;
746 u32 cmd_sts;
747 struct tx_desc *desc;
748
749 tx_index = txq->tx_curr_desc++;
750 if (txq->tx_curr_desc == txq->tx_ring_size)
751 txq->tx_curr_desc = 0;
752 desc = &txq->tx_desc_area[tx_index];
753
754 desc->l4i_chk = 0;
755 desc->byte_cnt = length;
756 desc->buf_ptr = dma_map_single(dev->dev.parent, data,
757 length, DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
759 WARN(1, "dma_map_single failed!\n");
760 return -ENOMEM;
761 }
762
763 cmd_sts = BUFFER_OWNED_BY_DMA;
764 if (last_tcp) {
765 /* last descriptor in the TCP packet */
766 cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
767 /* last descriptor in SKB */
768 if (is_last)
769 cmd_sts |= TX_ENABLE_INTERRUPT;
770 }
771 desc->cmd_sts = cmd_sts;
772 return 0;
773}
774
775static inline void
776txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
777{
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
779 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
780 int tx_index;
781 struct tx_desc *desc;
782 int ret;
783 u32 cmd_csum = 0;
784 u16 l4i_chk = 0;
785
786 tx_index = txq->tx_curr_desc;
787 desc = &txq->tx_desc_area[tx_index];
788
789 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
790 if (ret)
791 WARN(1, "failed to prepare checksum!");
792
793 /* Should we set this? Can't use the value from skb_tx_csum()
794 * as it's not the correct initial L4 checksum to use. */
795 desc->l4i_chk = 0;
796
797 desc->byte_cnt = hdr_len;
798 desc->buf_ptr = txq->tso_hdrs_dma +
799 txq->tx_curr_desc * TSO_HEADER_SIZE;
800 desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
801 GEN_CRC;
802
803 txq->tx_curr_desc++;
804 if (txq->tx_curr_desc == txq->tx_ring_size)
805 txq->tx_curr_desc = 0;
806}
807
808static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
809 struct net_device *dev)
810{
811 struct mv643xx_eth_private *mp = txq_to_mp(txq);
812 int total_len, data_left, ret;
813 int desc_count = 0;
814 struct tso_t tso;
815 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
816
817 /* Count needed descriptors */
818 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
819 netdev_dbg(dev, "not enough descriptors for TSO!\n");
820 return -EBUSY;
821 }
822
823 /* Initialize the TSO handler, and prepare the first payload */
824 tso_start(skb, &tso);
825
826 total_len = skb->len - hdr_len;
827 while (total_len > 0) {
828 char *hdr;
829
830 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
831 total_len -= data_left;
832 desc_count++;
833
834 /* prepare packet headers: MAC + IP + TCP */
835 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
836 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
837 txq_put_hdr_tso(skb, txq, data_left);
838
839 while (data_left > 0) {
840 int size;
841 desc_count++;
842
843 size = min_t(int, tso.size, data_left);
844 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
845 size == data_left,
846 total_len == 0);
847 if (ret)
848 goto err_release;
849 data_left -= size;
850 tso_build_data(skb, &tso, size);
851 }
852 }
853
854 __skb_queue_tail(&txq->tx_skb, skb);
855 skb_tx_timestamp(skb);
856
857 /* clear TX_END status */
858 mp->work_tx_end &= ~(1 << txq->index);
859
860 /* ensure all descriptors are written before poking hardware */
861 wmb();
862 txq_enable(txq);
863 txq->tx_desc_count += desc_count;
864 return 0;
865err_release:
866 /* TODO: Release all used data descriptors; header descriptors must not
867 * be DMA-unmapped.
868 */
869 return ret;
870}
871
664static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 872static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
665{ 873{
666 struct mv643xx_eth_private *mp = txq_to_mp(txq); 874 struct mv643xx_eth_private *mp = txq_to_mp(txq);
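
txq_submit_tso() above follows the generic software-TSO shape that <net/tso.h> is built for: check descriptor headroom with tso_count_descs(), then for each gso_size-sized segment rebuild the MAC/IP/TCP headers into a slot of the coherent header buffer with tso_build_hdr(), emit the header descriptor, and emit one data descriptor per contiguous payload chunk, advancing with tso_build_data(). A condensed sketch of that loop; the txq_* helpers are hypothetical stand-ins for the driver-specific descriptor code:

static int example_xmit_tso(struct sk_buff *skb, struct tx_queue *txq)
{
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len = skb->len - hdr_len;
        struct tso_t tso;

        /* refuse the packet if the worst case does not fit */
        if (txq_free_descs(txq) < tso_count_descs(skb))
                return -EBUSY;

        tso_start(skb, &tso);           /* point at the first payload byte */
        while (total_len > 0) {
                int data_left = min_t(int, skb_shinfo(skb)->gso_size,
                                      total_len);
                char *hdr = txq_next_tso_hdr(txq);      /* hypothetical */

                total_len -= data_left;
                /* rebuild MAC + IP + TCP for this segment in the slot */
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                txq_emit_hdr_desc(txq, hdr_len);        /* hypothetical */

                while (data_left > 0) {
                        int size = min_t(int, tso.size, data_left);

                        /* hypothetical: map tso.data, fill one descriptor */
                        txq_emit_data_desc(txq, tso.data, size,
                                           size == data_left, /* seg end */
                                           total_len == 0);   /* skb end */
                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }
        }
        return 0;
}
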
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
671 skb_frag_t *this_frag; 879 skb_frag_t *this_frag;
672 int tx_index; 880 int tx_index;
673 struct tx_desc *desc; 881 struct tx_desc *desc;
882 void *addr;
674 883
675 this_frag = &skb_shinfo(skb)->frags[frag]; 884 this_frag = &skb_shinfo(skb)->frags[frag];
885 addr = page_address(this_frag->page.p) + this_frag->page_offset;
676 tx_index = txq->tx_curr_desc++; 886 tx_index = txq->tx_curr_desc++;
677 if (txq->tx_curr_desc == txq->tx_ring_size) 887 if (txq->tx_curr_desc == txq->tx_ring_size)
678 txq->tx_curr_desc = 0; 888 txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
692 902
693 desc->l4i_chk = 0; 903 desc->l4i_chk = 0;
694 desc->byte_cnt = skb_frag_size(this_frag); 904 desc->byte_cnt = skb_frag_size(this_frag);
695 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 905 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
696 this_frag, 0, 906 desc->byte_cnt, DMA_TO_DEVICE);
697 skb_frag_size(this_frag),
698 DMA_TO_DEVICE);
699 } 907 }
700} 908}
701 909
702static inline __be16 sum16_as_be(__sum16 sum) 910static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
703{ 911 struct net_device *dev)
704 return (__force __be16)sum;
705}
706
707static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
708{ 912{
709 struct mv643xx_eth_private *mp = txq_to_mp(txq); 913 struct mv643xx_eth_private *mp = txq_to_mp(txq);
710 int nr_frags = skb_shinfo(skb)->nr_frags; 914 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
712 struct tx_desc *desc; 916 struct tx_desc *desc;
713 u32 cmd_sts; 917 u32 cmd_sts;
714 u16 l4i_chk; 918 u16 l4i_chk;
715 int length; 919 int length, ret;
716 920
717 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 921 cmd_sts = 0;
718 l4i_chk = 0; 922 l4i_chk = 0;
719 923
720 if (skb->ip_summed == CHECKSUM_PARTIAL) { 924 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
721 int hdr_len; 925 if (net_ratelimit())
722 int tag_bytes; 926 netdev_err(dev, "tx queue full?!\n");
723 927 return -EBUSY;
724 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
725 skb->protocol != htons(ETH_P_8021Q));
726
727 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
728 tag_bytes = hdr_len - ETH_HLEN;
729 if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
730 unlikely(tag_bytes & ~12)) {
731 if (skb_checksum_help(skb) == 0)
732 goto no_csum;
733 dev_kfree_skb_any(skb);
734 return 1;
735 }
736
737 if (tag_bytes & 4)
738 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
739 if (tag_bytes & 8)
740 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
741
742 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
743 GEN_IP_V4_CHECKSUM |
744 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
745
746 switch (ip_hdr(skb)->protocol) {
747 case IPPROTO_UDP:
748 cmd_sts |= UDP_FRAME;
749 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
750 break;
751 case IPPROTO_TCP:
752 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
753 break;
754 default:
755 BUG();
756 }
757 } else {
758no_csum:
759 /* Errata BTS #50, IHL must be 5 if no HW checksum */
760 cmd_sts |= 5 << TX_IHL_SHIFT;
761 } 928 }
762 929
930 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
931 if (ret)
932 return ret;
933 cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
934
763 tx_index = txq->tx_curr_desc++; 935 tx_index = txq->tx_curr_desc++;
764 if (txq->tx_curr_desc == txq->tx_ring_size) 936 if (txq->tx_curr_desc == txq->tx_ring_size)
765 txq->tx_curr_desc = 0; 937 txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
801static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 973static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
802{ 974{
803 struct mv643xx_eth_private *mp = netdev_priv(dev); 975 struct mv643xx_eth_private *mp = netdev_priv(dev);
804 int length, queue; 976 int length, queue, ret;
805 struct tx_queue *txq; 977 struct tx_queue *txq;
806 struct netdev_queue *nq; 978 struct netdev_queue *nq;
807 979
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
810 nq = netdev_get_tx_queue(dev, queue); 982 nq = netdev_get_tx_queue(dev, queue);
811 983
812 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 984 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
813 txq->tx_dropped++;
814 netdev_printk(KERN_DEBUG, dev, 985 netdev_printk(KERN_DEBUG, dev,
815 "failed to linearize skb with tiny unaligned fragment\n"); 986 "failed to linearize skb with tiny unaligned fragment\n");
816 return NETDEV_TX_BUSY; 987 return NETDEV_TX_BUSY;
817 } 988 }
818 989
819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
820 if (net_ratelimit())
821 netdev_err(dev, "tx queue full?!\n");
822 dev_kfree_skb_any(skb);
823 return NETDEV_TX_OK;
824 }
825
826 length = skb->len; 990 length = skb->len;
827 991
828 if (!txq_submit_skb(txq, skb)) { 992 if (skb_is_gso(skb))
829 int entries_left; 993 ret = txq_submit_tso(txq, skb, dev);
830 994 else
995 ret = txq_submit_skb(txq, skb, dev);
996 if (!ret) {
831 txq->tx_bytes += length; 997 txq->tx_bytes += length;
832 txq->tx_packets++; 998 txq->tx_packets++;
833 999
834 entries_left = txq->tx_ring_size - txq->tx_desc_count; 1000 if (txq->tx_desc_count >= txq->tx_stop_threshold)
835 if (entries_left < MAX_SKB_FRAGS + 1)
836 netif_tx_stop_queue(nq); 1001 netif_tx_stop_queue(nq);
1002 } else {
1003 txq->tx_dropped++;
1004 dev_kfree_skb_any(skb);
837 } 1005 }
838 1006
839 return NETDEV_TX_OK; 1007 return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
907 mp->dev->stats.tx_errors++; 1075 mp->dev->stats.tx_errors++;
908 } 1076 }
909 1077
910 if (cmd_sts & TX_FIRST_DESC) { 1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
911 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
912 desc->byte_cnt, DMA_TO_DEVICE); 1080 desc->byte_cnt, DMA_TO_DEVICE);
913 } else {
914 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
915 desc->byte_cnt, DMA_TO_DEVICE);
916 }
917
918 dev_kfree_skb(skb); 1081 dev_kfree_skb(skb);
919 } 1082 }
920 1083
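
Reclaim no longer distinguishes first and fragment descriptors with dma_unmap_single() versus dma_unmap_page(): all buffers are now mapped with dma_map_single(), so the only special case is a TSO header, which lives in the queue's coherent allocation and must never be unmapped per-descriptor. The IS_TSO_HEADER() range test above makes that call; written out as a function (a sketch using the fields added earlier):

static bool example_is_tso_header(struct tx_queue *txq, dma_addr_t addr)
{
        return addr >= txq->tso_hdrs_dma &&
               addr <  txq->tso_hdrs_dma +
                       txq->tx_ring_size * TSO_HEADER_SIZE;
}
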
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1010 1173
1011 1174
1012/* mii management interface *************************************************/ 1175/* mii management interface *************************************************/
1013static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) 1176static void mv643xx_eth_adjust_link(struct net_device *dev)
1014{ 1177{
1178 struct mv643xx_eth_private *mp = netdev_priv(dev);
1015 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 1179 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1016 u32 autoneg_disable = FORCE_LINK_PASS | 1180 u32 autoneg_disable = FORCE_LINK_PASS |
1017 DISABLE_AUTO_NEG_SPEED_GMII | 1181 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1387 1551
1388 ret = phy_ethtool_sset(mp->phy, cmd); 1552 ret = phy_ethtool_sset(mp->phy, cmd);
1389 if (!ret) 1553 if (!ret)
1390 mv643xx_adjust_pscr(mp); 1554 mv643xx_eth_adjust_link(dev);
1391 return ret; 1555 return ret;
1392} 1556}
1393 1557
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1456 return -EINVAL; 1620 return -EINVAL;
1457 1621
1458 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1622 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1459 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1623 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1624 MV643XX_MAX_SKB_DESCS * 2, 4096);
1625 if (mp->tx_ring_size != er->tx_pending)
1626 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1627 mp->tx_ring_size, er->tx_pending);
1460 1628
1461 if (netif_running(dev)) { 1629 if (netif_running(dev)) {
1462 mv643xx_eth_stop(dev); 1630 mv643xx_eth_stop(dev);
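
The new lower bound on the TX ring guarantees that tx_stop_threshold = size - MV643XX_MAX_SKB_DESCS is itself at least MV643XX_MAX_SKB_DESCS, i.e. a worst-case skb always fits whenever the queue is awake. With 4 KiB pages MAX_SKB_FRAGS is 17, so MV643XX_MAX_SKB_DESCS = 100 * 2 + 17 = 217 and the clamp floors the ring at 434 entries, which is also why DEFAULT_TX_QUEUE_SIZE grew from 256 to 512 earlier in this patch. Sketch of the clamp as a helper (illustrative name):

static unsigned int example_clamp_tx_ring(struct net_device *dev,
                                          unsigned int requested)
{
        unsigned int ring = clamp_t(unsigned int, requested,
                                    MV643XX_MAX_SKB_DESCS * 2, 4096);

        if (ring != requested)
                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
                            ring, requested);
        return ring;
}
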
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1832 2000
1833 txq->tx_ring_size = mp->tx_ring_size; 2001 txq->tx_ring_size = mp->tx_ring_size;
1834 2002
2003 /* A queue must always have room for at least one skb.
2004 * Therefore, stop the queue when the free entries reaches
2005 * the maximum number of descriptors per skb.
2006 */
2007 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2008 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2009
1835 txq->tx_desc_count = 0; 2010 txq->tx_desc_count = 0;
1836 txq->tx_curr_desc = 0; 2011 txq->tx_curr_desc = 0;
1837 txq->tx_used_desc = 0; 2012 txq->tx_used_desc = 0;
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1871 nexti * sizeof(struct tx_desc); 2046 nexti * sizeof(struct tx_desc);
1872 } 2047 }
1873 2048
2049 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2050 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2051 txq->tx_ring_size * TSO_HEADER_SIZE,
2052 &txq->tso_hdrs_dma, GFP_KERNEL);
2053 if (txq->tso_hdrs == NULL) {
2054 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2055 txq->tx_desc_area, txq->tx_desc_dma);
2056 return -ENOMEM;
2057 }
1874 skb_queue_head_init(&txq->tx_skb); 2058 skb_queue_head_init(&txq->tx_skb);
1875 2059
1876 return 0; 2060 return 0;
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
1891 else 2075 else
1892 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2076 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
1893 txq->tx_desc_area, txq->tx_desc_dma); 2077 txq->tx_desc_area, txq->tx_desc_dma);
2078 if (txq->tso_hdrs)
2079 dma_free_coherent(mp->dev->dev.parent,
2080 txq->tx_ring_size * TSO_HEADER_SIZE,
2081 txq->tso_hdrs, txq->tso_hdrs_dma);
1894} 2082}
1895 2083
1896 2084
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2303 2491
2304 ret = phy_mii_ioctl(mp->phy, ifr, cmd); 2492 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2305 if (!ret) 2493 if (!ret)
2306 mv643xx_adjust_pscr(mp); 2494 mv643xx_eth_adjust_link(dev);
2307 return ret; 2495 return ret;
2308} 2496}
2309 2497
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
2678 struct mv643xx_eth_platform_data *pd) 2866 struct mv643xx_eth_platform_data *pd)
2679{ 2867{
2680 struct net_device *dev = mp->dev; 2868 struct net_device *dev = mp->dev;
2869 unsigned int tx_ring_size;
2681 2870
2682 if (is_valid_ether_addr(pd->mac_addr)) 2871 if (is_valid_ether_addr(pd->mac_addr))
2683 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 2872 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
2692 2881
2693 mp->rxq_count = pd->rx_queue_count ? : 1; 2882 mp->rxq_count = pd->rx_queue_count ? : 1;
2694 2883
2695 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2884 tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2696 if (pd->tx_queue_size) 2885 if (pd->tx_queue_size)
2697 mp->tx_ring_size = pd->tx_queue_size; 2886 tx_ring_size = pd->tx_queue_size;
2887
2888 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2889 MV643XX_MAX_SKB_DESCS * 2, 4096);
2890 if (mp->tx_ring_size != tx_ring_size)
2891 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2892 mp->tx_ring_size, tx_ring_size);
2893
2698 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2894 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2699 mp->tx_desc_sram_size = pd->tx_sram_size; 2895 mp->tx_desc_sram_size = pd->tx_sram_size;
2700 2896
2701 mp->txq_count = pd->tx_queue_count ? : 1; 2897 mp->txq_count = pd->tx_queue_count ? : 1;
2702} 2898}
2703 2899
2704static void mv643xx_eth_adjust_link(struct net_device *dev)
2705{
2706 struct mv643xx_eth_private *mp = netdev_priv(dev);
2707
2708 mv643xx_adjust_pscr(mp);
2709}
2710
2711static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2900static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2712 int phy_addr) 2901 int phy_addr)
2713{ 2902{
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2889 if (err) 3078 if (err)
2890 goto out; 3079 goto out;
2891 3080
2892 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 3081 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
2893 3082
2894 init_pscr(mp, pd->speed, pd->duplex); 3083 init_pscr(mp, pd->speed, pd->duplex);
2895 3084
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2921 dev->watchdog_timeo = 2 * HZ; 3110 dev->watchdog_timeo = 2 * HZ;
2922 dev->base_addr = 0; 3111 dev->base_addr = 0;
2923 3112
2924 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3113 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2925 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3114 dev->vlan_features = dev->features;
2926 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 3115
3116 dev->features |= NETIF_F_RXCSUM;
3117 dev->hw_features = dev->features;
2927 3118
2928 dev->priv_flags |= IFF_UNICAST_FLT; 3119 dev->priv_flags |= IFF_UNICAST_FLT;
3120 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
2929 3121
2930 SET_NETDEV_DEV(dev, &pdev->dev); 3122 SET_NETDEV_DEV(dev, &pdev->dev);
2931 3123
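
The probe hunk above rewires the feature flags so the software-TSO path is advertised: SG, IP checksum and TSO become base features and are mirrored into vlan_features, RXCSUM is added on top, and hw_features exposes the whole set as user-togglable. gso_max_segs caps how many segments the stack may hand over in one GSO skb, matching the MV643XX_MAX_TSO_SEGS budget used in the descriptor accounting. Condensed, the wiring is:

dev->features      = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->vlan_features = dev->features;        /* same set over VLANs      */
dev->features     |= NETIF_F_RXCSUM;       /* RX csum on top           */
dev->hw_features   = dev->features;        /* everything togglable     */
dev->gso_max_segs  = MV643XX_MAX_TSO_SEGS; /* cap GSO segment count    */
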
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
195 return -ENODEV; 195 return -ENODEV;
196 } 196 }
197 197
198 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); 198 bus = devm_mdiobus_alloc_size(&pdev->dev,
199 if (!bus) { 199 sizeof(struct orion_mdio_dev));
200 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); 200 if (!bus)
201 return -ENOMEM; 201 return -ENOMEM;
202 }
203 202
204 bus->name = "orion_mdio_bus"; 203 bus->name = "orion_mdio_bus";
205 bus->read = orion_mdio_read; 204 bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
208 dev_name(&pdev->dev)); 207 dev_name(&pdev->dev));
209 bus->parent = &pdev->dev; 208 bus->parent = &pdev->dev;
210 209
211 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 210 bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
212 if (!bus->irq) { 211 GFP_KERNEL);
213 mdiobus_free(bus); 212 if (!bus->irq)
214 return -ENOMEM; 213 return -ENOMEM;
215 }
216 214
217 for (i = 0; i < PHY_MAX_ADDR; i++) 215 for (i = 0; i < PHY_MAX_ADDR; i++)
218 bus->irq[i] = PHY_POLL; 216 bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
264out_mdio: 262out_mdio:
265 if (!IS_ERR(dev->clk)) 263 if (!IS_ERR(dev->clk))
266 clk_disable_unprepare(dev->clk); 264 clk_disable_unprepare(dev->clk);
267 kfree(bus->irq);
268 mdiobus_free(bus);
269 return ret; 265 return ret;
270} 266}
271 267
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
276 272
277 writel(0, dev->regs + MVMDIO_ERR_INT_MASK); 273 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
278 mdiobus_unregister(bus); 274 mdiobus_unregister(bus);
279 kfree(bus->irq);
280 mdiobus_free(bus);
281 if (!IS_ERR(dev->clk)) 275 if (!IS_ERR(dev->clk))
282 clk_disable_unprepare(dev->clk); 276 clk_disable_unprepare(dev->clk);
283 277
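
The mvmdio conversion to managed allocations removes both manual unwind paths: devm_mdiobus_alloc_size() and devm_kmalloc_array() tie the bus structure and its irq table to the device's lifetime, so probe errors can simply return. A condensed sketch of the managed shape (allocation and error handling only):

static int example_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        bus = devm_mdiobus_alloc_size(&pdev->dev,
                                      sizeof(struct orion_mdio_dev));
        if (!bus)
                return -ENOMEM;         /* nothing to unwind */

        bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR,
                                      sizeof(int), GFP_KERNEL);
        if (!bus->irq)
                return -ENOMEM;         /* bus freed with the device */

        return 0;
}
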
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
23#include <net/ip.h> 23#include <net/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <net/tso.h>
26#include <linux/of.h> 27#include <linux/of.h>
27#include <linux/of_irq.h> 28#include <linux/of_irq.h>
28#include <linux/of_mdio.h> 29#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
218#define MVNETA_RX_COAL_PKTS 32 219#define MVNETA_RX_COAL_PKTS 32
219#define MVNETA_RX_COAL_USEC 100 220#define MVNETA_RX_COAL_USEC 100
220 221
221/* Napi polling weight */
222#define MVNETA_RX_POLL_WEIGHT 64
223
224/* The two bytes Marvell header. Either contains a special value used 222/* The two bytes Marvell header. Either contains a special value used
225 * by Marvell switches when a specific hardware mode is enabled (not 223 * by Marvell switches when a specific hardware mode is enabled (not
226 * supported by this driver) or is filled automatically by zeroes on 224 * supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
244 242
245#define MVNETA_TX_MTU_MAX 0x3ffff 243#define MVNETA_TX_MTU_MAX 0x3ffff
246 244
245/* TSO header size */
246#define TSO_HEADER_SIZE 128
247
247/* Max number of Rx descriptors */ 248/* Max number of Rx descriptors */
248#define MVNETA_MAX_RXD 128 249#define MVNETA_MAX_RXD 128
249 250
250/* Max number of Tx descriptors */ 251/* Max number of Tx descriptors */
251#define MVNETA_MAX_TXD 532 252#define MVNETA_MAX_TXD 532
252 253
254/* Max number of allowed TCP segments for software TSO */
255#define MVNETA_MAX_TSO_SEGS 100
256
257#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
258
253/* descriptor aligned size */ 259/* descriptor aligned size */
254#define MVNETA_DESC_ALIGNED_SIZE 32 260#define MVNETA_DESC_ALIGNED_SIZE 32
255 261
@@ -258,6 +264,10 @@
258 ETH_HLEN + ETH_FCS_LEN, \ 264 ETH_HLEN + ETH_FCS_LEN, \
259 MVNETA_CPU_D_CACHE_LINE_SIZE) 265 MVNETA_CPU_D_CACHE_LINE_SIZE)
260 266
267#define IS_TSO_HEADER(txq, addr) \
268 ((addr >= txq->tso_hdrs_phys) && \
269 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
270
261#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 271#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
262 272
263struct mvneta_pcpu_stats { 273struct mvneta_pcpu_stats {
@@ -279,9 +289,6 @@ struct mvneta_port {
279 u32 cause_rx_tx; 289 u32 cause_rx_tx;
280 struct napi_struct napi; 290 struct napi_struct napi;
281 291
282 /* Napi weight */
283 int weight;
284
285 /* Core clock */ 292 /* Core clock */
286 struct clk *clk; 293 struct clk *clk;
287 u8 mcast_count[256]; 294 u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
390 * descriptor ring 397 * descriptor ring
391 */ 398 */
392 int count; 399 int count;
400 int tx_stop_threshold;
401 int tx_wake_threshold;
393 402
394 /* Array of transmitted skb */ 403 /* Array of transmitted skb */
395 struct sk_buff **tx_skb; 404 struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
413 422
414 /* Index of the next TX DMA descriptor to process */ 423 /* Index of the next TX DMA descriptor to process */
415 int next_desc_to_proc; 424 int next_desc_to_proc;
425
426 /* DMA buffers for TSO headers */
427 char *tso_hdrs;
428
429 /* DMA address of TSO headers */
430 dma_addr_t tso_hdrs_phys;
416}; 431};
417 432
418struct mvneta_rx_queue { 433struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
441 int next_desc_to_proc; 456 int next_desc_to_proc;
442}; 457};
443 458
444static int rxq_number = 8; 459/* The hardware supports eight (8) rx queues, but we are only allowing
460 * the first one to be used. Therefore, let's just allocate one queue.
461 */
462static int rxq_number = 1;
445static int txq_number = 8; 463static int txq_number = 8;
446 464
447static int rxq_def; 465static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1277 1295
1278 mvneta_txq_inc_get(txq); 1296 mvneta_txq_inc_get(txq);
1279 1297
1298 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1299 dma_unmap_single(pp->dev->dev.parent,
1300 tx_desc->buf_phys_addr,
1301 tx_desc->data_size, DMA_TO_DEVICE);
1280 if (!skb) 1302 if (!skb)
1281 continue; 1303 continue;
1282
1283 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1284 tx_desc->data_size, DMA_TO_DEVICE);
1285 dev_kfree_skb_any(skb); 1304 dev_kfree_skb_any(skb);
1286 } 1305 }
1287} 1306}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
1302 txq->count -= tx_done; 1321 txq->count -= tx_done;
1303 1322
1304 if (netif_tx_queue_stopped(nq)) { 1323 if (netif_tx_queue_stopped(nq)) {
1305 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1) 1324 if (txq->count <= txq->tx_wake_threshold)
1306 netif_tx_wake_queue(nq); 1325 netif_tx_wake_queue(nq);
1307 } 1326 }
1308} 1327}
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1519 return rx_done; 1538 return rx_done;
1520} 1539}
1521 1540
1541static inline void
1542mvneta_tso_put_hdr(struct sk_buff *skb,
1543 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1544{
1545 struct mvneta_tx_desc *tx_desc;
1546 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1547
1548 txq->tx_skb[txq->txq_put_index] = NULL;
1549 tx_desc = mvneta_txq_next_desc_get(txq);
1550 tx_desc->data_size = hdr_len;
1551 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1552 tx_desc->command |= MVNETA_TXD_F_DESC;
1553 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1554 txq->txq_put_index * TSO_HEADER_SIZE;
1555 mvneta_txq_inc_put(txq);
1556}
1557
1558static inline int
1559mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1560 struct sk_buff *skb, char *data, int size,
1561 bool last_tcp, bool is_last)
1562{
1563 struct mvneta_tx_desc *tx_desc;
1564
1565 tx_desc = mvneta_txq_next_desc_get(txq);
1566 tx_desc->data_size = size;
1567 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1568 size, DMA_TO_DEVICE);
1569 if (unlikely(dma_mapping_error(dev->dev.parent,
1570 tx_desc->buf_phys_addr))) {
1571 mvneta_txq_desc_put(txq);
1572 return -ENOMEM;
1573 }
1574
1575 tx_desc->command = 0;
1576 txq->tx_skb[txq->txq_put_index] = NULL;
1577
1578 if (last_tcp) {
1579 /* last descriptor in the TCP packet */
1580 tx_desc->command = MVNETA_TXD_L_DESC;
1581
1582 /* last descriptor in SKB */
1583 if (is_last)
1584 txq->tx_skb[txq->txq_put_index] = skb;
1585 }
1586 mvneta_txq_inc_put(txq);
1587 return 0;
1588}
1589
1590static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1591 struct mvneta_tx_queue *txq)
1592{
1593 int total_len, data_left;
1594 int desc_count = 0;
1595 struct mvneta_port *pp = netdev_priv(dev);
1596 struct tso_t tso;
1597 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1598 int i;
1599
1600 /* Count needed descriptors */
1601 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1602 return 0;
1603
1604 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1605 pr_info("*** Is this even possible???!?!?\n");
1606 return 0;
1607 }
1608
1609 /* Initialize the TSO handler, and prepare the first payload */
1610 tso_start(skb, &tso);
1611
1612 total_len = skb->len - hdr_len;
1613 while (total_len > 0) {
1614 char *hdr;
1615
1616 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1617 total_len -= data_left;
1618 desc_count++;
1619
1620 /* prepare packet headers: MAC + IP + TCP */
1621 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1622 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1623
1624 mvneta_tso_put_hdr(skb, pp, txq);
1625
1626 while (data_left > 0) {
1627 int size;
1628 desc_count++;
1629
1630 size = min_t(int, tso.size, data_left);
1631
1632 if (mvneta_tso_put_data(dev, txq, skb,
1633 tso.data, size,
1634 size == data_left,
1635 total_len == 0))
1636 goto err_release;
1637 data_left -= size;
1638
1639 tso_build_data(skb, &tso, size);
1640 }
1641 }
1642
1643 return desc_count;
1644
1645err_release:
1646 /* Release all used data descriptors; header descriptors must not
1647 * be DMA-unmapped.
1648 */
1649 for (i = desc_count - 1; i >= 0; i--) {
1650 struct mvneta_tx_desc *tx_desc = txq->descs + i;
1651 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1652 dma_unmap_single(pp->dev->dev.parent,
1653 tx_desc->buf_phys_addr,
1654 tx_desc->data_size,
1655 DMA_TO_DEVICE);
1656 mvneta_txq_desc_put(txq);
1657 }
1658 return 0;
1659}
1660
1522/* Handle tx fragmentation processing */ 1661/* Handle tx fragmentation processing */
1523static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 1662static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1524 struct mvneta_tx_queue *txq) 1663 struct mvneta_tx_queue *txq)
1525{ 1664{
1526 struct mvneta_tx_desc *tx_desc; 1665 struct mvneta_tx_desc *tx_desc;
1527 int i; 1666 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1528 1667
1529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1668 for (i = 0; i < nr_frags; i++) {
1530 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1669 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1531 void *addr = page_address(frag->page.p) + frag->page_offset; 1670 void *addr = page_address(frag->page.p) + frag->page_offset;
1532 1671
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1543 goto error; 1682 goto error;
1544 } 1683 }
1545 1684
1546 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 1685 if (i == nr_frags - 1) {
1547 /* Last descriptor */ 1686 /* Last descriptor */
1548 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 1687 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1549
1550 txq->tx_skb[txq->txq_put_index] = skb; 1688 txq->tx_skb[txq->txq_put_index] = skb;
1551
1552 mvneta_txq_inc_put(txq);
1553 } else { 1689 } else {
1554 /* Descriptor in the middle: Not First, Not Last */ 1690 /* Descriptor in the middle: Not First, Not Last */
1555 tx_desc->command = 0; 1691 tx_desc->command = 0;
1556
1557 txq->tx_skb[txq->txq_put_index] = NULL; 1692 txq->tx_skb[txq->txq_put_index] = NULL;
1558 mvneta_txq_inc_put(txq);
1559 } 1693 }
1694 mvneta_txq_inc_put(txq);
1560 } 1695 }
1561 1696
1562 return 0; 1697 return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1584 u16 txq_id = skb_get_queue_mapping(skb); 1719 u16 txq_id = skb_get_queue_mapping(skb);
1585 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 1720 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1586 struct mvneta_tx_desc *tx_desc; 1721 struct mvneta_tx_desc *tx_desc;
1587 struct netdev_queue *nq;
1588 int frags = 0; 1722 int frags = 0;
1589 u32 tx_cmd; 1723 u32 tx_cmd;
1590 1724
1591 if (!netif_running(dev)) 1725 if (!netif_running(dev))
1592 goto out; 1726 goto out;
1593 1727
1728 if (skb_is_gso(skb)) {
1729 frags = mvneta_tx_tso(skb, dev, txq);
1730 goto out;
1731 }
1732
1594 frags = skb_shinfo(skb)->nr_frags + 1; 1733 frags = skb_shinfo(skb)->nr_frags + 1;
1595 nq = netdev_get_tx_queue(dev, txq_id);
1596 1734
1597 /* Get a descriptor for the first part of the packet */ 1735 /* Get a descriptor for the first part of the packet */
1598 tx_desc = mvneta_txq_next_desc_get(txq); 1736 tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1635 } 1773 }
1636 } 1774 }
1637 1775
1638 txq->count += frags;
1639 mvneta_txq_pend_desc_add(pp, txq, frags);
1640
1641 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1642 netif_tx_stop_queue(nq);
1643
1644out: 1776out:
1645 if (frags > 0) { 1777 if (frags > 0) {
1646 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1778 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1779 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1780
1781 txq->count += frags;
1782 mvneta_txq_pend_desc_add(pp, txq, frags);
1783
1784 if (txq->count >= txq->tx_stop_threshold)
1785 netif_tx_stop_queue(nq);
1647 1786
1648 u64_stats_update_begin(&stats->syncp); 1787 u64_stats_update_begin(&stats->syncp);
1649 stats->tx_packets++; 1788 stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
2003{ 2142{
2004 int queue; 2143 int queue;
2005 2144
2006 /* free the skb's in the hal tx ring */ 2145 /* free the skb's in the tx ring */
2007 for (queue = 0; queue < txq_number; queue++) 2146 for (queue = 0; queue < txq_number; queue++)
2008 mvneta_txq_done_force(pp, &pp->txqs[queue]); 2147 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2009 2148
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2081{ 2220{
2082 txq->size = pp->tx_ring_size; 2221 txq->size = pp->tx_ring_size;
2083 2222
2223 /* A queue must always have room for at least one skb.
2224 * Therefore, stop the queue when the free entries reaches
2225 * the maximum number of descriptors per skb.
2226 */
2227 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2228 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2229
2230
2084 /* Allocate memory for TX descriptors */ 2231 /* Allocate memory for TX descriptors */
2085 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2232 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2086 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2233 txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2109 txq->descs, txq->descs_phys); 2256 txq->descs, txq->descs_phys);
2110 return -ENOMEM; 2257 return -ENOMEM;
2111 } 2258 }
2259
2260 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2261 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2262 txq->size * TSO_HEADER_SIZE,
2263 &txq->tso_hdrs_phys, GFP_KERNEL);
2264 if (txq->tso_hdrs == NULL) {
2265 kfree(txq->tx_skb);
2266 dma_free_coherent(pp->dev->dev.parent,
2267 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2268 txq->descs, txq->descs_phys);
2269 return -ENOMEM;
2270 }
2112 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2271 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2113 2272
2114 return 0; 2273 return 0;
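
Software TSO emits one freshly built MAC/IP/TCP header per segment, so each TX queue pre-allocates a coherent region of txq->size * TSO_HEADER_SIZE bytes and carves it into fixed-size slots; the failure path hand-unwinds the tx_skb array and descriptor ring allocated just above. A plausible slot computation (the indexing scheme is assumed here, not shown in this hunk):

        /* header slot for descriptor i - no per-packet DMA mapping needed */
        char       *hdr = txq->tso_hdrs      + i * TSO_HEADER_SIZE;
        dma_addr_t  pa  = txq->tso_hdrs_phys + i * TSO_HEADER_SIZE;
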
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
2120{ 2279{
2121 kfree(txq->tx_skb); 2280 kfree(txq->tx_skb);
2122 2281
2282 if (txq->tso_hdrs)
2283 dma_free_coherent(pp->dev->dev.parent,
2284 txq->size * TSO_HEADER_SIZE,
2285 txq->tso_hdrs, txq->tso_hdrs_phys);
2123 if (txq->descs) 2286 if (txq->descs)
2124 dma_free_coherent(pp->dev->dev.parent, 2287 dma_free_coherent(pp->dev->dev.parent,
2125 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2288 txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2279 return 0; 2442 return 0;
2280 2443
2281 /* The interface is running, so we have to force a 2444 /* The interface is running, so we have to force a
2282 * reallocation of the RXQs 2445 * reallocation of the queues
2283 */ 2446 */
2284 mvneta_stop_dev(pp); 2447 mvneta_stop_dev(pp);
2285 2448
2286 mvneta_cleanup_txqs(pp); 2449 mvneta_cleanup_txqs(pp);
2287 mvneta_cleanup_rxqs(pp); 2450 mvneta_cleanup_rxqs(pp);
2288 2451
2289 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2452 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2290 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2453 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2291 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2454 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2292 2455
2293 ret = mvneta_setup_rxqs(pp); 2456 ret = mvneta_setup_rxqs(pp);
2294 if (ret) { 2457 if (ret) {
2295 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n"); 2458 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2296 return ret; 2459 return ret;
2297 } 2460 }
2298 2461
2299 mvneta_setup_txqs(pp); 2462 ret = mvneta_setup_txqs(pp);
2463 if (ret) {
2464 netdev_err(dev, "unable to setup txqs after MTU change\n");
2465 return ret;
2466 }
2300 2467
2301 mvneta_start_dev(pp); 2468 mvneta_start_dev(pp);
2302 mvneta_port_up(pp); 2469 mvneta_port_up(pp);
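
mvneta_setup_txqs() can now fail - txq init has grown a second DMA allocation for the TSO header area - so the MTU path checks its result the same way it already checked the RX side, instead of silently ignoring the return value:

        ret = mvneta_setup_txqs(pp);
        if (ret) {
                netdev_err(dev, "unable to setup txqs after MTU change\n");
                return ret;     /* port is left stopped, as in the rxqs case */
        }
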
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2323static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2490static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2324{ 2491{
2325 struct mvneta_port *pp = netdev_priv(dev); 2492 struct mvneta_port *pp = netdev_priv(dev);
2326 u8 *mac = addr + 2; 2493 struct sockaddr *sockaddr = addr;
2327 int i; 2494 int ret;
2328
2329 if (netif_running(dev))
2330 return -EBUSY;
2331 2495
2496 ret = eth_prepare_mac_addr_change(dev, addr);
2497 if (ret < 0)
2498 return ret;
2332 /* Remove previous address table entry */ 2499 /* Remove previous address table entry */
2333 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2500 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2334 2501
2335 /* Set new addr in hw */ 2502 /* Set new addr in hw */
2336 mvneta_mac_addr_set(pp, mac, rxq_def); 2503 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2337
2338 /* Set addr in the device */
2339 for (i = 0; i < ETH_ALEN; i++)
2340 dev->dev_addr[i] = mac[i];
2341 2504
2505 eth_commit_mac_addr_change(dev, addr);
2342 return 0; 2506 return 0;
2343} 2507}
2344 2508
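
The open-coded MAC change (manual -EBUSY check, byte-by-byte copy into dev->dev_addr) is replaced by the generic ethernet helpers, which validate the address and centralize the commit. A minimal .ndo_set_mac_address on the same pattern; example_hw_set_mac() is a hypothetical stand-in for programming the hardware filters:

        #include <linux/etherdevice.h>
        #include <linux/netdevice.h>

        void example_hw_set_mac(struct net_device *dev, const u8 *mac); /* hypothetical */

        static int example_set_mac_address(struct net_device *dev, void *addr)
        {
                struct sockaddr *saddr = addr;
                int ret;

                /* rejects invalid addresses, and returns -EBUSY while the
                 * interface is up unless the device sets IFF_LIVE_ADDR_CHANGE
                 */
                ret = eth_prepare_mac_addr_change(dev, addr);
                if (ret < 0)
                        return ret;

                example_hw_set_mac(dev, saddr->sa_data);

                eth_commit_mac_addr_change(dev, addr); /* copies into dev->dev_addr */
                return 0;
        }
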
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
2433 struct mvneta_port *pp = netdev_priv(dev); 2597 struct mvneta_port *pp = netdev_priv(dev);
2434 int ret; 2598 int ret;
2435 2599
2436 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2437
2438 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2600 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2439 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2601 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2440 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2602 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2600 return -EINVAL; 2762 return -EINVAL;
2601 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 2763 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2602 ring->rx_pending : MVNETA_MAX_RXD; 2764 ring->rx_pending : MVNETA_MAX_RXD;
2603 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ? 2765
2604 ring->tx_pending : MVNETA_MAX_TXD; 2766 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2767 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2768 if (pp->tx_ring_size != ring->tx_pending)
2769 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2770 pp->tx_ring_size, ring->tx_pending);
2605 2771
2606 if (netif_running(dev)) { 2772 if (netif_running(dev)) {
2607 mvneta_stop(dev); 2773 mvneta_stop(dev);
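
The TX ring size gains a lower bound in addition to the old upper one: a ring smaller than two worst-case skbs would sit permanently below the stop threshold. clamp_t(type, val, lo, hi) is min(max(val, lo), hi) with all three values cast through type; for illustration, with a floor of 128 and a ceiling of 4096:

        #include <linux/kernel.h>

        static u16 example_ring_size(u16 requested)
        {
                /* illustrative bounds; the driver uses
                 * MVNETA_MAX_SKB_DESCS * 2 and MVNETA_MAX_TXD
                 */
                return clamp_t(u16, requested, 128, 4096);
                /* 16 -> 128, 512 -> 512, 9000 -> 4096 */
        }
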
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
2638}; 2804};
2639 2805
2640/* Initialize hw */ 2806/* Initialize hw */
2641static int mvneta_init(struct mvneta_port *pp, int phy_addr) 2807static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2642{ 2808{
2643 int queue; 2809 int queue;
2644 2810
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2648 /* Set port default values */ 2814 /* Set port default values */
2649 mvneta_defaults_set(pp); 2815 mvneta_defaults_set(pp);
2650 2816
2651 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue), 2817 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2652 GFP_KERNEL); 2818 GFP_KERNEL);
2653 if (!pp->txqs) 2819 if (!pp->txqs)
2654 return -ENOMEM; 2820 return -ENOMEM;
2655 2821
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2661 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 2827 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2662 } 2828 }
2663 2829
2664 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue), 2830 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2665 GFP_KERNEL); 2831 GFP_KERNEL);
2666 if (!pp->rxqs) { 2832 if (!pp->rxqs)
2667 kfree(pp->txqs);
2668 return -ENOMEM; 2833 return -ENOMEM;
2669 }
2670 2834
2671 /* Create Rx descriptor rings */ 2835 /* Create Rx descriptor rings */
2672 for (queue = 0; queue < rxq_number; queue++) { 2836 for (queue = 0; queue < rxq_number; queue++) {
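
devm_kcalloc() ties both queue arrays to the lifetime of the struct device, which is what lets the following hunks delete the partial-failure kfree(), the whole mvneta_deinit() helper, and its call in mvneta_remove(). The pattern:

        #include <linux/device.h>

        /* device-managed: freed automatically on probe failure or unbind */
        pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs),
                                GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM; /* nothing to unwind by hand */
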
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2680 return 0; 2844 return 0;
2681} 2845}
2682 2846
2683static void mvneta_deinit(struct mvneta_port *pp)
2684{
2685 kfree(pp->txqs);
2686 kfree(pp->rxqs);
2687}
2688
2689/* platform glue : initialize decoding windows */ 2847/* platform glue : initialize decoding windows */
2690static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 2848static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2691 const struct mbus_dram_target_info *dram) 2849 const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
2768 struct resource *res; 2926 struct resource *res;
2769 struct device_node *dn = pdev->dev.of_node; 2927 struct device_node *dn = pdev->dev.of_node;
2770 struct device_node *phy_node; 2928 struct device_node *phy_node;
2771 u32 phy_addr;
2772 struct mvneta_port *pp; 2929 struct mvneta_port *pp;
2773 struct net_device *dev; 2930 struct net_device *dev;
2774 const char *dt_mac_addr; 2931 const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
2797 2954
2798 phy_node = of_parse_phandle(dn, "phy", 0); 2955 phy_node = of_parse_phandle(dn, "phy", 0);
2799 if (!phy_node) { 2956 if (!phy_node) {
2800 dev_err(&pdev->dev, "no associated PHY\n"); 2957 if (!of_phy_is_fixed_link(dn)) {
2801 err = -ENODEV; 2958 dev_err(&pdev->dev, "no PHY specified\n");
2802 goto err_free_irq; 2959 err = -ENODEV;
2960 goto err_free_irq;
2961 }
2962
2963 err = of_phy_register_fixed_link(dn);
2964 if (err < 0) {
2965 dev_err(&pdev->dev, "cannot register fixed PHY\n");
2966 goto err_free_irq;
2967 }
2968
2969 /* In the case of a fixed PHY, the DT node associated
2970 * to the PHY is the Ethernet MAC DT node.
2971 */
2972 phy_node = dn;
2803 } 2973 }
2804 2974
2805 phy_mode = of_get_phy_mode(dn); 2975 phy_mode = of_get_phy_mode(dn);
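
Boards where the MAC is wired directly to a switch carry no "phy" phandle; they describe the link statically with a fixed-link DT property instead. of_phy_is_fixed_link() detects that case and of_phy_register_fixed_link() instantiates an emulated PHY whose device-tree node is the MAC node itself, which is why phy_node falls back to dn. Condensed shape of the probe logic:

        #include <linux/of.h>
        #include <linux/of_mdio.h>

        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
                if (!of_phy_is_fixed_link(dn))
                        return -ENODEV;         /* no PHY described at all */
                if (of_phy_register_fixed_link(dn) < 0)
                        return -ENODEV;
                phy_node = dn;  /* fixed PHY hangs off the MAC node */
        }
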
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
2813 dev->watchdog_timeo = 5 * HZ; 2983 dev->watchdog_timeo = 5 * HZ;
2814 dev->netdev_ops = &mvneta_netdev_ops; 2984 dev->netdev_ops = &mvneta_netdev_ops;
2815 2985
2816 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops); 2986 dev->ethtool_ops = &mvneta_eth_tool_ops;
2817 2987
2818 pp = netdev_priv(dev); 2988 pp = netdev_priv(dev);
2819
2820 pp->weight = MVNETA_RX_POLL_WEIGHT;
2821 pp->phy_node = phy_node; 2989 pp->phy_node = phy_node;
2822 pp->phy_interface = phy_mode; 2990 pp->phy_interface = phy_mode;
2823 2991
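
SET_ETHTOOL_OPS() was nothing but a wrapper around this assignment and was being removed tree-wide around this kernel release; the identical one-line conversion recurs below in pxa168_eth, sky2 and mlx4_en:

        /* before */
        SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
        /* after: plain member assignment, no macro indirection */
        dev->ethtool_ops = &mvneta_eth_tool_ops;
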
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
2864 pp->dev = dev; 3032 pp->dev = dev;
2865 SET_NETDEV_DEV(dev, &pdev->dev); 3033 SET_NETDEV_DEV(dev, &pdev->dev);
2866 3034
2867 err = mvneta_init(pp, phy_addr); 3035 err = mvneta_init(&pdev->dev, pp);
2868 if (err < 0) { 3036 if (err < 0)
2869 dev_err(&pdev->dev, "can't init eth hal\n");
2870 goto err_free_stats; 3037 goto err_free_stats;
2871 }
2872 3038
2873 err = mvneta_port_power_up(pp, phy_mode); 3039 err = mvneta_port_power_up(pp, phy_mode);
2874 if (err < 0) { 3040 if (err < 0) {
2875 dev_err(&pdev->dev, "can't power up port\n"); 3041 dev_err(&pdev->dev, "can't power up port\n");
2876 goto err_deinit; 3042 goto err_free_stats;
2877 } 3043 }
2878 3044
2879 dram_target_info = mv_mbus_dram_info(); 3045 dram_target_info = mv_mbus_dram_info();
2880 if (dram_target_info) 3046 if (dram_target_info)
2881 mvneta_conf_mbus_windows(pp, dram_target_info); 3047 mvneta_conf_mbus_windows(pp, dram_target_info);
2882 3048
2883 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 3049 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
2884 3050
2885 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 3051 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2886 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 3052 dev->hw_features |= dev->features;
2887 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 3053 dev->vlan_features |= dev->features;
2888 dev->priv_flags |= IFF_UNICAST_FLT; 3054 dev->priv_flags |= IFF_UNICAST_FLT;
3055 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
2889 3056
2890 err = register_netdev(dev); 3057 err = register_netdev(dev);
2891 if (err < 0) { 3058 if (err < 0) {
2892 dev_err(&pdev->dev, "failed to register\n"); 3059 dev_err(&pdev->dev, "failed to register\n");
2893 goto err_deinit; 3060 goto err_free_stats;
2894 } 3061 }
2895 3062
2896 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 3063 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
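
With the software TSO path in place, the driver advertises NETIF_F_TSO, and hw_features/vlan_features now simply mirror dev->features instead of repeating the flag list. gso_max_segs caps how many segments the stack may fold into one GSO skb, keeping a single super-packet within the per-skb descriptor budget that the stop threshold assumes. Annotated:

        dev->features       = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
        dev->hw_features   |= dev->features;       /* all user-toggleable  */
        dev->vlan_features |= dev->features;       /* kept for VLAN uppers */
        dev->gso_max_segs   = MVNETA_MAX_TSO_SEGS; /* bound descs per skb  */
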
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
2900 3067
2901 return 0; 3068 return 0;
2902 3069
2903err_deinit:
2904 mvneta_deinit(pp);
2905err_free_stats: 3070err_free_stats:
2906 free_percpu(pp->stats); 3071 free_percpu(pp->stats);
2907err_clk: 3072err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
2920 struct mvneta_port *pp = netdev_priv(dev); 3085 struct mvneta_port *pp = netdev_priv(dev);
2921 3086
2922 unregister_netdev(dev); 3087 unregister_netdev(dev);
2923 mvneta_deinit(pp);
2924 clk_disable_unprepare(pp->clk); 3088 clk_disable_unprepare(pp->clk);
2925 free_percpu(pp->stats); 3089 free_percpu(pp->stats);
2926 irq_dispose_mapping(dev->irq); 3090 irq_dispose_mapping(dev->irq);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f6f4bd..8f5aa7c62b18 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1488 dev->netdev_ops = &pxa168_eth_netdev_ops; 1488 dev->netdev_ops = &pxa168_eth_netdev_ops;
1489 dev->watchdog_timeo = 2 * HZ; 1489 dev->watchdog_timeo = 2 * HZ;
1490 dev->base_addr = 0; 1490 dev->base_addr = 0;
1491 SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops); 1491 dev->ethtool_ops = &pxa168_ethtool_ops;
1492 1492
1493 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); 1493 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1494 1494
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b81106451a0a..69693384b58c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4760 4760
4761 SET_NETDEV_DEV(dev, &hw->pdev->dev); 4761 SET_NETDEV_DEV(dev, &hw->pdev->dev);
4762 dev->irq = hw->pdev->irq; 4762 dev->irq = hw->pdev->irq;
4763 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); 4763 dev->ethtool_ops = &sky2_ethtool_ops;
4764 dev->watchdog_timeo = TX_WATCHDOG; 4764 dev->watchdog_timeo = TX_WATCHDOG;
4765 dev->netdev_ops = &sky2_netdev_ops[port]; 4765 dev->netdev_ops = &sky2_netdev_ops[port];
4766 4766
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 29b616990e52..5d940a26055c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
212 212
213 /* First, verify that the master reports correct status */ 213 /* First, verify that the master reports correct status */
214 if (comm_pending(dev)) { 214 if (comm_pending(dev)) {
215 mlx4_warn(dev, "Communication channel is not idle." 215 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
216 "my toggle is %d (cmd:0x%x)\n",
217 priv->cmd.comm_toggle, cmd); 216 priv->cmd.comm_toggle, cmd);
218 return -EAGAIN; 217 return -EAGAIN;
219 } 218 }
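
Most of the mlx4 hunks from here on are one mechanical cleanup: user-visible format strings that had been split across source lines are rejoined, since kernel style wants quoted strings greppable even when they exceed 80 columns. The splits were also losing characters - the message above concatenates to "...is not idle.my toggle is...", with no space after the period. The shape of every such change:

        /* before: cannot be found with git grep, and misses a space */
        mlx4_warn(dev, "Communication channel is not idle."
                  "my toggle is %d (cmd:0x%x)\n",
                  priv->cmd.comm_toggle, cmd);

        /* after: one quoted string, only the arguments wrap */
        mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                  priv->cmd.comm_toggle, cmd);
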
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
422 *out_param = 421 *out_param =
423 be64_to_cpu(vhcr->out_param); 422 be64_to_cpu(vhcr->out_param);
424 else { 423 else {
425 mlx4_err(dev, "response expected while" 424 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
426 "output mailbox is NULL for " 425 op);
427 "command 0x%x\n", op);
428 vhcr->status = CMD_STAT_BAD_PARAM; 426 vhcr->status = CMD_STAT_BAD_PARAM;
429 } 427 }
430 } 428 }
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
439 *out_param = 437 *out_param =
440 be64_to_cpu(vhcr->out_param); 438 be64_to_cpu(vhcr->out_param);
441 else { 439 else {
442 mlx4_err(dev, "response expected while" 440 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
443 "output mailbox is NULL for " 441 op);
444 "command 0x%x\n", op);
445 vhcr->status = CMD_STAT_BAD_PARAM; 442 vhcr->status = CMD_STAT_BAD_PARAM;
446 } 443 }
447 } 444 }
448 ret = mlx4_status_to_errno(vhcr->status); 445 ret = mlx4_status_to_errno(vhcr->status);
449 } else 446 } else
450 mlx4_err(dev, "failed execution of VHCR_POST command" 447 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
451 "opcode 0x%x\n", op); 448 op);
452 } 449 }
453 450
454 mutex_unlock(&priv->cmd.slave_cmd_mutex); 451 mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
476 goto out; 473 goto out;
477 } 474 }
478 475
476 if (out_is_imm && !out_param) {
477 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
478 op);
479 err = -EINVAL;
480 goto out;
481 }
482
479 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 483 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
480 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); 484 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
481 if (err) 485 if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
554 cmd->free_head = context->next; 558 cmd->free_head = context->next;
555 spin_unlock(&cmd->context_lock); 559 spin_unlock(&cmd->context_lock);
556 560
561 if (out_is_imm && !out_param) {
562 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
563 op);
564 err = -EINVAL;
565 goto out;
566 }
567
557 init_completion(&context->done); 568 init_completion(&context->done);
558 569
559 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 570 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
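
Both command paths - polled and event-driven - gain the same defensive check: a command flagged out_is_imm writes its result back through out_param on completion, so a NULL pointer must be refused up front rather than oopsing in the write-back. Condensed to its essentials:

        #include <linux/errno.h>
        #include <linux/types.h>

        static int example_cmd(u64 *out_param, bool out_is_imm)
        {
                if (out_is_imm && !out_param)
                        return -EINVAL;         /* the new guard */

                /* ... post the command and wait for completion ... */

                if (out_is_imm)
                        *out_param = 0;         /* write-back that used to oops */
                return 0;
        }
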
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
625 636
626 if ((slave_addr & 0xfff) | (master_addr & 0xfff) | 637 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
627 (slave & ~0x7f) | (size & 0xff)) { 638 (slave & ~0x7f) | (size & 0xff)) {
628 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " 639 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
629 "master_addr:0x%llx slave_id:%d size:%d\n", 640 slave_addr, master_addr, slave, size);
630 slave_addr, master_addr, slave, size);
631 return -EINVAL; 641 return -EINVAL;
632 } 642 }
633 643
@@ -1422,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1422 ALIGN(sizeof(struct mlx4_vhcr_cmd), 1432 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1423 MLX4_ACCESS_MEM_ALIGN), 1); 1433 MLX4_ACCESS_MEM_ALIGN), 1);
1424 if (ret) { 1434 if (ret) {
1425 mlx4_err(dev, "%s:Failed reading vhcr" 1435 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1426 "ret: 0x%x\n", __func__, ret); 1436 __func__, ret);
1427 kfree(vhcr); 1437 kfree(vhcr);
1428 return ret; 1438 return ret;
1429 } 1439 }
@@ -1474,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1474 1484
1475 /* Apply permission and bound checks if applicable */ 1485 /* Apply permission and bound checks if applicable */
1476 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { 1486 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1477 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " 1487 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1478 "checks for resource_id:%d\n", vhcr->op, slave, 1488 vhcr->op, slave, vhcr->in_modifier);
1479 vhcr->in_modifier);
1480 vhcr_cmd->status = CMD_STAT_BAD_OP; 1489 vhcr_cmd->status = CMD_STAT_BAD_OP;
1481 goto out_status; 1490 goto out_status;
1482 } 1491 }
@@ -1515,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1515 } 1524 }
1516 1525
1517 if (err) { 1526 if (err) {
1518 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" 1527 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1519 " error:%d, status %d\n",
1520 vhcr->op, slave, vhcr->errno, err); 1528 vhcr->op, slave, vhcr->errno, err);
1521 vhcr_cmd->status = mlx4_errno_to_status(err); 1529 vhcr_cmd->status = mlx4_errno_to_status(err);
1522 goto out_status; 1530 goto out_status;
@@ -1550,8 +1558,8 @@ out_status:
1550 __func__); 1558 __func__);
1551 else if (vhcr->e_bit && 1559 else if (vhcr->e_bit &&
1552 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) 1560 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1553 mlx4_warn(dev, "Failed to generate command completion " 1561 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1554 "eqe for slave %d\n", slave); 1562 slave);
1555 } 1563 }
1556 1564
1557out: 1565out:
@@ -1590,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1590 1598
1591 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", 1599 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1592 slave, port); 1600 slave, port);
1593 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, 1601 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1594 vp_admin->default_qos, vp_admin->link_state); 1602 vp_admin->default_vlan, vp_admin->default_qos,
1603 vp_admin->link_state);
1595 1604
1596 work = kzalloc(sizeof(*work), GFP_KERNEL); 1605 work = kzalloc(sizeof(*work), GFP_KERNEL);
1597 if (!work) 1606 if (!work)
@@ -1604,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1604 &admin_vlan_ix); 1613 &admin_vlan_ix);
1605 if (err) { 1614 if (err) {
1606 kfree(work); 1615 kfree(work);
1607 mlx4_warn((&priv->dev), 1616 mlx4_warn(&priv->dev,
1608 "No vlan resources slave %d, port %d\n", 1617 "No vlan resources slave %d, port %d\n",
1609 slave, port); 1618 slave, port);
1610 return err; 1619 return err;
@@ -1613,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1613 admin_vlan_ix = NO_INDX; 1622 admin_vlan_ix = NO_INDX;
1614 } 1623 }
1615 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; 1624 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1616 mlx4_dbg((&(priv->dev)), 1625 mlx4_dbg(&priv->dev,
1617 "alloc vlan %d idx %d slave %d port %d\n", 1626 "alloc vlan %d idx %d slave %d port %d\n",
1618 (int)(vp_admin->default_vlan), 1627 (int)(vp_admin->default_vlan),
1619 admin_vlan_ix, slave, port); 1628 admin_vlan_ix, slave, port);
@@ -1676,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1676 vp_admin->default_vlan, &(vp_oper->vlan_idx)); 1685 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1677 if (err) { 1686 if (err) {
1678 vp_oper->vlan_idx = NO_INDX; 1687 vp_oper->vlan_idx = NO_INDX;
1679 mlx4_warn((&priv->dev), 1688 mlx4_warn(&priv->dev,
1680 "No vlan resorces slave %d, port %d\n", 1689 "No vlan resorces slave %d, port %d\n",
1681 slave, port); 1690 slave, port);
1682 return err; 1691 return err;
1683 } 1692 }
1684 mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", 1693 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
1685 (int)(vp_oper->state.default_vlan), 1694 (int)(vp_oper->state.default_vlan),
1686 vp_oper->vlan_idx, slave, port); 1695 vp_oper->vlan_idx, slave, port);
1687 } 1696 }
@@ -1692,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1692 if (0 > vp_oper->mac_idx) { 1701 if (0 > vp_oper->mac_idx) {
1693 err = vp_oper->mac_idx; 1702 err = vp_oper->mac_idx;
1694 vp_oper->mac_idx = NO_INDX; 1703 vp_oper->mac_idx = NO_INDX;
1695 mlx4_warn((&priv->dev), 1704 mlx4_warn(&priv->dev,
1696 "No mac resorces slave %d, port %d\n", 1705 "No mac resorces slave %d, port %d\n",
1697 slave, port); 1706 slave, port);
1698 return err; 1707 return err;
1699 } 1708 }
1700 mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", 1709 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
1701 vp_oper->state.mac, vp_oper->mac_idx, slave, port); 1710 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1702 } 1711 }
1703 } 1712 }
@@ -1748,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1748 slave_state[slave].comm_toggle ^= 1; 1757 slave_state[slave].comm_toggle ^= 1;
1749 reply = (u32) slave_state[slave].comm_toggle << 31; 1758 reply = (u32) slave_state[slave].comm_toggle << 31;
1750 if (toggle != slave_state[slave].comm_toggle) { 1759 if (toggle != slave_state[slave].comm_toggle) {
1751 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER" 1760 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1752 "STATE COMPROMISIED ***\n", toggle, slave); 1761 toggle, slave);
1753 goto reset_slave; 1762 goto reset_slave;
1754 } 1763 }
1755 if (cmd == MLX4_COMM_CMD_RESET) { 1764 if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1776,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1776 /*command from slave in the middle of FLR*/ 1785 /*command from slave in the middle of FLR*/
1777 if (cmd != MLX4_COMM_CMD_RESET && 1786 if (cmd != MLX4_COMM_CMD_RESET &&
1778 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { 1787 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1779 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " 1788 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
1780 "in the middle of FLR\n", slave, cmd); 1789 slave, cmd);
1781 return; 1790 return;
1782 } 1791 }
1783 1792
@@ -1815,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1815 1824
1816 mutex_lock(&priv->cmd.slave_cmd_mutex); 1825 mutex_lock(&priv->cmd.slave_cmd_mutex);
1817 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 1826 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1818 mlx4_err(dev, "Failed processing vhcr for slave:%d," 1827 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
1819 " resetting slave.\n", slave); 1828 slave);
1820 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1829 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1821 goto reset_slave; 1830 goto reset_slave;
1822 } 1831 }
@@ -1833,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1833 is_going_down = 1; 1842 is_going_down = 1;
1834 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 1843 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1835 if (is_going_down) { 1844 if (is_going_down) {
1836 mlx4_warn(dev, "Slave is going down aborting command(%d)" 1845 mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
1837 " executing from slave:%d\n",
1838 cmd, slave); 1846 cmd, slave);
1839 return; 1847 return;
1840 } 1848 }
@@ -1897,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
1897 if (toggle != slt) { 1905 if (toggle != slt) {
1898 if (master->slave_state[slave].comm_toggle 1906 if (master->slave_state[slave].comm_toggle
1899 != slt) { 1907 != slt) {
1900 printk(KERN_INFO "slave %d out of sync." 1908 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
1901 " read toggle %d, state toggle %d. " 1909 slave, slt,
1902 "Resynching.\n", slave, slt, 1910 master->slave_state[slave].comm_toggle);
1903 master->slave_state[slave].comm_toggle);
1904 master->slave_state[slave].comm_toggle = 1911 master->slave_state[slave].comm_toggle =
1905 slt; 1912 slt;
1906 } 1913 }
@@ -1913,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
1913 } 1920 }
1914 1921
1915 if (reported && reported != served) 1922 if (reported && reported != served)
1916 mlx4_warn(dev, "Got command event with bitmask from %d slaves" 1923 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
1917 " but %d were served\n",
1918 reported, served); 1924 reported, served);
1919 1925
1920 if (mlx4_ARM_COMM_CHANNEL(dev)) 1926 if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1970,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1970 ioremap(pci_resource_start(dev->pdev, 2) + 1976 ioremap(pci_resource_start(dev->pdev, 2) +
1971 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 1977 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1972 if (!priv->mfunc.comm) { 1978 if (!priv->mfunc.comm) {
1973 mlx4_err(dev, "Couldn't map communication vector.\n"); 1979 mlx4_err(dev, "Couldn't map communication vector\n");
1974 goto err_vhcr; 1980 goto err_vhcr;
1975 } 1981 }
1976 1982
@@ -2097,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2097 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2103 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2098 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2104 MLX4_HCR_BASE, MLX4_HCR_SIZE);
2099 if (!priv->cmd.hcr) { 2105 if (!priv->cmd.hcr) {
2100 mlx4_err(dev, "Couldn't map command register.\n"); 2106 mlx4_err(dev, "Couldn't map command register\n");
2101 return -ENOMEM; 2107 return -ENOMEM;
2102 } 2108 }
2103 } 2109 }
@@ -2498,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
2498 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); 2504 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2499 ivf->mac[5] = ((s_info->mac) & 0xff); 2505 ivf->mac[5] = ((s_info->mac) & 0xff);
2500 2506
2501 ivf->vlan = s_info->default_vlan; 2507 ivf->vlan = s_info->default_vlan;
2502 ivf->qos = s_info->default_qos; 2508 ivf->qos = s_info->default_qos;
2503 ivf->tx_rate = s_info->tx_rate; 2509 ivf->max_tx_rate = s_info->tx_rate;
2504 ivf->spoofchk = s_info->spoofchk; 2510 ivf->min_tx_rate = 0;
2505 ivf->linkstate = s_info->link_state; 2511 ivf->spoofchk = s_info->spoofchk;
2512 ivf->linkstate = s_info->link_state;
2506 2513
2507 return 0; 2514 return 0;
2508} 2515}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c90cde5b4aee..80f725228f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
293 atomic_set(&cq->refcount, 1); 293 atomic_set(&cq->refcount, 1);
294 init_completion(&cq->free); 294 init_completion(&cq->free);
295 295
296 cq->irq = priv->eq_table.eq[cq->vector].irq;
297 cq->irq_affinity_change = false;
298
296 return 0; 299 return 0;
297 300
298err_radix: 301err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..4b2130760eed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
125 &cq->vector)) { 125 &cq->vector)) {
126 cq->vector = (cq->ring + 1 + priv->port) 126 cq->vector = (cq->ring + 1 + priv->port)
127 % mdev->dev->caps.num_comp_vectors; 127 % mdev->dev->caps.num_comp_vectors;
128 mlx4_warn(mdev, "Failed Assigning an EQ to " 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 "%s ,Falling back to legacy EQ's\n",
130 name); 129 name);
131 } 130 }
132 } 131 }
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
164 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, 163 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
165 NAPI_POLL_WEIGHT); 164 NAPI_POLL_WEIGHT);
166 } else { 165 } else {
166 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
167
168 err = irq_set_affinity_hint(cq->mcq.irq,
169 ring->affinity_mask);
170 if (err)
171 mlx4_warn(mdev, "Failed setting affinity hint\n");
172
167 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 173 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
168 napi_hash_add(&cq->napi); 174 napi_hash_add(&cq->napi);
169 } 175 }
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
180 186
181 mlx4_en_unmap_buffer(&cq->wqres.buf); 187 mlx4_en_unmap_buffer(&cq->wqres.buf);
182 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 188 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
183 if (priv->mdev->dev->caps.comp_pool && cq->vector) 189 if (priv->mdev->dev->caps.comp_pool && cq->vector) {
190 if (!cq->is_tx)
191 irq_set_affinity_hint(cq->mcq.irq, NULL);
184 mlx4_release_eq(priv->mdev->dev, cq->vector); 192 mlx4_release_eq(priv->mdev->dev, cq->vector);
193 }
185 cq->vector = 0; 194 cq->vector = 0;
186 cq->buf_size = 0; 195 cq->buf_size = 0;
187 cq->buf = NULL; 196 cq->buf = NULL;
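
RX completion vectors now publish an IRQ affinity hint: the ring's preferred-CPU mask (built in mlx4_en_init_affinity_hint(), further down in en_netdev.c) is exposed through /proc/irq/N/affinity_hint so irqbalance can follow it, and the hint is cleared again before the EQ is released - irq_set_affinity_hint(irq, NULL) also drops the reference the IRQ core keeps on the cpumask. The pairing:

        #include <linux/interrupt.h>

        /* activate: advertise where this RX ring's IRQ wants to live */
        if (irq_set_affinity_hint(cq->mcq.irq, ring->affinity_mask))
                mlx4_warn(mdev, "Failed setting affinity hint\n");

        /* destroy: clear the hint before releasing the vector */
        irq_set_affinity_hint(cq->mcq.irq, NULL);
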
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..fa1a069e14e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
378 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); 378 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
379 cmd->duplex = DUPLEX_FULL; 379 cmd->duplex = DUPLEX_FULL;
380 } else { 380 } else {
381 ethtool_cmd_speed_set(cmd, -1); 381 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
382 cmd->duplex = -1; 382 cmd->duplex = DUPLEX_UNKNOWN;
383 } 383 }
384 384
385 if (trans_type > 0 && trans_type <= 0xC) { 385 if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
564 return priv->rx_ring_num; 564 return priv->rx_ring_num;
565} 565}
566 566
567static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) 567static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
568{ 568{
569 struct mlx4_en_priv *priv = netdev_priv(dev); 569 struct mlx4_en_priv *priv = netdev_priv(dev);
570 struct mlx4_en_rss_map *rss_map = &priv->rss_map; 570 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
582 return err; 582 return err;
583} 583}
584 584
585static int mlx4_en_set_rxfh_indir(struct net_device *dev, 585static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
586 const u32 *ring_index) 586 const u8 *key)
587{ 587{
588 struct mlx4_en_priv *priv = netdev_priv(dev); 588 struct mlx4_en_priv *priv = netdev_priv(dev);
589 struct mlx4_en_dev *mdev = priv->mdev; 589 struct mlx4_en_dev *mdev = priv->mdev;
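
The ethtool core folded the RSS indirection-table callbacks into combined get_rxfh/set_rxfh ops that also carry the hash key; a driver like this one, with no settable key, simply leaves the new u8 *key argument unused, and the op table below is renamed to match. The new signatures, sketched with illustrative names:

        static int example_get_rxfh(struct net_device *dev, u32 *ring_index,
                                    u8 *key);           /* key may be NULL */
        static int example_set_rxfh(struct net_device *dev,
                                    const u32 *ring_index, const u8 *key);
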
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
926 } else { 926 } else {
927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n", 928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
929 cmd->fs.ring_cookie); 929 cmd->fs.ring_cookie);
930 return -EINVAL; 930 return -EINVAL;
931 } 931 }
932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; 932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
933 if (!qpn) { 933 if (!qpn) {
934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n", 934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
935 cmd->fs.ring_cookie); 935 cmd->fs.ring_cookie);
936 return -EINVAL; 936 return -EINVAL;
937 } 937 }
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
956 } 956 }
957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); 957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
958 if (err) { 958 if (err) {
959 en_err(priv, "Fail to attach network rule at location %d.\n", 959 en_err(priv, "Fail to attach network rule at location %d\n",
960 cmd->fs.location); 960 cmd->fs.location);
961 goto out_free_list; 961 goto out_free_list;
962 } 962 }
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1121{ 1121{
1122 struct mlx4_en_priv *priv = netdev_priv(dev); 1122 struct mlx4_en_priv *priv = netdev_priv(dev);
1123 struct mlx4_en_dev *mdev = priv->mdev; 1123 struct mlx4_en_dev *mdev = priv->mdev;
1124 int port_up; 1124 int port_up = 0;
1125 int err = 0; 1125 int err = 0;
1126 1126
1127 if (channel->other_count || channel->combined_count || 1127 if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
1151 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 1151 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1152 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 1152 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1153 1153
1154 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); 1154 if (dev->num_tc)
1155 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
1155 1156
1156 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); 1157 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
1157 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); 1158 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
1223 .get_rxnfc = mlx4_en_get_rxnfc, 1224 .get_rxnfc = mlx4_en_get_rxnfc,
1224 .set_rxnfc = mlx4_en_set_rxnfc, 1225 .set_rxnfc = mlx4_en_set_rxnfc,
1225 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1226 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
1226 .get_rxfh_indir = mlx4_en_get_rxfh_indir, 1227 .get_rxfh = mlx4_en_get_rxfh,
1227 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1228 .set_rxfh = mlx4_en_set_rxfh,
1228 .get_channels = mlx4_en_get_channels, 1229 .get_channels = mlx4_en_get_channels,
1229 .set_channels = mlx4_en_set_channels, 1230 .set_channels = mlx4_en_set_channels,
1230 .get_ts_info = mlx4_en_get_ts_info, 1231 .get_ts_info = mlx4_en_get_ts_info,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
133 MLX4_EN_MAX_TX_RING_P_UP); 133 MLX4_EN_MAX_TX_RING_P_UP);
134 if (params->udp_rss && !(mdev->dev->caps.flags 134 if (params->udp_rss && !(mdev->dev->caps.flags
135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { 135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
136 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); 136 mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
137 params->udp_rss = 0; 137 params->udp_rss = 0;
138 } 138 }
139 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 139 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
251 251
252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); 252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
253 if (!mdev->LSO_support) 253 if (!mdev->LSO_support)
254 mlx4_warn(mdev, "LSO not supported, please upgrade to later " 254 mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
255 "FW version to enable LSO\n");
256 255
257 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, 256 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
258 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 257 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
268 /* Build device profile according to supplied module parameters */ 267 /* Build device profile according to supplied module parameters */
269 err = mlx4_en_get_profile(mdev); 268 err = mlx4_en_get_profile(mdev);
270 if (err) { 269 if (err) {
271 mlx4_err(mdev, "Bad module parameters, aborting.\n"); 270 mlx4_err(mdev, "Bad module parameters, aborting\n");
272 goto err_mr; 271 goto err_mr;
273 } 272 }
274 273
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..7d4fb7bf2593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
130 case IPPROTO_TCP: 130 case IPPROTO_TCP:
131 return MLX4_NET_TRANS_RULE_ID_TCP; 131 return MLX4_NET_TRANS_RULE_ID_TCP;
132 default: 132 default:
133 return -EPROTONOSUPPORT; 133 return MLX4_NET_TRANS_RULE_NUM;
134 } 134 }
135}; 135};
136 136
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
177 int rc; 177 int rc;
178 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 178 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
179 179
180 if (spec_tcp_udp.id < 0) { 180 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
181 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 181 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
182 filter->ip_proto); 182 filter->ip_proto);
183 goto ignore; 183 goto ignore;
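
This pair of hunks is a signedness fix: the helper's result lands in an enum field whose enumerators are all non-negative, so the compiler may treat it as unsigned and an "id < 0" test can never fire; returning -EPROTONOSUPPORT was therefore silently discarded. Using the enum's element count as an in-range sentinel, checked with >=, makes the unsupported case representable. The same pattern in miniature, with illustrative names:

        #include <linux/in.h>    /* IPPROTO_TCP, IPPROTO_UDP */
        #include <linux/types.h>

        enum example_rule_id {
                EXAMPLE_RULE_TCP,
                EXAMPLE_RULE_UDP,
                EXAMPLE_RULE_NUM,       /* element count doubles as sentinel */
        };

        static enum example_rule_id example_proto_to_rule(u8 ip_proto)
        {
                switch (ip_proto) {
                case IPPROTO_TCP:
                        return EXAMPLE_RULE_TCP;
                case IPPROTO_UDP:
                        return EXAMPLE_RULE_UDP;
                default:
                        return EXAMPLE_RULE_NUM; /* in-range "unsupported" */
                }
        }
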
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
770 priv->dev->dev_addr, priv->prev_mac); 770 priv->dev->dev_addr, priv->prev_mac);
771 if (err) 771 if (err)
772 en_err(priv, "Failed changing HW MAC address\n"); 772 en_err(priv, "Failed changing HW MAC address\n");
773 memcpy(priv->prev_mac, priv->dev->dev_addr,
774 sizeof(priv->prev_mac));
775 } else 773 } else
776 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 774 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
777 775
776 memcpy(priv->prev_mac, priv->dev->dev_addr,
777 sizeof(priv->prev_mac));
778
778 return err; 779 return err;
779} 780}
780 781
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
788 if (!is_valid_ether_addr(saddr->sa_data)) 789 if (!is_valid_ether_addr(saddr->sa_data))
789 return -EADDRNOTAVAIL; 790 return -EADDRNOTAVAIL;
790 791
791 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
792
793 mutex_lock(&mdev->state_lock); 792 mutex_lock(&mdev->state_lock);
793 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
794 err = mlx4_en_do_set_mac(priv); 794 err = mlx4_en_do_set_mac(priv);
795 mutex_unlock(&mdev->state_lock); 795 mutex_unlock(&mdev->state_lock);
796 796
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
1526 mutex_unlock(&mdev->state_lock); 1526 mutex_unlock(&mdev->state_lock);
1527} 1527}
1528 1528
1529static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1530{
1531 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1532 int numa_node = priv->mdev->dev->numa_node;
1533 int ret = 0;
1534
1535 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1536 return -ENOMEM;
1537
1538 ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
1539 ring->affinity_mask);
1540 if (ret)
1541 free_cpumask_var(ring->affinity_mask);
1542
1543 return ret;
1544}
1545
1546static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1547{
1548 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1549}
1529 1550
1530int mlx4_en_start_port(struct net_device *dev) 1551int mlx4_en_start_port(struct net_device *dev)
1531{ 1552{
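
The per-ring mask above is built with cpumask_set_cpu_local_first(), which selects the ring_idx-th online CPU counting the device's NUMA-local cores first, so low-numbered rings land on the local node before spilling to remote ones (this helper was later replaced by cpumask_local_spread(), so treat the exact name as era-specific). Allocation and failure handling bracket it:

        if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
                return -ENOMEM;

        /* can fail (e.g. very early in boot); the mask is then freed
         * and no affinity hint is published for this ring
         */
        ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
                                          ring->affinity_mask);
        if (ret)
                free_cpumask_var(ring->affinity_mask);
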
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
1567 1588
1568 mlx4_en_cq_init_lock(cq); 1589 mlx4_en_cq_init_lock(cq);
1569 1590
1591 err = mlx4_en_init_affinity_hint(priv, i);
1592 if (err) {
1593 en_err(priv, "Failed preparing IRQ affinity hint\n");
1594 goto cq_err;
1595 }
1596
1570 err = mlx4_en_activate_cq(priv, cq, i); 1597 err = mlx4_en_activate_cq(priv, cq, i);
1571 if (err) { 1598 if (err) {
1572 en_err(priv, "Failed activating Rx CQ\n"); 1599 en_err(priv, "Failed activating Rx CQ\n");
1600 mlx4_en_free_affinity_hint(priv, i);
1573 goto cq_err; 1601 goto cq_err;
1574 } 1602 }
1575 for (j = 0; j < cq->size; j++) 1603 for (j = 0; j < cq->size; j++)
1576 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1604 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1577 err = mlx4_en_set_cq_moder(priv, cq); 1605 err = mlx4_en_set_cq_moder(priv, cq);
1578 if (err) { 1606 if (err) {
1579 en_err(priv, "Failed setting cq moderation parameters"); 1607 en_err(priv, "Failed setting cq moderation parameters\n");
1580 mlx4_en_deactivate_cq(priv, cq); 1608 mlx4_en_deactivate_cq(priv, cq);
1609 mlx4_en_free_affinity_hint(priv, i);
1581 goto cq_err; 1610 goto cq_err;
1582 } 1611 }
1583 mlx4_en_arm_cq(priv, cq); 1612 mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
1615 } 1644 }
1616 err = mlx4_en_set_cq_moder(priv, cq); 1645 err = mlx4_en_set_cq_moder(priv, cq);
1617 if (err) { 1646 if (err) {
1618 en_err(priv, "Failed setting cq moderation parameters"); 1647 en_err(priv, "Failed setting cq moderation parameters\n");
1619 mlx4_en_deactivate_cq(priv, cq); 1648 mlx4_en_deactivate_cq(priv, cq);
1620 goto tx_err; 1649 goto tx_err;
1621 } 1650 }
@@ -1715,8 +1744,10 @@ rss_err:
1715mac_err: 1744mac_err:
1716 mlx4_en_put_qp(priv); 1745 mlx4_en_put_qp(priv);
1717cq_err: 1746cq_err:
1718 while (rx_index--) 1747 while (rx_index--) {
1719 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1748 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1749 mlx4_en_free_affinity_hint(priv, i);
1750 }
1720 for (i = 0; i < priv->rx_ring_num; i++) 1751 for (i = 0; i < priv->rx_ring_num; i++)
1721 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1752 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1722 1753
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1847 msleep(1); 1878 msleep(1);
1848 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1879 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1849 mlx4_en_deactivate_cq(priv, cq); 1880 mlx4_en_deactivate_cq(priv, cq);
1881
1882 mlx4_en_free_affinity_hint(priv, i);
1850 } 1883 }
1851} 1884}
1852 1885
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2539 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 2572 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2540 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 2573 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2541 2574
2542 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 2575 dev->ethtool_ops = &mlx4_en_ethtool_ops;
2543 2576
2544 /* 2577 /*
2545 * Set driver features 2578 * Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2594 prof->tx_pause, prof->tx_ppp, 2627 prof->tx_pause, prof->tx_ppp,
2595 prof->rx_pause, prof->rx_ppp); 2628 prof->rx_pause, prof->rx_ppp);
2596 if (err) { 2629 if (err) {
2597 en_err(priv, "Failed setting port general configurations " 2630 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2598 "for port %d, with error %d\n", priv->port, err); 2631 priv->port, err);
2599 goto out; 2632 goto out;
2600 } 2633 }
2601 2634
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 87857a6463eb..d2d415732d99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
270 ring->actual_size, 270 ring->actual_size,
271 GFP_KERNEL)) { 271 GFP_KERNEL)) {
272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { 272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
273 en_err(priv, "Failed to allocate " 273 en_err(priv, "Failed to allocate enough rx buffers\n");
274 "enough rx buffers\n");
275 return -ENOMEM; 274 return -ENOMEM;
276 } else { 275 } else {
277 new_size = rounddown_pow_of_two(ring->actual_size); 276 new_size = rounddown_pow_of_two(ring->actual_size);
278 en_warn(priv, "Only %d buffers allocated " 277 en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
279 "reducing ring size to %d",
280 ring->actual_size, new_size); 278 ring->actual_size, new_size);
281 goto reduce_rings; 279 goto reduce_rings;
282 } 280 }
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
685 /* Drop packet on bad receive or bad checksum */ 683 /* Drop packet on bad receive or bad checksum */
686 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 684 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
687 MLX4_CQE_OPCODE_ERROR)) { 685 MLX4_CQE_OPCODE_ERROR)) {
688 en_err(priv, "CQE completed in error - vendor " 686 en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
689 "syndrom:%d syndrom:%d\n", 687 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
690 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, 688 ((struct mlx4_err_cqe *)cqe)->syndrome);
691 ((struct mlx4_err_cqe *) cqe)->syndrome);
692 goto next; 689 goto next;
693 } 690 }
694 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { 691 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
898 mlx4_en_cq_unlock_napi(cq); 895 mlx4_en_cq_unlock_napi(cq);
899 896
900 /* If we used up all the quota - we're probably not done yet... */ 897 /* If we used up all the quota - we're probably not done yet... */
901 if (done == budget) 898 if (done == budget) {
902 INC_PERF_COUNTER(priv->pstats.napi_quota); 899 INC_PERF_COUNTER(priv->pstats.napi_quota);
903 else { 900 if (unlikely(cq->mcq.irq_affinity_change)) {
901 cq->mcq.irq_affinity_change = false;
902 napi_complete(napi);
903 mlx4_en_arm_cq(priv, cq);
904 return 0;
905 }
906 } else {
904 /* Done for now */ 907 /* Done for now */
908 cq->mcq.irq_affinity_change = false;
905 napi_complete(napi); 909 napi_complete(napi);
906 mlx4_en_arm_cq(priv, cq); 910 mlx4_en_arm_cq(priv, cq);
907 } 911 }
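
An affinity hint alone cannot migrate an IRQ whose NAPI context never sleeps: under saturation the RX poll consumes its full budget every pass, napi_complete() is never reached, and the interrupt stays pinned to its old CPU. The irq_affinity_change flag - set via the irq_affinity_notify machinery of the new mlx4_irq_notify struct in eq.c, with the IRQ number cached as cq->irq in cq.c above - forces one early completion so the interrupt can be retaken on the new CPU; the TX poll in en_tx.c gets the identical treatment in the next hunks. The pattern:

        if (done == budget) {           /* quota exhausted, more work left */
                if (unlikely(cq->mcq.irq_affinity_change)) {
                        cq->mcq.irq_affinity_change = false;
                        napi_complete(napi);    /* let the IRQ migrate */
                        mlx4_en_arm_cq(priv, cq);
                        return 0;
                }
                return budget;          /* stay in NAPI polling mode */
        }
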
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
944 priv->rx_skb_size = eff_mtu; 948 priv->rx_skb_size = eff_mtu;
945 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); 949 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
946 950
947 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 951 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
948 "num_frags:%d):\n", eff_mtu, priv->num_frags); 952 eff_mtu, priv->num_frags);
949 for (i = 0; i < priv->num_frags; i++) { 953 for (i = 0; i < priv->num_frags; i++) {
950 en_err(priv, 954 en_err(priv,
951 " frag:%d - size:%d prefix:%d align:%d stride:%d\n", 955 " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index bc0cc1eb214d..8be7483f8236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
108 108
109 ring->buf = ring->wqres.buf.direct.buf; 109 ring->buf = ring->wqres.buf.direct.buf;
110 110
111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " 111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
112 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 112 ring, ring->buf, ring->size, ring->buf_size,
113 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 113 (unsigned long long) ring->wqres.buf.direct.map);
114 114
115 ring->qpn = qpn; 115 ring->qpn = qpn;
116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); 116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
122 122
123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); 123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
124 if (err) { 124 if (err) {
125 en_dbg(DRV, priv, "working without blueflame (%d)", err); 125 en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
126 ring->bf.uar = &mdev->priv_uar; 126 ring->bf.uar = &mdev->priv_uar;
127 ring->bf.uar->map = mdev->uar_map; 127 ring->bf.uar->map = mdev->uar_map;
128 ring->bf_enabled = false; 128 ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
474 /* If we used up all the quota - we're probably not done yet... */ 474 /* If we used up all the quota - we're probably not done yet... */
475 if (done < budget) { 475 if (done < budget) {
476 /* Done for now */ 476 /* Done for now */
477 cq->mcq.irq_affinity_change = false;
477 napi_complete(napi); 478 napi_complete(napi);
478 mlx4_en_arm_cq(priv, cq); 479 mlx4_en_arm_cq(priv, cq);
479 return done; 480 return done;
481 } else if (unlikely(cq->mcq.irq_affinity_change)) {
482 cq->mcq.irq_affinity_change = false;
483 napi_complete(napi);
484 mlx4_en_arm_cq(priv, cq);
485 return 0;
480 } 486 }
481 return budget; 487 return budget;
482} 488}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..d954ec1eac17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
53 MLX4_EQ_ENTRY_SIZE = 0x20 53 MLX4_EQ_ENTRY_SIZE = 0x20
54}; 54};
55 55
56struct mlx4_irq_notify {
57 void *arg;
58 struct irq_affinity_notify notify;
59};
60
56#define MLX4_EQ_STATUS_OK ( 0 << 28) 61#define MLX4_EQ_STATUS_OK ( 0 << 28)
57#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 62#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
58#define MLX4_EQ_OWNER_SW ( 0 << 24) 63#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
152 if (i != dev->caps.function && 157 if (i != dev->caps.function &&
153 master->slave_state[i].active) 158 master->slave_state[i].active)
154 if (mlx4_GEN_EQE(dev, i, eqe)) 159 if (mlx4_GEN_EQE(dev, i, eqe))
155 mlx4_warn(dev, "Failed to " 160 mlx4_warn(dev, "Failed to generate event for slave %d\n",
156 " generate event " 161 i);
157 "for slave %d\n", i);
158 } 162 }
159 } else { 163 } else {
160 if (mlx4_GEN_EQE(dev, slave, eqe)) 164 if (mlx4_GEN_EQE(dev, slave, eqe))
161 mlx4_warn(dev, "Failed to generate event " 165 mlx4_warn(dev, "Failed to generate event for slave %d\n",
162 "for slave %d\n", slave); 166 slave);
163 } 167 }
164 ++slave_eq->cons; 168 ++slave_eq->cons;
165 } 169 }
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
177 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; 181 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
178 if ((!!(s_eqe->owner & 0x80)) ^ 182 if ((!!(s_eqe->owner & 0x80)) ^
179 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { 183 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
180 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " 184 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
181 "No free EQE on slave events queue\n", slave); 185 slave);
182 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 186 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
183 return; 187 return;
184 } 188 }
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
375 } 379 }
376 break; 380 break;
377 default: 381 default:
378 pr_err("%s: BUG!!! UNKNOWN state: " 382 pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
379 "slave:%d, port:%d\n", __func__, slave, port); 383 __func__, slave, port);
380 goto out; 384 goto out;
381 } 385 }
382 ret = mlx4_get_slave_port_state(dev, slave, port); 386 ret = mlx4_get_slave_port_state(dev, slave, port);
383 387
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
425 for (i = 0 ; i < dev->num_slaves; i++) { 429 for (i = 0 ; i < dev->num_slaves; i++) {
426 430
427 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 431 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
428 mlx4_dbg(dev, "mlx4_handle_slave_flr: " 432 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
429 "clean slave: %d\n", i); 433 i);
430 434
431 mlx4_delete_all_resources_for_slave(dev, i); 435 mlx4_delete_all_resources_for_slave(dev, i);
432 /*return the slave to running mode*/ 436 /*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
438 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, 442 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
439 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 443 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
440 if (err) 444 if (err)
441 mlx4_warn(dev, "Failed to notify FW on " 445 mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
442 "FLR done (slave:%d)\n", i); 446 i);
443 } 447 }
444 } 448 }
445} 449}
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
490 be32_to_cpu(eqe->event.qp.qpn) 494 be32_to_cpu(eqe->event.qp.qpn)
491 & 0xffffff, &slave); 495 & 0xffffff, &slave);
492 if (ret && ret != -ENOENT) { 496 if (ret && ret != -ENOENT) {
493 mlx4_dbg(dev, "QP event %02x(%02x) on " 497 mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
494 "EQ %d at index %u: could "
495 "not get slave id (%d)\n",
496 eqe->type, eqe->subtype, 498 eqe->type, eqe->subtype,
497 eq->eqn, eq->cons_index, ret); 499 eq->eqn, eq->cons_index, ret);
498 break; 500 break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
520 & 0xffffff, 522 & 0xffffff,
521 &slave); 523 &slave);
522 if (ret && ret != -ENOENT) { 524 if (ret && ret != -ENOENT) {
523 mlx4_warn(dev, "SRQ event %02x(%02x) " 525 mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
524 "on EQ %d at index %u: could"
525 " not get slave id (%d)\n",
526 eqe->type, eqe->subtype, 526 eqe->type, eqe->subtype,
527 eq->eqn, eq->cons_index, ret); 527 eq->eqn, eq->cons_index, ret);
528 break; 528 break;
529 } 529 }
530 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," 530 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
531 " event: %02x(%02x)\n", __func__, 531 __func__, slave,
532 slave,
533 be32_to_cpu(eqe->event.srq.srqn), 532 be32_to_cpu(eqe->event.srq.srqn),
534 eqe->type, eqe->subtype); 533 eqe->type, eqe->subtype);
535 534
536 if (!ret && slave != dev->caps.function) { 535 if (!ret && slave != dev->caps.function) {
537 mlx4_warn(dev, "%s: sending event " 536 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
538 "%02x(%02x) to slave:%d\n", 537 __func__, eqe->type,
539 __func__, eqe->type,
540 eqe->subtype, slave); 538 eqe->subtype, slave);
541 mlx4_slave_event(dev, slave, eqe); 539 mlx4_slave_event(dev, slave, eqe);
542 break; 540 break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 567 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
570 if (i == mlx4_master_func_num(dev)) 568 if (i == mlx4_master_func_num(dev))
571 continue; 569 continue;
572 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" 570 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
573 " to slave: %d, port:%d\n",
574 __func__, i, port); 571 __func__, i, port);
575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 572 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 573 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
634 be32_to_cpu(eqe->event.cq_err.cqn) 631 be32_to_cpu(eqe->event.cq_err.cqn)
635 & 0xffffff, &slave); 632 & 0xffffff, &slave);
636 if (ret && ret != -ENOENT) { 633 if (ret && ret != -ENOENT) {
637 mlx4_dbg(dev, "CQ event %02x(%02x) on " 634 mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
638 "EQ %d at index %u: could " 635 eqe->type, eqe->subtype,
639 "not get slave id (%d)\n", 636 eq->eqn, eq->cons_index, ret);
640 eqe->type, eqe->subtype,
641 eq->eqn, eq->cons_index, ret);
642 break; 637 break;
643 } 638 }
644 639
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
667 662
668 case MLX4_EVENT_TYPE_COMM_CHANNEL: 663 case MLX4_EVENT_TYPE_COMM_CHANNEL:
669 if (!mlx4_is_master(dev)) { 664 if (!mlx4_is_master(dev)) {
670 mlx4_warn(dev, "Received comm channel event " 665 mlx4_warn(dev, "Received comm channel event for non master device\n");
671 "for non master device\n");
672 break; 666 break;
673 } 667 }
674 memcpy(&priv->mfunc.master.comm_arm_bit_vector, 668 memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
681 case MLX4_EVENT_TYPE_FLR_EVENT: 675 case MLX4_EVENT_TYPE_FLR_EVENT:
682 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); 676 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
683 if (!mlx4_is_master(dev)) { 677 if (!mlx4_is_master(dev)) {
684 mlx4_warn(dev, "Non-master function received" 678 mlx4_warn(dev, "Non-master function received FLR event\n");
685 "FLR event\n");
686 break; 679 break;
687 } 680 }
688 681
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
711 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { 704 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
712 if (mlx4_is_master(dev)) 705 if (mlx4_is_master(dev))
713 for (i = 0; i < dev->num_slaves; i++) { 706 for (i = 0; i < dev->num_slaves; i++) {
714 mlx4_dbg(dev, "%s: Sending " 707 mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
715 "MLX4_FATAL_WARNING_SUBTYPE_WARMING" 708 __func__, i);
716 " to slave: %d\n", __func__, i);
717 if (i == dev->caps.function) 709 if (i == dev->caps.function)
718 continue; 710 continue;
719 mlx4_slave_event(dev, i, eqe); 711 mlx4_slave_event(dev, i, eqe);
720 } 712 }
721 mlx4_err(dev, "Temperature Threshold was reached! " 713 mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
722 "Threshold: %d celsius degrees; " 714 be16_to_cpu(eqe->event.warming.warning_threshold),
723 "Current Temperature: %d\n", 715 be16_to_cpu(eqe->event.warming.current_temperature));
724 be16_to_cpu(eqe->event.warming.warning_threshold),
725 be16_to_cpu(eqe->event.warming.current_temperature));
726 } else 716 } else
727 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " 717 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
728 "subtype %02x on EQ %d at index %u. owner=%x, "
729 "nent=0x%x, slave=%x, ownership=%s\n",
730 eqe->type, eqe->subtype, eq->eqn, 718 eqe->type, eqe->subtype, eq->eqn,
731 eq->cons_index, eqe->owner, eq->nent, 719 eq->cons_index, eqe->owner, eq->nent,
732 eqe->slave_id, 720 eqe->slave_id,
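
The temperature warning consolidated above prints two device-endian fields. A sketch of the byte-order handling, with glibc's be16toh() standing in for be16_to_cpu() and hypothetical values:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* as the device would write them, big-endian on the wire */
            uint16_t wire_threshold = htobe16(105);
            uint16_t wire_current   = htobe16(110);

            printf("Threshold: %d celsius degrees; Current Temperature: %d\n",
                   be16toh(wire_threshold), be16toh(wire_current));
            return 0;
    }
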
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
743 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 731 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
744 case MLX4_EVENT_TYPE_ECC_DETECT: 732 case MLX4_EVENT_TYPE_ECC_DETECT:
745 default: 733 default:
746 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " 734 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
747 "index %u. owner=%x, nent=0x%x, slave=%x, "
748 "ownership=%s\n",
749 eqe->type, eqe->subtype, eq->eqn, 735 eqe->type, eqe->subtype, eq->eqn,
750 eq->cons_index, eqe->owner, eq->nent, 736 eq->cons_index, eqe->owner, eq->nent,
751 eqe->slave_id, 737 eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
1088 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1074 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
1089 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1075 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1090 if (!priv->clr_base) { 1076 if (!priv->clr_base) {
1091 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); 1077 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
1092 return -ENOMEM; 1078 return -ENOMEM;
1093 } 1079 }
1094 1080
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1102 iounmap(priv->clr_base); 1088 iounmap(priv->clr_base);
1103 } 1089 }
1104 1090
1091 static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
1092 const cpumask_t *mask)
1093 {
1094 struct mlx4_irq_notify *n = container_of(notify,
1095 struct mlx4_irq_notify,
1096 notify);
1097 struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
1098 struct radix_tree_iter iter;
1099 void **slot;
1100
1101 radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
1102 struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
1103
1104 if (cq->irq == notify->irq)
1105 cq->irq_affinity_change = true;
1106 }
1107 }
1108
1109 static void mlx4_release_irq_notifier(struct kref *ref)
1110 {
1111 struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
1112 notify.kref);
1113 kfree(n);
1114 }
1115
1116 static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
1117 struct mlx4_dev *dev, int irq)
1118 {
1119 struct mlx4_irq_notify *irq_notifier = NULL;
1120 int err = 0;
1121
1122 irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
1123 if (!irq_notifier) {
1124 mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
1125 irq);
1126 return;
1127 }
1128
1129 irq_notifier->notify.irq = irq;
1130 irq_notifier->notify.notify = mlx4_irq_notifier_notify;
1131 irq_notifier->notify.release = mlx4_release_irq_notifier;
1132 irq_notifier->arg = priv;
1133 err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
1134 if (err) {
1135 kfree(irq_notifier);
1136 irq_notifier = NULL;
1137 mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
1138 }
1139 }
1140
1141
1105 int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1142 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1106 { 1143 {
1107 struct mlx4_priv *priv = mlx4_priv(dev); 1144 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1372 continue; 1409 continue;
1373 /*we dont want to break here*/ 1410 /*we dont want to break here*/
1374 } 1411 }
1412 mlx4_assign_irq_notifier(priv, dev,
1413 priv->eq_table.eq[vec].irq);
1414
1375 eq_set_ci(&priv->eq_table.eq[vec], 1); 1415 eq_set_ci(&priv->eq_table.eq[vec], 1);
1376 } 1416 }
1377 } 1417 }
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1398 Belonging to a legacy EQ*/ 1438 Belonging to a legacy EQ*/
1399 mutex_lock(&priv->msix_ctl.pool_lock); 1439 mutex_lock(&priv->msix_ctl.pool_lock);
1400 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1440 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1441 irq_set_affinity_notifier(
1442 priv->eq_table.eq[vec].irq,
1443 NULL);
1401 free_irq(priv->eq_table.eq[vec].irq, 1444 free_irq(priv->eq_table.eq[vec].irq,
1402 &priv->eq_table.eq[vec]); 1445 &priv->eq_table.eq[vec]);
1403 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1446 priv->msix_ctl.pool_bm &= ~(1ULL << i);
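
The new code in the three eq.c hunks above registers a per-IRQ affinity notifier and, in the release path, unregisters it (irq_set_affinity_notifier(irq, NULL)) before free_irq(). Both callbacks recover the driver wrapper from the embedded notify member via container_of(); a self-contained user-space sketch of that idiom, with hypothetical stand-in types rather than the kernel's (the real callback also receives the new cpumask):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct irq_affinity_notify { int irq; };   /* stand-in for the kernel struct */

    struct mlx4_irq_notify {                   /* mirrors the driver wrapper     */
            void *arg;                         /* driver-private pointer         */
            struct irq_affinity_notify notify; /* embedded, handed to the core   */
    };

    static void notify_cb(struct irq_affinity_notify *notify)
    {
            /* step back from the embedded member to the enclosing wrapper */
            struct mlx4_irq_notify *n =
                    container_of(notify, struct mlx4_irq_notify, notify);

            printf("irq %d changed affinity, priv=%p\n", notify->irq, n->arg);
    }

    int main(void)
    {
            struct mlx4_irq_notify n = { .arg = &n, .notify = { .irq = 42 } };

            /* the core would invoke this with &n.notify on an affinity change */
            notify_cb(&n.notify);
            return 0;
    }

On registration failure the wrapper is simply kfree()d, since the core never took a reference; once registered, it is instead dropped through the kref-backed release callback supplied here.
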
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 01e6dd61ee3c..688e1eabab29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -437,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
437 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 437 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
438 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 438 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
439 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { 439 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
440 mlx4_err(dev, "phy_wqe_gid is " 440 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
441 "enforced on this ib port\n");
442 err = -EPROTONOSUPPORT; 441 err = -EPROTONOSUPPORT;
443 goto out; 442 goto out;
444 } 443 }
@@ -1070,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1070 */ 1069 */
1071 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1070 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1072 if (lg < MLX4_ICM_PAGE_SHIFT) { 1071 if (lg < MLX4_ICM_PAGE_SHIFT) {
1073 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", 1072 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1074 MLX4_ICM_PAGE_SIZE, 1073 MLX4_ICM_PAGE_SIZE,
1075 (unsigned long long) mlx4_icm_addr(&iter), 1074 (unsigned long long) mlx4_icm_addr(&iter),
1076 mlx4_icm_size(&iter)); 1075 mlx4_icm_size(&iter));
1077 err = -EINVAL; 1076 err = -EINVAL;
1078 goto out; 1077 goto out;
1079 } 1078 }
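
For context on the warning above: mlx4_map_cmd picks the mapping page size as the largest power of two dividing both the chunk's address and its length, via ffs(addr | size) - 1, and bails out when that falls below the ICM page shift. A sketch with stand-in values (ffsll() would be the 64-bit analogue for real addresses):

    #include <stdio.h>
    #include <strings.h>          /* ffs() */

    #define ICM_PAGE_SHIFT 12     /* stand-in for MLX4_ICM_PAGE_SHIFT (4 KB) */

    int main(void)
    {
            unsigned long addr = 0x40000, size = 0x3000;
            /* index of the lowest set bit across both values */
            int lg = ffs((int)(addr | size)) - 1;

            printf("max common alignment: 2^%d\n", lg);   /* 12 here */
            if (lg < ICM_PAGE_SHIFT)
                    printf("FW area not aligned to %d\n", 1 << ICM_PAGE_SHIFT);
            return 0;
    }
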
@@ -1109,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1109 1108
1110 switch (op) { 1109 switch (op) {
1111 case MLX4_CMD_MAP_FA: 1110 case MLX4_CMD_MAP_FA:
1112 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); 1111 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1113 break; 1112 break;
1114 case MLX4_CMD_MAP_ICM_AUX: 1113 case MLX4_CMD_MAP_ICM_AUX:
1115 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); 1114 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1116 break; 1115 break;
1117 case MLX4_CMD_MAP_ICM: 1116 case MLX4_CMD_MAP_ICM:
1118 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", 1117 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1119 tc, ts, (unsigned long long) virt - (ts << 10)); 1118 tc, ts, (unsigned long long) virt - (ts << 10));
1120 break; 1119 break;
1121 } 1120 }
1122 1121
@@ -1202,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
1202 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1201 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1203 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1202 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1204 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1203 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1205 mlx4_err(dev, "Installed FW has unsupported " 1204 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1206 "command interface revision %d.\n",
1207 cmd_if_rev); 1205 cmd_if_rev);
1208 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1206 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1209 (int) (dev->caps.fw_ver >> 32), 1207 (int) (dev->caps.fw_ver >> 32),
1210 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1208 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1211 (int) dev->caps.fw_ver & 0xffff); 1209 (int) dev->caps.fw_ver & 0xffff);
1212 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", 1210 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1213 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1211 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1214 err = -ENODEV; 1212 err = -ENODEV;
1215 goto out; 1213 goto out;
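
The revision-range message above sits next to a printout that unpacks the packed 64-bit fw_ver: major in the top 32 bits, minor and sub-minor in the two 16-bit fields below it. A sketch with a hypothetical value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fw_ver = (2ULL << 32) | (31 << 16) | 5000; /* 2.31.5000 */

            printf("Installed FW version is %d.%d.%03d\n",
                   (int)(fw_ver >> 32),
                   (int)(fw_ver >> 16) & 0xffff,
                   (int)fw_ver & 0xffff);
            return 0;
    }
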
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 26169b3eaed8..5f42f6d6e4c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
104 MODULE_PARM_DESC(enable_64b_cqe_eqe, 104 MODULE_PARM_DESC(enable_64b_cqe_eqe,
105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
106 106
107 #define HCA_GLOBAL_CAP_MASK 0
108
109 #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE 107 #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
110 108
111 static char mlx4_version[] = 109 static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
134 132
135static bool use_prio; 133static bool use_prio;
136module_param_named(use_prio, use_prio, bool, 0444); 134module_param_named(use_prio, use_prio, bool, 0444);
137MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 135MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
138 "(0/1, default 0)");
139 136
140int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 137int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
141module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 138module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
163 for (i = 0; i < dev->caps.num_ports - 1; i++) { 160 for (i = 0; i < dev->caps.num_ports - 1; i++) {
164 if (port_type[i] != port_type[i + 1]) { 161 if (port_type[i] != port_type[i + 1]) {
165 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 162 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
166 mlx4_err(dev, "Only same port types supported " 163 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
167 "on this HCA, aborting.\n");
168 return -EINVAL; 164 return -EINVAL;
169 } 165 }
170 } 166 }
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
172 168
173 for (i = 0; i < dev->caps.num_ports; i++) { 169 for (i = 0; i < dev->caps.num_ports; i++) {
174 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 170 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
175 mlx4_err(dev, "Requested port type for port %d is not " 171 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
176 "supported on this HCA\n", i + 1); 172 i + 1);
177 return -EINVAL; 173 return -EINVAL;
178 } 174 }
179 } 175 }
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
195 191
196 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 192 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
197 if (err) { 193 if (err) {
198 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 194 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
199 return err; 195 return err;
200 } 196 }
201 197
202 if (dev_cap->min_page_sz > PAGE_SIZE) { 198 if (dev_cap->min_page_sz > PAGE_SIZE) {
203 mlx4_err(dev, "HCA minimum page size of %d bigger than " 199 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
204 "kernel PAGE_SIZE of %ld, aborting.\n",
205 dev_cap->min_page_sz, PAGE_SIZE); 200 dev_cap->min_page_sz, PAGE_SIZE);
206 return -ENODEV; 201 return -ENODEV;
207 } 202 }
208 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 203 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
209 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 204 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
210 "aborting.\n",
211 dev_cap->num_ports, MLX4_MAX_PORTS); 205 dev_cap->num_ports, MLX4_MAX_PORTS);
212 return -ENODEV; 206 return -ENODEV;
213 } 207 }
214 208
215 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 209 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
216 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 210 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
217 "PCI resource 2 size of 0x%llx, aborting.\n",
218 dev_cap->uar_size, 211 dev_cap->uar_size,
219 (unsigned long long) pci_resource_len(dev->pdev, 2)); 212 (unsigned long long) pci_resource_len(dev->pdev, 2));
220 return -ENODEV; 213 return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
296 289
297 dev->caps.log_num_macs = log_num_mac; 290 dev->caps.log_num_macs = log_num_mac;
298 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 291 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
299 dev->caps.log_num_prios = use_prio ? 3 : 0;
300 292
301 for (i = 1; i <= dev->caps.num_ports; ++i) { 293 for (i = 1; i <= dev->caps.num_ports; ++i) {
302 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 294 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
347 339
348 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 340 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
349 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 341 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
350 mlx4_warn(dev, "Requested number of MACs is too much " 342 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
351 "for port %d, reducing to %d.\n",
352 i, 1 << dev->caps.log_num_macs); 343 i, 1 << dev->caps.log_num_macs);
353 } 344 }
354 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 345 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
355 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 346 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
356 mlx4_warn(dev, "Requested number of VLANs is too much " 347 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
357 "for port %d, reducing to %d.\n",
358 i, 1 << dev->caps.log_num_vlans); 348 i, 1 << dev->caps.log_num_vlans);
359 } 349 }
360 } 350 }
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
366 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 356 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
367 (1 << dev->caps.log_num_macs) * 357 (1 << dev->caps.log_num_macs) *
368 (1 << dev->caps.log_num_vlans) * 358 (1 << dev->caps.log_num_vlans) *
369 (1 << dev->caps.log_num_prios) *
370 dev->caps.num_ports; 359 dev->caps.num_ports;
371 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 360 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
372 361
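
With the deprecated use_prio gone, the hunk above drops the (1 << log_num_prios) factor from the FC_ADDR QP-region size, leaving one reserved QP per (MAC, VLAN, port) combination. A sketch with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
            int log_num_macs = 7, log_num_vlans = 7, num_ports = 2;
            int reserved = (1 << log_num_macs) * (1 << log_num_vlans) * num_ports;

            printf("FC_ADDR reserved QPs: %d\n", reserved);  /* 32768 */
            return 0;
    }
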
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
584 memset(&hca_param, 0, sizeof(hca_param)); 573 memset(&hca_param, 0, sizeof(hca_param));
585 err = mlx4_QUERY_HCA(dev, &hca_param); 574 err = mlx4_QUERY_HCA(dev, &hca_param);
586 if (err) { 575 if (err) {
587 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 576 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
588 return err; 577 return err;
589 } 578 }
590 579
591 /*fail if the hca has an unknown capability */ 580 /* fail if the hca has an unknown global capability
592 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 581 * at this time global_caps should be always zeroed
593 HCA_GLOBAL_CAP_MASK) { 582 */
583 if (hca_param.global_caps) {
594 mlx4_err(dev, "Unknown hca global capabilities\n"); 584 mlx4_err(dev, "Unknown hca global capabilities\n");
595 return -ENOSYS; 585 return -ENOSYS;
596 } 586 }
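
The simplification above is exact: with an allowed-bits mask, (caps | mask) != mask is true precisely when caps carries a bit outside the mask, and the removed HCA_GLOBAL_CAP_MASK was zero, so the test collapses to caps != 0. A sketch:

    #include <assert.h>
    #include <stdint.h>

    static int has_unknown_bits(uint64_t caps, uint64_t allowed)
    {
            return (caps | allowed) != allowed;
    }

    int main(void)
    {
            assert(has_unknown_bits(0x4, 0x3));     /* bit 2 not allowed */
            assert(!has_unknown_bits(0x3, 0x3));    /* all bits allowed  */
            /* with a zero mask the test degenerates to caps != 0 */
            assert(has_unknown_bits(0x1, 0) == (0x1 != 0));
            return 0;
    }
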
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
603 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 593 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
604 err = mlx4_dev_cap(dev, &dev_cap); 594 err = mlx4_dev_cap(dev, &dev_cap);
605 if (err) { 595 if (err) {
606 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 596 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
607 return err; 597 return err;
608 } 598 }
609 599
610 err = mlx4_QUERY_FW(dev); 600 err = mlx4_QUERY_FW(dev);
611 if (err) 601 if (err)
612 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 602 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
613 603
614 page_size = ~dev->caps.page_size_cap + 1; 604 page_size = ~dev->caps.page_size_cap + 1;
615 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 605 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
616 if (page_size > PAGE_SIZE) { 606 if (page_size > PAGE_SIZE) {
617 mlx4_err(dev, "HCA minimum page size of %d bigger than " 607 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
618 "kernel PAGE_SIZE of %ld, aborting.\n",
619 page_size, PAGE_SIZE); 608 page_size, PAGE_SIZE);
620 return -ENODEV; 609 return -ENODEV;
621 } 610 }
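
The page_size computation visible above relies on page_size_cap being a mask of supported page sizes that is contiguous from the minimum upward; under that assumption ~cap + 1 (two's-complement -cap) is exactly the smallest supported size. A sketch (for an arbitrary mask one would use cap & -cap instead):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t page_size_cap = 0xfffff000;      /* 4 KB and up supported */
            uint32_t min_page = ~page_size_cap + 1;   /* == -page_size_cap */

            printf("HCA minimum page size: %u\n", min_page);  /* 4096 */
            return 0;
    }
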
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
633 memset(&func_cap, 0, sizeof(func_cap)); 622 memset(&func_cap, 0, sizeof(func_cap));
634 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 623 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
635 if (err) { 624 if (err) {
636 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", 625 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
637 err); 626 err);
638 return err; 627 return err;
639 } 628 }
640 629
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
661 dev->caps.num_amgms = 0; 650 dev->caps.num_amgms = 0;
662 651
663 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 652 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
664 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 653 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
665 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 654 dev->caps.num_ports, MLX4_MAX_PORTS);
666 return -ENODEV; 655 return -ENODEV;
667 } 656 }
668 657
@@ -682,8 +671,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
682 for (i = 1; i <= dev->caps.num_ports; ++i) { 671 for (i = 1; i <= dev->caps.num_ports; ++i) {
683 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); 672 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
684 if (err) { 673 if (err) {
685 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" 674 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
686 " port %d, aborting (%d).\n", i, err); 675 i, err);
687 goto err_mem; 676 goto err_mem;
688 } 677 }
689 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 678 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
@@ -702,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
702 if (dev->caps.uar_page_size * (dev->caps.num_uars - 691 if (dev->caps.uar_page_size * (dev->caps.num_uars -
703 dev->caps.reserved_uars) > 692 dev->caps.reserved_uars) >
704 pci_resource_len(dev->pdev, 2)) { 693 pci_resource_len(dev->pdev, 2)) {
705 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 694 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
706 "PCI resource 2 size of 0x%llx, aborting.\n",
707 dev->caps.uar_page_size * dev->caps.num_uars, 695 dev->caps.uar_page_size * dev->caps.num_uars,
708 (unsigned long long) pci_resource_len(dev->pdev, 2)); 696 (unsigned long long) pci_resource_len(dev->pdev, 2));
709 goto err_mem; 697 goto err_mem;
@@ -725,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
725 } 713 }
726 714
727 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 715 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
728 mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); 716 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
729 717
730 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 718 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
731 719
@@ -791,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
791 dev->caps.port_type[port] = port_types[port - 1]; 779 dev->caps.port_type[port] = port_types[port - 1];
792 err = mlx4_SET_PORT(dev, port, -1); 780 err = mlx4_SET_PORT(dev, port, -1);
793 if (err) { 781 if (err) {
794 mlx4_err(dev, "Failed to set port %d, " 782 mlx4_err(dev, "Failed to set port %d, aborting\n",
795 "aborting\n", port); 783 port);
796 goto out; 784 goto out;
797 } 785 }
798 } 786 }
@@ -875,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
875 } 863 }
876 } 864 }
877 if (err) { 865 if (err) {
878 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 866 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
879 "Set only 'eth' or 'ib' for both ports "
880 "(should be the same)\n");
881 goto out; 867 goto out;
882 } 868 }
883 869
@@ -982,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
982 mlx4_CLOSE_PORT(mdev, port); 968 mlx4_CLOSE_PORT(mdev, port);
983 err = mlx4_SET_PORT(mdev, port, -1); 969 err = mlx4_SET_PORT(mdev, port, -1);
984 if (err) { 970 if (err) {
985 mlx4_err(mdev, "Failed to set port %d, " 971 mlx4_err(mdev, "Failed to set port %d, aborting\n",
986 "aborting\n", port); 972 port);
987 goto err_set_port; 973 goto err_set_port;
988 } 974 }
989 } 975 }
@@ -1002,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
1002 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 988 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1003 GFP_HIGHUSER | __GFP_NOWARN, 0); 989 GFP_HIGHUSER | __GFP_NOWARN, 0);
1004 if (!priv->fw.fw_icm) { 990 if (!priv->fw.fw_icm) {
1005 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 991 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1006 return -ENOMEM; 992 return -ENOMEM;
1007 } 993 }
1008 994
1009 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 995 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1010 if (err) { 996 if (err) {
1011 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 997 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1012 goto err_free; 998 goto err_free;
1013 } 999 }
1014 1000
1015 err = mlx4_RUN_FW(dev); 1001 err = mlx4_RUN_FW(dev);
1016 if (err) { 1002 if (err) {
1017 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 1003 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1018 goto err_unmap_fa; 1004 goto err_unmap_fa;
1019 } 1005 }
1020 1006
@@ -1098,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1098 1084
1099 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1085 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1100 if (err) { 1086 if (err) {
1101 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 1087 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1102 return err; 1088 return err;
1103 } 1089 }
1104 1090
1105 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 1091 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1106 (unsigned long long) icm_size >> 10, 1092 (unsigned long long) icm_size >> 10,
1107 (unsigned long long) aux_pages << 2); 1093 (unsigned long long) aux_pages << 2);
1108 1094
1109 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1095 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1110 GFP_HIGHUSER | __GFP_NOWARN, 0); 1096 GFP_HIGHUSER | __GFP_NOWARN, 0);
1111 if (!priv->fw.aux_icm) { 1097 if (!priv->fw.aux_icm) {
1112 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 1098 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1113 return -ENOMEM; 1099 return -ENOMEM;
1114 } 1100 }
1115 1101
1116 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1102 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1117 if (err) { 1103 if (err) {
1118 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 1104 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1119 goto err_free_aux; 1105 goto err_free_aux;
1120 } 1106 }
1121 1107
1122 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1108 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1123 if (err) { 1109 if (err) {
1124 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 1110 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1125 goto err_unmap_aux; 1111 goto err_unmap_aux;
1126 } 1112 }
1127 1113
@@ -1132,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1132 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1118 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1133 num_eqs, num_eqs, 0, 0); 1119 num_eqs, num_eqs, 0, 0);
1134 if (err) { 1120 if (err) {
1135 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 1121 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1136 goto err_unmap_cmpt; 1122 goto err_unmap_cmpt;
1137 } 1123 }
1138 1124
@@ -1153,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1153 dev->caps.num_mtts, 1139 dev->caps.num_mtts,
1154 dev->caps.reserved_mtts, 1, 0); 1140 dev->caps.reserved_mtts, 1, 0);
1155 if (err) { 1141 if (err) {
1156 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 1142 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1157 goto err_unmap_eq; 1143 goto err_unmap_eq;
1158 } 1144 }
1159 1145
@@ -1163,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1163 dev->caps.num_mpts, 1149 dev->caps.num_mpts,
1164 dev->caps.reserved_mrws, 1, 1); 1150 dev->caps.reserved_mrws, 1, 1);
1165 if (err) { 1151 if (err) {
1166 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 1152 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1167 goto err_unmap_mtt; 1153 goto err_unmap_mtt;
1168 } 1154 }
1169 1155
@@ -1174,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1174 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1160 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1175 0, 0); 1161 0, 0);
1176 if (err) { 1162 if (err) {
1177 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 1163 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1178 goto err_unmap_dmpt; 1164 goto err_unmap_dmpt;
1179 } 1165 }
1180 1166
@@ -1185,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1185 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1171 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1186 0, 0); 1172 0, 0);
1187 if (err) { 1173 if (err) {
1188 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 1174 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1189 goto err_unmap_qp; 1175 goto err_unmap_qp;
1190 } 1176 }
1191 1177
@@ -1196,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1196 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1182 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1197 0, 0); 1183 0, 0);
1198 if (err) { 1184 if (err) {
1199 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 1185 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1200 goto err_unmap_auxc; 1186 goto err_unmap_auxc;
1201 } 1187 }
1202 1188
@@ -1217,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1217 dev->caps.num_cqs, 1203 dev->caps.num_cqs,
1218 dev->caps.reserved_cqs, 0, 0); 1204 dev->caps.reserved_cqs, 0, 0);
1219 if (err) { 1205 if (err) {
1220 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 1206 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1221 goto err_unmap_rdmarc; 1207 goto err_unmap_rdmarc;
1222 } 1208 }
1223 1209
@@ -1227,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1227 dev->caps.num_srqs, 1213 dev->caps.num_srqs,
1228 dev->caps.reserved_srqs, 0, 0); 1214 dev->caps.reserved_srqs, 0, 0);
1229 if (err) { 1215 if (err) {
1230 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 1216 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1231 goto err_unmap_cq; 1217 goto err_unmap_cq;
1232 } 1218 }
1233 1219
@@ -1245,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1245 dev->caps.num_mgms + dev->caps.num_amgms, 1231 dev->caps.num_mgms + dev->caps.num_amgms,
1246 0, 0); 1232 0, 0);
1247 if (err) { 1233 if (err) {
1248 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 1234 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1249 goto err_unmap_srq; 1235 goto err_unmap_srq;
1250 } 1236 }
1251 1237
@@ -1322,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1322 1308
1323 mutex_lock(&priv->cmd.slave_cmd_mutex); 1309 mutex_lock(&priv->cmd.slave_cmd_mutex);
1324 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1310 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1325 mlx4_warn(dev, "Failed to close slave function.\n"); 1311 mlx4_warn(dev, "Failed to close slave function\n");
1326 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1312 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1327 } 1313 }
1328 1314
@@ -1420,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1420 u32 cmd_channel_ver; 1406 u32 cmd_channel_ver;
1421 1407
1422 if (atomic_read(&pf_loading)) { 1408 if (atomic_read(&pf_loading)) {
1423 mlx4_warn(dev, "PF is not ready. Deferring probe\n"); 1409 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1424 return -EPROBE_DEFER; 1410 return -EPROBE_DEFER;
1425 } 1411 }
1426 1412
@@ -1433,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1433 * NUM_OF_RESET_RETRIES times before leaving.*/ 1419 * NUM_OF_RESET_RETRIES times before leaving.*/
1434 if (ret_from_reset) { 1420 if (ret_from_reset) {
1435 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1421 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1436 mlx4_warn(dev, "slave is currently in the " 1422 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1437 "middle of FLR. Deferring probe.\n");
1438 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1423 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1439 return -EPROBE_DEFER; 1424 return -EPROBE_DEFER;
1440 } else 1425 } else
@@ -1448,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1448 1433
1449 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1434 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1450 MLX4_COMM_GET_IF_REV(slave_read)) { 1435 MLX4_COMM_GET_IF_REV(slave_read)) {
1451 mlx4_err(dev, "slave driver version is not supported" 1436 mlx4_err(dev, "slave driver version is not supported by the master\n");
1452 " by the master\n");
1453 goto err; 1437 goto err;
1454 } 1438 }
1455 1439
@@ -1527,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1527 1511
1528 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1512 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1529 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1513 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1530 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " 1514 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1531 "set to use B0 steering. Falling back to A0 steering mode.\n");
1532 } 1515 }
1533 dev->oper_log_mgm_entry_size = 1516 dev->oper_log_mgm_entry_size =
1534 mlx4_log_num_mgm_entry_size > 0 ? 1517 mlx4_log_num_mgm_entry_size > 0 ?
@@ -1536,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1536 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1519 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1537 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1520 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1538 } 1521 }
1539 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " 1522 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1540 "modparam log_num_mgm_entry_size = %d\n",
1541 mlx4_steering_mode_str(dev->caps.steering_mode), 1523 mlx4_steering_mode_str(dev->caps.steering_mode),
1542 dev->oper_log_mgm_entry_size, 1524 dev->oper_log_mgm_entry_size,
1543 mlx4_log_num_mgm_entry_size); 1525 mlx4_log_num_mgm_entry_size);
@@ -1571,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1571 err = mlx4_QUERY_FW(dev); 1553 err = mlx4_QUERY_FW(dev);
1572 if (err) { 1554 if (err) {
1573 if (err == -EACCES) 1555 if (err == -EACCES)
1574 mlx4_info(dev, "non-primary physical function, skipping.\n"); 1556 mlx4_info(dev, "non-primary physical function, skipping\n");
1575 else 1557 else
1576 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 1558 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
1577 return err; 1559 return err;
1578 } 1560 }
1579 1561
1580 err = mlx4_load_fw(dev); 1562 err = mlx4_load_fw(dev);
1581 if (err) { 1563 if (err) {
1582 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1564 mlx4_err(dev, "Failed to start FW, aborting\n");
1583 return err; 1565 return err;
1584 } 1566 }
1585 1567
@@ -1591,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1591 1573
1592 err = mlx4_dev_cap(dev, &dev_cap); 1574 err = mlx4_dev_cap(dev, &dev_cap);
1593 if (err) { 1575 if (err) {
1594 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1576 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
1595 goto err_stop_fw; 1577 goto err_stop_fw;
1596 } 1578 }
1597 1579
@@ -1632,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1632 1614
1633 err = mlx4_INIT_HCA(dev, &init_hca); 1615 err = mlx4_INIT_HCA(dev, &init_hca);
1634 if (err) { 1616 if (err) {
1635 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1617 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
1636 goto err_free_icm; 1618 goto err_free_icm;
1637 } 1619 }
1638 /* 1620 /*
@@ -1643,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1643 memset(&init_hca, 0, sizeof(init_hca)); 1625 memset(&init_hca, 0, sizeof(init_hca));
1644 err = mlx4_QUERY_HCA(dev, &init_hca); 1626 err = mlx4_QUERY_HCA(dev, &init_hca);
1645 if (err) { 1627 if (err) {
1646 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); 1628 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
1647 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1629 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1648 } else { 1630 } else {
1649 dev->caps.hca_core_clock = 1631 dev->caps.hca_core_clock =
@@ -1656,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1656 if (!dev->caps.hca_core_clock) { 1638 if (!dev->caps.hca_core_clock) {
1657 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1639 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1658 mlx4_err(dev, 1640 mlx4_err(dev,
1659 "HCA frequency is 0. Timestamping is not supported."); 1641 "HCA frequency is 0 - timestamping is not supported\n");
1660 } else if (map_internal_clock(dev)) { 1642 } else if (map_internal_clock(dev)) {
1661 /* 1643 /*
1662 * Map internal clock, 1644 * Map internal clock,
1663 * in case of failure disable timestamping 1645 * in case of failure disable timestamping
1664 */ 1646 */
1665 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1647 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1666 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); 1648 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
1667 } 1649 }
1668 } 1650 }
1669 } else { 1651 } else {
@@ -1690,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1690 1672
1691 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1673 err = mlx4_QUERY_ADAPTER(dev, &adapter);
1692 if (err) { 1674 if (err) {
1693 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1675 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
1694 goto unmap_bf; 1676 goto unmap_bf;
1695 } 1677 }
1696 1678
@@ -1808,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1808 1790
1809 err = mlx4_init_uar_table(dev); 1791 err = mlx4_init_uar_table(dev);
1810 if (err) { 1792 if (err) {
1811 mlx4_err(dev, "Failed to initialize " 1793 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
1812 "user access region table, aborting.\n"); 1794 return err;
1813 return err;
1814 } 1795 }
1815 1796
1816 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1797 err = mlx4_uar_alloc(dev, &priv->driver_uar);
1817 if (err) { 1798 if (err) {
1818 mlx4_err(dev, "Failed to allocate driver access region, " 1799 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
1819 "aborting.\n");
1820 goto err_uar_table_free; 1800 goto err_uar_table_free;
1821 } 1801 }
1822 1802
1823 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1803 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1824 if (!priv->kar) { 1804 if (!priv->kar) {
1825 mlx4_err(dev, "Couldn't map kernel access region, " 1805 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
1826 "aborting.\n");
1827 err = -ENOMEM; 1806 err = -ENOMEM;
1828 goto err_uar_free; 1807 goto err_uar_free;
1829 } 1808 }
1830 1809
1831 err = mlx4_init_pd_table(dev); 1810 err = mlx4_init_pd_table(dev);
1832 if (err) { 1811 if (err) {
1833 mlx4_err(dev, "Failed to initialize " 1812 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
1834 "protection domain table, aborting.\n");
1835 goto err_kar_unmap; 1813 goto err_kar_unmap;
1836 } 1814 }
1837 1815
1838 err = mlx4_init_xrcd_table(dev); 1816 err = mlx4_init_xrcd_table(dev);
1839 if (err) { 1817 if (err) {
1840 mlx4_err(dev, "Failed to initialize " 1818 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
1841 "reliable connection domain table, aborting.\n");
1842 goto err_pd_table_free; 1819 goto err_pd_table_free;
1843 } 1820 }
1844 1821
1845 err = mlx4_init_mr_table(dev); 1822 err = mlx4_init_mr_table(dev);
1846 if (err) { 1823 if (err) {
1847 mlx4_err(dev, "Failed to initialize " 1824 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
1848 "memory region table, aborting.\n");
1849 goto err_xrcd_table_free; 1825 goto err_xrcd_table_free;
1850 } 1826 }
1851 1827
1852 if (!mlx4_is_slave(dev)) { 1828 if (!mlx4_is_slave(dev)) {
1853 err = mlx4_init_mcg_table(dev); 1829 err = mlx4_init_mcg_table(dev);
1854 if (err) { 1830 if (err) {
1855 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); 1831 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
1856 goto err_mr_table_free; 1832 goto err_mr_table_free;
1857 } 1833 }
1858 } 1834 }
1859 1835
1860 err = mlx4_init_eq_table(dev); 1836 err = mlx4_init_eq_table(dev);
1861 if (err) { 1837 if (err) {
1862 mlx4_err(dev, "Failed to initialize " 1838 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
1863 "event queue table, aborting.\n");
1864 goto err_mcg_table_free; 1839 goto err_mcg_table_free;
1865 } 1840 }
1866 1841
1867 err = mlx4_cmd_use_events(dev); 1842 err = mlx4_cmd_use_events(dev);
1868 if (err) { 1843 if (err) {
1869 mlx4_err(dev, "Failed to switch to event-driven " 1844 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
1870 "firmware commands, aborting.\n");
1871 goto err_eq_table_free; 1845 goto err_eq_table_free;
1872 } 1846 }
1873 1847
1874 err = mlx4_NOP(dev); 1848 err = mlx4_NOP(dev);
1875 if (err) { 1849 if (err) {
1876 if (dev->flags & MLX4_FLAG_MSI_X) { 1850 if (dev->flags & MLX4_FLAG_MSI_X) {
1877 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1851 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
1878 "interrupt IRQ %d).\n",
1879 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1852 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1880 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1853 mlx4_warn(dev, "Trying again without MSI-X\n");
1881 } else { 1854 } else {
1882 mlx4_err(dev, "NOP command failed to generate interrupt " 1855 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
1883 "(IRQ %d), aborting.\n",
1884 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1856 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1885 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1857 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1886 } 1858 }
@@ -1892,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1892 1864
1893 err = mlx4_init_cq_table(dev); 1865 err = mlx4_init_cq_table(dev);
1894 if (err) { 1866 if (err) {
1895 mlx4_err(dev, "Failed to initialize " 1867 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
1896 "completion queue table, aborting.\n");
1897 goto err_cmd_poll; 1868 goto err_cmd_poll;
1898 } 1869 }
1899 1870
1900 err = mlx4_init_srq_table(dev); 1871 err = mlx4_init_srq_table(dev);
1901 if (err) { 1872 if (err) {
1902 mlx4_err(dev, "Failed to initialize " 1873 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
1903 "shared receive queue table, aborting.\n");
1904 goto err_cq_table_free; 1874 goto err_cq_table_free;
1905 } 1875 }
1906 1876
1907 err = mlx4_init_qp_table(dev); 1877 err = mlx4_init_qp_table(dev);
1908 if (err) { 1878 if (err) {
1909 mlx4_err(dev, "Failed to initialize " 1879 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
1910 "queue pair table, aborting.\n");
1911 goto err_srq_table_free; 1880 goto err_srq_table_free;
1912 } 1881 }
1913 1882
1914 err = mlx4_init_counters_table(dev); 1883 err = mlx4_init_counters_table(dev);
1915 if (err && err != -ENOENT) { 1884 if (err && err != -ENOENT) {
1916 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1885 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
1917 goto err_qp_table_free; 1886 goto err_qp_table_free;
1918 } 1887 }
1919 1888
@@ -1923,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1923 err = mlx4_get_port_ib_caps(dev, port, 1892 err = mlx4_get_port_ib_caps(dev, port,
1924 &ib_port_default_caps); 1893 &ib_port_default_caps);
1925 if (err) 1894 if (err)
1926 mlx4_warn(dev, "failed to get port %d default " 1895 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
1927 "ib capabilities (%d). Continuing " 1896 port, err);
1928 "with caps = 0\n", port, err);
1929 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1897 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1930 1898
1931 /* initialize per-slave default ib port capabilities */ 1899 /* initialize per-slave default ib port capabilities */
@@ -1935,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1935 if (i == mlx4_master_func_num(dev)) 1903 if (i == mlx4_master_func_num(dev))
1936 continue; 1904 continue;
1937 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 1905 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1938 ib_port_default_caps; 1906 ib_port_default_caps;
1939 } 1907 }
1940 } 1908 }
1941 1909
@@ -1948,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1948 dev->caps.pkey_table_len[port] : -1); 1916 dev->caps.pkey_table_len[port] : -1);
1949 if (err) { 1917 if (err) {
1950 mlx4_err(dev, "Failed to set port %d, aborting\n", 1918 mlx4_err(dev, "Failed to set port %d, aborting\n",
1951 port); 1919 port);
1952 goto err_counters_table_free; 1920 goto err_counters_table_free;
1953 } 1921 }
1954 } 1922 }
@@ -2024,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2024 kfree(entries); 1992 kfree(entries);
2025 goto no_msi; 1993 goto no_msi;
2026 } else if (nreq < MSIX_LEGACY_SZ + 1994 } else if (nreq < MSIX_LEGACY_SZ +
2027 dev->caps.num_ports * MIN_MSIX_P_PORT) { 1995 dev->caps.num_ports * MIN_MSIX_P_PORT) {
2028 /*Working in legacy mode , all EQ's shared*/ 1996 /*Working in legacy mode , all EQ's shared*/
2029 dev->caps.comp_pool = 0; 1997 dev->caps.comp_pool = 0;
2030 dev->caps.num_comp_vectors = nreq - 1; 1998 dev->caps.num_comp_vectors = nreq - 1;
@@ -2225,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2225 2193
2226 err = pci_enable_device(pdev); 2194 err = pci_enable_device(pdev);
2227 if (err) { 2195 if (err) {
2228 dev_err(&pdev->dev, "Cannot enable PCI device, " 2196 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
2229 "aborting.\n");
2230 return err; 2197 return err;
2231 } 2198 }
2232 2199
@@ -2273,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2273 */ 2240 */
2274 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 2241 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2275 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2242 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2276 dev_err(&pdev->dev, "Missing DCS, aborting." 2243 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2277 "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2278 pci_dev_data, pci_resource_flags(pdev, 0)); 2244 pci_dev_data, pci_resource_flags(pdev, 0));
2279 err = -ENODEV; 2245 err = -ENODEV;
2280 goto err_disable_pdev; 2246 goto err_disable_pdev;
2281 } 2247 }
2282 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 2248 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2283 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 2249 dev_err(&pdev->dev, "Missing UAR, aborting\n");
2284 err = -ENODEV; 2250 err = -ENODEV;
2285 goto err_disable_pdev; 2251 goto err_disable_pdev;
2286 } 2252 }
@@ -2295,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2295 2261
2296 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2262 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2297 if (err) { 2263 if (err) {
2298 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 2264 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
2299 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2265 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2300 if (err) { 2266 if (err) {
2301 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 2267 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
2302 goto err_release_regions; 2268 goto err_release_regions;
2303 } 2269 }
2304 } 2270 }
2305 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2271 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2306 if (err) { 2272 if (err) {
2307 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 2273 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
2308 "consistent PCI DMA mask.\n");
2309 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2274 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2310 if (err) { 2275 if (err) {
2311 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 2276 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
2312 "aborting.\n");
2313 goto err_release_regions; 2277 goto err_release_regions;
2314 } 2278 }
2315 } 2279 }
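
The hunk above keeps the usual two-step DMA-mask negotiation: try 64-bit, warn and fall back to 32-bit, abort only if both fail. A sketch of the control flow, with a stub standing in for pci_set_dma_mask():

    #include <stdint.h>
    #include <stdio.h>

    static int set_dma_mask(uint64_t mask)
    {
            /* pretend the platform only supports 32-bit DMA */
            return mask > 0xffffffffULL ? -1 : 0;
    }

    int main(void)
    {
            if (set_dma_mask(~0ULL)) {                 /* DMA_BIT_MASK(64) */
                    fprintf(stderr, "couldn't set 64-bit DMA mask\n");
                    if (set_dma_mask(0xffffffffULL)) { /* DMA_BIT_MASK(32) */
                            fprintf(stderr, "can't set DMA mask, aborting\n");
                            return 1;
                    }
            }
            printf("DMA mask configured\n");
            return 0;
    }
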
@@ -2340,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2340 if (total_vfs) { 2304 if (total_vfs) {
2341 unsigned vfs_offset = 0; 2305 unsigned vfs_offset = 0;
2342 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 2306 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2343 vfs_offset + nvfs[i] < extended_func_num(pdev); 2307 vfs_offset + nvfs[i] < extended_func_num(pdev);
2344 vfs_offset += nvfs[i], i++) 2308 vfs_offset += nvfs[i], i++)
2345 ; 2309 ;
2346 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 2310 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2366,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2366 if (err < 0) 2330 if (err < 0)
2367 goto err_free_dev; 2331 goto err_free_dev;
2368 else { 2332 else {
2369 mlx4_warn(dev, "Multiple PFs not yet supported." 2333 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
2370 " Skipping PF.\n");
2371 err = -EINVAL; 2334 err = -EINVAL;
2372 goto err_free_dev; 2335 goto err_free_dev;
2373 } 2336 }
@@ -2377,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2377 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", 2340 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2378 total_vfs); 2341 total_vfs);
2379 dev->dev_vfs = kzalloc( 2342 dev->dev_vfs = kzalloc(
2380 total_vfs * sizeof(*dev->dev_vfs), 2343 total_vfs * sizeof(*dev->dev_vfs),
2381 GFP_KERNEL); 2344 GFP_KERNEL);
2382 if (NULL == dev->dev_vfs) { 2345 if (NULL == dev->dev_vfs) {
2383 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2346 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2384 err = 0; 2347 err = 0;
@@ -2386,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2386 atomic_inc(&pf_loading); 2349 atomic_inc(&pf_loading);
2387 err = pci_enable_sriov(pdev, total_vfs); 2350 err = pci_enable_sriov(pdev, total_vfs);
2388 if (err) { 2351 if (err) {
2389 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2352 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2390 err); 2353 err);
2391 atomic_dec(&pf_loading); 2354 atomic_dec(&pf_loading);
2392 err = 0; 2355 err = 0;
2393 } else { 2356 } else {
2394 mlx4_warn(dev, "Running in master mode\n"); 2357 mlx4_warn(dev, "Running in master mode\n");
2395 dev->flags |= MLX4_FLAG_SRIOV | 2358 dev->flags |= MLX4_FLAG_SRIOV |
2396 MLX4_FLAG_MASTER; 2359 MLX4_FLAG_MASTER;
2397 dev->num_vfs = total_vfs; 2360 dev->num_vfs = total_vfs;
2398 sriov_initialized = 1; 2361 sriov_initialized = 1;
2399 } 2362 }
@@ -2410,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2410 */ 2373 */
2411 err = mlx4_reset(dev); 2374 err = mlx4_reset(dev);
2412 if (err) { 2375 if (err) {
2413 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2376 mlx4_err(dev, "Failed to reset HCA, aborting\n");
2414 goto err_rel_own; 2377 goto err_rel_own;
2415 } 2378 }
2416 } 2379 }
@@ -2418,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2418 slave_start: 2381 slave_start:
2419 err = mlx4_cmd_init(dev); 2382 err = mlx4_cmd_init(dev);
2420 if (err) { 2383 if (err) {
2421 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 2384 mlx4_err(dev, "Failed to init command interface, aborting\n");
2422 goto err_sriov; 2385 goto err_sriov;
2423 } 2386 }
2424 2387
@@ -2432,8 +2395,7 @@ slave_start:
2432 dev->num_slaves = 0; 2395 dev->num_slaves = 0;
2433 err = mlx4_multi_func_init(dev); 2396 err = mlx4_multi_func_init(dev);
2434 if (err) { 2397 if (err) {
2435 mlx4_err(dev, "Failed to init slave mfunc" 2398 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
2436 " interface, aborting.\n");
2437 goto err_cmd; 2399 goto err_cmd;
2438 } 2400 }
2439 } 2401 }
@@ -2465,8 +2427,7 @@ slave_start:
2465 unsigned sum = 0; 2427 unsigned sum = 0;
2466 err = mlx4_multi_func_init(dev); 2428 err = mlx4_multi_func_init(dev);
2467 if (err) { 2429 if (err) {
2468 mlx4_err(dev, "Failed to init master mfunc" 2430 mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
2469 "interface, aborting.\n");
2470 goto err_close; 2431 goto err_close;
2471 } 2432 }
2472 if (sriov_initialized) { 2433 if (sriov_initialized) {
@@ -2477,10 +2438,7 @@ slave_start:
2477 if (ib_ports && 2438 if (ib_ports &&
2478 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2479 mlx4_err(dev, 2440 mlx4_err(dev,
2480 "Invalid syntax of num_vfs/probe_vfs " 2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2481 "with IB port. Single port VFs syntax"
2482 " is only supported when all ports "
2483 "are configured as ethernet\n");
2484 goto err_close; 2442 goto err_close;
2485 } 2443 }
2486 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2444 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2506,8 +2464,7 @@ slave_start:
2506 if ((mlx4_is_mfunc(dev)) && 2464 if ((mlx4_is_mfunc(dev)) &&
2507 !(dev->flags & MLX4_FLAG_MSI_X)) { 2465 !(dev->flags & MLX4_FLAG_MSI_X)) {
2508 err = -ENOSYS; 2466 err = -ENOSYS;
2509 mlx4_err(dev, "INTx is not supported in multi-function mode." 2467 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
2510 " aborting.\n");
2511 goto err_free_eq; 2468 goto err_free_eq;
2512 } 2469 }
2513 2470
@@ -2660,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
2660 /* in SRIOV it is not allowed to unload the pf's 2617 /* in SRIOV it is not allowed to unload the pf's
2661 * driver while there are alive vf's */ 2618 * driver while there are alive vf's */
2662 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) 2619 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2663 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 2620 pr_warn("Removing PF when there are assigned VF's !!!\n");
2664 mlx4_stop_sense(dev); 2621 mlx4_stop_sense(dev);
2665 mlx4_unregister_device(dev); 2622 mlx4_unregister_device(dev);
2666 2623
@@ -2824,7 +2781,7 @@ static struct pci_driver mlx4_driver = {
2824 .name = DRV_NAME, 2781 .name = DRV_NAME,
2825 .id_table = mlx4_pci_table, 2782 .id_table = mlx4_pci_table,
2826 .probe = mlx4_init_one, 2783 .probe = mlx4_init_one,
2827 .shutdown = mlx4_remove_one, 2784 .shutdown = __mlx4_remove_one,
2828 .remove = mlx4_remove_one, 2785 .remove = mlx4_remove_one,
2829 .err_handler = &mlx4_err_handler, 2786 .err_handler = &mlx4_err_handler,
2830}; 2787};
@@ -2832,33 +2789,36 @@ static struct pci_driver mlx4_driver = {
2832static int __init mlx4_verify_params(void) 2789static int __init mlx4_verify_params(void)
2833{ 2790{
2834 if ((log_num_mac < 0) || (log_num_mac > 7)) { 2791 if ((log_num_mac < 0) || (log_num_mac > 7)) {
2835 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 2792 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
2836 return -1; 2793 return -1;
2837 } 2794 }
2838 2795
2839 if (log_num_vlan != 0) 2796 if (log_num_vlan != 0)
2840 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2797 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2841 MLX4_LOG_NUM_VLANS); 2798 MLX4_LOG_NUM_VLANS);
2799
2800 if (use_prio != 0)
2801 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
2842 2802
2843 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2803 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
2844 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2804 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
2805 log_mtts_per_seg);
2845 return -1; 2806 return -1;
2846 } 2807 }
2847 2808
2848 /* Check if module param for ports type has legal combination */ 2809 /* Check if module param for ports type has legal combination */
2849 if (port_type_array[0] == false && port_type_array[1] == true) { 2810 if (port_type_array[0] == false && port_type_array[1] == true) {
2850 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2811 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2851 port_type_array[0] = true; 2812 port_type_array[0] = true;
2852 } 2813 }
2853 2814
2854 if (mlx4_log_num_mgm_entry_size != -1 && 2815 if (mlx4_log_num_mgm_entry_size != -1 &&
2855 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 2816 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2856 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { 2817 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2857 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " 2818 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
2858 "in legal range (-1 or %d..%d)\n", 2819 mlx4_log_num_mgm_entry_size,
2859 mlx4_log_num_mgm_entry_size, 2820 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2860 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 2821 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2861 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2862 return -1; 2822 return -1;
2863 } 2823 }
2864 2824
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..4c36def8e10f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 
 		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
 			if (*index != hash) {
-				mlx4_err(dev, "Found zero MGID in AMGM.\n");
+				mlx4_err(dev, "Found zero MGID in AMGM\n");
 				err = -EINVAL;
 			}
 			return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
 	mlx4_err(dev, "%s", buf);
 
 	if (len >= BUF_SIZE)
-		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
 }
 
 int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
 		if (ret < 0) {
 			mlx4_free_cmd_mailbox(dev, mailbox);
-			return -EINVAL;
+			return ret;
 		}
 		size += ret;
 	}
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
 	if (ret == -ENOMEM)
 		mlx4_err_rule(dev,
-			      "mcg table is full. Fail to register network rule.\n",
+			      "mcg table is full. Fail to register network rule\n",
 			      rule);
 	else if (ret)
-		mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+		mlx4_err_rule(dev, "Fail to register network rule\n", rule);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	if (members_count == dev->caps.num_qp_per_mgm) {
-		mlx4_err(dev, "MGM at index %x is full.\n", index);
+		mlx4_err(dev, "MGM at index %x is full\n", index);
 		err = -ENOMEM;
 		goto out;
 	}
@@ -1042,7 +1042,7 @@ out:
 	}
 	if (err && link && index != -1) {
 		if (index < dev->caps.num_mgms)
-			mlx4_warn(dev, "Got AMGM index %d < %d",
+			mlx4_warn(dev, "Got AMGM index %d < %d\n",
 				  index, dev->caps.num_mgms);
 		else
 			mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	if (amgm_index) {
 		if (amgm_index < dev->caps.num_mgms)
-			mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+			mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
 				  index, amgm_index, dev->caps.num_mgms);
 		else
 			mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		goto out;
 
 	if (index < dev->caps.num_mgms)
-		mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+		mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
 			  prev, index, dev->caps.num_mgms);
 	else
 		mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7a0665beebb1..1d8af7336807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -221,18 +221,19 @@ extern int mlx4_debug_level;
 #define mlx4_debug_level	(0)
 #endif /* CONFIG_MLX4_DEBUG */
 
-#define mlx4_dbg(mdev, format, arg...)					\
+#define mlx4_dbg(mdev, format, ...)					\
 do {									\
 	if (mlx4_debug_level)						\
-		dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+		dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,	\
+			   ##__VA_ARGS__);				\
 } while (0)
 
-#define mlx4_err(mdev, format, arg...) \
-	dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
-	dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
-	dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...)					\
+	dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)					\
+	dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)					\
+	dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..0e15295bedd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	int hwtstamp_rx_filter;
+	cpumask_var_t affinity_mask;
 };
 
 struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
 int en_print(const char *level, const struct mlx4_en_priv *priv,
 	     const char *format, ...);
 
-#define en_dbg(mlevel, priv, format, arg...)				\
+#define en_dbg(mlevel, priv, format, ...)				\
 do {									\
-	if (NETIF_MSG_##mlevel & priv->msg_enable)			\
-		en_print(KERN_DEBUG, priv, format, ##arg);		\
+	if (NETIF_MSG_##mlevel & (priv)->msg_enable)			\
+		en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);	\
 } while (0)
-#define en_warn(priv, format, arg...)					\
-	en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...)					\
-	en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...)					\
-	en_print(KERN_INFO, priv, format, ## arg)
+#define en_warn(priv, format, ...)					\
+	en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...)					\
+	en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...)					\
+	en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
 
-#define mlx4_err(mdev, format, arg...)					\
-	pr_err("%s %s: " format, DRV_NAME,				\
-	       dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...)					\
-	pr_info("%s %s: " format, DRV_NAME,				\
-		dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...)					\
-	pr_warning("%s %s: " format, DRV_NAME,				\
-		   dev_name(&mdev->pdev->dev), ##arg)
+#define mlx4_err(mdev, format, ...)					\
+	pr_err(DRV_NAME " %s: " format,					\
+	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)					\
+	pr_info(DRV_NAME " %s: " format,				\
+		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)					\
+	pr_warn(DRV_NAME " %s: " format,				\
+		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
 
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 4c71dafad217..2839abb878a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
 				       MLX4_CMD_TIME_CLASS_A,
 				       MLX4_CMD_WRAPPED);
 		if (err)
-			mlx4_warn(dev, "Failed to free mtt range at:"
-				  "%d order:%d\n", offset, order);
+			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+				  offset, order);
 		return;
 	}
 	__mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 			  key_to_hw_index(mr->key) &
 			  (dev->caps.num_mpts - 1));
 		if (err) {
-			mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
-			mlx4_warn(dev, "MR has MWs bound to it.\n");
+			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+				  err);
 			return err;
 		}
 
@@ -774,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 			mlx4_alloc_mtt_range(dev,
 					     fls(dev->caps.reserved_mtts - 1));
 		if (priv->reserved_mtts < 0) {
-			mlx4_warn(dev, "MTT table of order %u is too small.\n",
+			mlx4_warn(dev, "MTT table of order %u is too small\n",
 				  mr_table->mtt_buddy.max_order);
 			err = -ENOMEM;
 			goto err_reserve_mtts;
@@ -955,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		err = PTR_ERR(mailbox);
-		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
-		       " failed (%d)\n", err);
+		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
 		return;
 	}
 
@@ -965,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		     (dev->caps.num_mpts - 1));
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	if (err) {
-		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
-		       err);
+		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
 		return;
 	}
 	fmr->mr.enabled = MLX4_MPT_EN_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f203c6e6..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 	if (validate_index(dev, table, index))
 		goto out;
 	if (--table->refs[index]) {
-		mlx4_dbg(dev, "Have more references for index %d,"
-			 "no need to modify mac table\n", index);
+		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+			 index);
 		goto out;
 	}
 
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
 	}
 
 	if (--table->refs[index]) {
-		mlx4_dbg(dev, "Have %d more references for index %d,"
-			 "no need to modify vlan table\n", table->refs[index],
-			 index);
+		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+			 table->refs[index], index);
 		goto out;
 	}
 	table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 			if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
 				    sizeof(gid_entry_tbl->raw))) {
 				/* found duplicate */
-				mlx4_warn(dev, "requested gid entry for slave:%d "
-					  "is a duplicate of gid at index %d\n",
+				mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
 					  slave, i);
 				mutex_unlock(&(priv->port[port].gid_table.mutex));
 				return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	}
 
 	if (total_size > dev_cap->max_icm_sz) {
-		mlx4_err(dev, "Profile requires 0x%llx bytes; "
-			 "won't fit in 0x%llx bytes of context memory.\n",
-			 (unsigned long long) total_size,
-			 (unsigned long long) dev_cap->max_icm_sz);
+		mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+			 (unsigned long long) total_size,
+			 (unsigned long long) dev_cap->max_icm_sz);
 		kfree(profile);
 		return -ENOMEM;
 	}
 
 	if (profile[i].size)
-		mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
-			 "size 0x%10llx\n",
-			 i, res_name[profile[i].type], profile[i].log_num,
+		mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+			 i, res_name[profile[i].type],
+			 profile[i].log_num,
 			 (unsigned long long) profile[i].start,
 			 (unsigned long long) profile[i].size);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 40af61947925..0dc31d85fc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 					   MLX4_CMD_FREE_RES,
 					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 		if (err) {
-			mlx4_warn(dev, "Failed to release qp range"
-				  " base:%d cnt:%d\n", base_qpn, cnt);
+			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+				  base_qpn, cnt);
 		}
 	} else
 		__mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
 				     context, 0, 0, qp);
 		if (err) {
-			mlx4_err(dev, "Failed to bring QP to state: "
-				 "%d with error: %d\n",
+			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
 				 states[i + 1], err);
 			return err;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 	hca_header = kmalloc(256, GFP_KERNEL);
 	if (!hca_header) {
 		err = -ENOMEM;
-		mlx4_err(dev, "Couldn't allocate memory to save HCA "
-			  "PCI header, aborting.\n");
+		mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
 		goto out;
 	}
 
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 			continue;
 		if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
 			err = -ENODEV;
-			mlx4_err(dev, "Couldn't save HCA "
-				  "PCI header, aborting.\n");
+			mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
 			goto out;
 		}
 	}
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 			MLX4_RESET_SIZE);
 	if (!reset) {
 		err = -ENOMEM;
-		mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+		mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
 		goto out;
 	}
 
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 
 	if (vendor == 0xffff) {
 		err = -ENODEV;
-		mlx4_err(dev, "PCI device did not come back after reset, "
-			  "aborting.\n");
+		mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
 		goto out;
 	}
 
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
 		if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
 					       devctl)) {
 			err = -ENODEV;
-			mlx4_err(dev, "Couldn't restore HCA PCI Express "
-				 "Device Control register, aborting.\n");
+			mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
 			goto out;
 		}
 		linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
 		if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
 					       linkctl)) {
 			err = -ENODEV;
-			mlx4_err(dev, "Couldn't restore HCA PCI Express "
-				 "Link control register, aborting.\n");
+			mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
 			goto out;
 		}
 	}
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
 
 		if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
 			err = -ENODEV;
-			mlx4_err(dev, "Couldn't restore HCA reg %x, "
-				  "aborting.\n", i);
+			mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+				 i);
 			goto out;
 		}
 	}
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 	if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
 				   hca_header[PCI_COMMAND / 4])) {
 		err = -ENODEV;
-		mlx4_err(dev, "Couldn't restore HCA COMMAND, "
-			  "aborting.\n");
+		mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
 		goto out;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2ba3b7623960..0efc1368e5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
 };
 
 /* For Debug uses */
-static const char *ResourceType(enum mlx4_resource rt)
+static const char *resource_str(enum mlx4_resource rt)
 {
 	switch (rt) {
 	case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
 	int err = -EINVAL;
 	int allocated, free, reserved, guaranteed, from_free;
+	int from_rsvd;
 
 	if (slave > dev->num_vfs)
 		return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 		res_alloc->res_reserved;
 	guaranteed = res_alloc->guaranteed[slave];
 
-	if (allocated + count > res_alloc->quota[slave])
+	if (allocated + count > res_alloc->quota[slave]) {
+		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
+			  slave, port, resource_str(res_type), count,
+			  allocated, res_alloc->quota[slave]);
 		goto out;
+	}
 
 	if (allocated + count <= guaranteed) {
 		err = 0;
+		from_rsvd = count;
 	} else {
 		/* portion may need to be obtained from free area */
 		if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 		else
 			from_free = count;
 
-		if (free - from_free > reserved)
+		from_rsvd = count - from_free;
+
+		if (free - from_free >= reserved)
 			err = 0;
+		else
+			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
+				  slave, port, resource_str(res_type), free,
+				  from_free, reserved);
 	}
 
 	if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 		if (port > 0) {
 			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
 			res_alloc->res_port_free[port - 1] -= count;
+			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
 		} else {
 			res_alloc->allocated[slave] += count;
 			res_alloc->res_free -= count;
+			res_alloc->res_reserved -= from_rsvd;
 		}
 	}
 
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct resource_allocator *res_alloc =
 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
+	int allocated, guaranteed, from_rsvd;
 
 	if (slave > dev->num_vfs)
 		return;
 
 	spin_lock(&res_alloc->alloc_lock);
+
+	allocated = (port > 0) ?
+		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+		res_alloc->allocated[slave];
+	guaranteed = res_alloc->guaranteed[slave];
+
+	if (allocated - count >= guaranteed) {
+		from_rsvd = 0;
+	} else {
+		/* portion may need to be returned to reserved area */
+		if (allocated - guaranteed > 0)
+			from_rsvd = count - (allocated - guaranteed);
+		else
+			from_rsvd = count;
+	}
+
 	if (port > 0) {
 		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
 		res_alloc->res_port_free[port - 1] += count;
+		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
 	} else {
 		res_alloc->allocated[slave] -= count;
 		res_alloc->res_free += count;
+		res_alloc->res_reserved += from_rsvd;
 	}
 
 	spin_unlock(&res_alloc->alloc_lock);
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
 		ret = alloc_srq_tr(id);
 		break;
 	case RES_MAC:
-		printk(KERN_ERR "implementation missing\n");
+		pr_err("implementation missing\n");
 		return NULL;
 	case RES_COUNTER:
 		ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
 {
 	if (res->com.state == RES_MTT_BUSY ||
 	    atomic_read(&res->ref_count)) {
-		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
-		       __func__, __LINE__,
-		       mtt_states_str(res->com.state),
-		       atomic_read(&res->ref_count));
+		pr_devel("%s-%d: state %s, ref_count %d\n",
+			 __func__, __LINE__,
+			 mtt_states_str(res->com.state),
+			 atomic_read(&res->ref_count));
 		return -EBUSY;
 	} else if (res->com.state != RES_MTT_ALLOCATED)
 		return -EPERM;
@@ -3897,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 		}
 	}
 	if (!be_mac) {
-		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
 		       port);
 		return -EINVAL;
 	}
@@ -3994,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err) {
-		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
 		return err;
 	}
 	rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -4012,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	case MLX4_NET_TRANS_RULE_ID_IPV4:
 	case MLX4_NET_TRANS_RULE_ID_TCP:
 	case MLX4_NET_TRANS_RULE_ID_UDP:
-		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
 			err = -EINVAL;
 			goto err_put;
@@ -4021,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
 		break;
 	default:
-		pr_err("Corrupted mailbox.\n");
+		pr_err("Corrupted mailbox\n");
 		err = -EINVAL;
 		goto err_put;
 	}
@@ -4035,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
 	if (err) {
-		mlx4_err(dev, "Fail to add flow steering resources.\n ");
+		mlx4_err(dev, "Fail to add flow steering resources\n");
 		/* detach rule*/
 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4073,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 
 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
 	if (err) {
-		mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+		mlx4_err(dev, "Fail to remove flow steering resources\n");
 		goto out;
 	}
 
@@ -4151,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
 			if (print)
 				mlx4_dbg(dev,
 					 "%s id 0x%llx is busy\n",
-					  ResourceType(type),
+					  resource_str(type),
 					  r->res_id);
 			++busy;
 		} else {
@@ -4202,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_QP);
 	if (err)
-		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
-			  "for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4241,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
 					     MLX4_CMD_TIME_CLASS_A,
 					     MLX4_CMD_NATIVE);
 				if (err)
-					mlx4_dbg(dev, "rem_slave_qps: failed"
-						 " to move slave %d qpn %d to"
-						 " reset\n", slave,
-						 qp->local_qpn);
+					mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+						 slave, qp->local_qpn);
 				atomic_dec(&qp->rcq->ref_count);
 				atomic_dec(&qp->scq->ref_count);
 				atomic_dec(&qp->mtt->ref_count);
@@ -4278,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_SRQ);
 	if (err)
-		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4309,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
 					     MLX4_CMD_TIME_CLASS_A,
 					     MLX4_CMD_NATIVE);
 				if (err)
-					mlx4_dbg(dev, "rem_slave_srqs: failed"
-						 " to move slave %d srq %d to"
-						 " SW ownership\n",
+					mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
 						 slave, srqn);
 
 				atomic_dec(&srq->mtt->ref_count);
@@ -4346,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_CQ);
 	if (err)
-		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4377,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
 					     MLX4_CMD_TIME_CLASS_A,
 					     MLX4_CMD_NATIVE);
 				if (err)
-					mlx4_dbg(dev, "rem_slave_cqs: failed"
-						 " to move slave %d cq %d to"
-						 " SW ownership\n",
+					mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
 						 slave, cqn);
 				atomic_dec(&cq->mtt->ref_count);
 				state = RES_CQ_ALLOCATED;
@@ -4411,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_MPT);
 	if (err)
-		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4447,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 					     MLX4_CMD_TIME_CLASS_A,
 					     MLX4_CMD_NATIVE);
 				if (err)
-					mlx4_dbg(dev, "rem_slave_mrs: failed"
-						 " to move slave %d mpt %d to"
-						 " SW ownership\n",
+					mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
 						 slave, mptn);
 				if (mpt->mtt)
 					atomic_dec(&mpt->mtt->ref_count);
@@ -4481,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_MTT);
 	if (err)
-		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4584,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_EQ);
 	if (err)
-		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4617,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 					   MLX4_CMD_TIME_CLASS_A,
 					   MLX4_CMD_NATIVE);
 				if (err)
-					mlx4_dbg(dev, "rem_slave_eqs: failed"
-						 " to move slave %d eqs %d to"
-						 " SW ownership\n", slave, eqn);
+					mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+						 slave, eqn);
 				mlx4_free_cmd_mailbox(dev, mailbox);
 				atomic_dec(&eq->mtt->ref_count);
 				state = RES_EQ_RESERVED;
@@ -4648,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
-		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4679,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 
 	err = move_all_busy(dev, slave, RES_XRCD);
 	if (err)
-		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
-			  "busy for slave %d\n", slave);
+		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+			  slave);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4825,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 				       0, MLX4_CMD_UPDATE_QP,
 				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 			if (err) {
-				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
-					  "port %d, qpn %d (%d)\n",
-					  work->slave, port, qp->local_qpn,
-					  err);
+				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
 				errors++;
 			}
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 			      mlx5_command_str(msg_to_opcode(ent->in)),
 			      msg_to_opcode(ent->in));
 	}
-	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
-		      deliv_status_to_str(ent->status), ent->status);
+	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+		      err, deliv_status_to_str(ent->status), ent->status);
 
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		 */
 		rmb();
 
-		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+			      eq->eqn, eqe_type_str(eqe->type));
 		switch (eqe->type) {
 		case MLX5_EVENT_TYPE_COMP:
 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 			u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
 			s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
-			mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+			mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+				      func_id, npages);
 			mlx5_core_req_pages_handler(dev, func_id, npages);
 		}
 		break;
 
 
 		default:
-			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+				       eqe->type, eq->eqn);
 			break;
 		}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
 			return err;
 		}
 	}
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		dev_warn(&pdev->dev,
-			 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev,
-				"Can't set consistent PCI DMA mask, aborting.\n");
+				"Can't set consistent PCI DMA mask, aborting\n");
 			return err;
 		}
 	}
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
 	int err = 0;
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
 		return -ENODEV;
 	}
 
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		goto err_dbg;
 	}
 
 	err = request_bar(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
 		goto err_disable;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
 
 extern int mlx5_core_debug_mask;
 
-#define mlx5_core_dbg(dev, format, arg...)				       \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,  \
-	 current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...)					\
+	pr_debug("%s:%s:%d:(pid %d): " format,				\
+		 (dev)->priv.name, __func__, __LINE__, current->pid,	\
+		 ##__VA_ARGS__)
 
-#define mlx5_core_dbg_mask(dev, mask, format, arg...)			       \
-do {									       \
-	if ((mask) & mlx5_core_debug_mask)				       \
-		pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name,      \
-			 __func__, __LINE__, current->pid, ##arg);	       \
+#define mlx5_core_dbg_mask(dev, mask, format, ...)			\
+do {									\
+	if ((mask) & mlx5_core_debug_mask)				\
+		mlx5_core_dbg(dev, format, ##__VA_ARGS__);		\
 } while (0)
 
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,    \
-       current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...)					\
+	pr_err("%s:%s:%d:(pid %d): " format,				\
+	       (dev)->priv.name, __func__, __LINE__, current->pid,	\
+	       ##__VA_ARGS__)
 
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,   \
-	current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...)				\
+	pr_warn("%s:%s:%d:(pid %d): " format,				\
+		(dev)->priv.name, __func__, __LINE__, current->pid,	\
+		##__VA_ARGS__)
 
 enum {
 	MLX5_CMD_DATA, /* print command payload only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ac52a0fe2d3a..ba0401d4af50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 	}
 
 	if (err) {
-		mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
 		return err;
 	}
 
@@ -195,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 	}
 
 	if (out.hdr.status) {
-		mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+		mlx5_core_err(dev, "create_psv bad status %d\n",
+			      out.hdr.status);
 		return mlx5_cmd_status_to_err(&out.hdr);
 	}
 
@@ -224,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
 	}
 
 	if (out.hdr.status) {
-		mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+		mlx5_core_err(dev, "destroy_psv bad status %d\n",
+			      out.hdr.status);
 		err = mlx5_cmd_status_to_err(&out.hdr);
 		goto out;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
 	in->num_entries = cpu_to_be32(npages);
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	if (err) {
-		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+			       func_id, npages, err);
 		goto out_alloc;
 	}
 	dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
 	if (out.hdr.status) {
 		err = mlx5_cmd_status_to_err(&out.hdr);
 		if (err) {
-			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+				       func_id, npages, out.hdr.status);
 			goto out_alloc;
 		}
 	}
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
 	if (err) {
-		mlx5_core_err(dev, "failed recliaming pages\n");
+		mlx5_core_err(dev, "failed reclaiming pages\n");
 		goto out_free;
 	}
 	dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
 		err = give_pages(dev, req->func_id, req->npages, 1);
 
 	if (err)
-		mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
-			       "reclaim" : "give", err);
+		mlx5_core_warn(dev, "%s fail %d\n",
+			       req->npages < 0 ? "reclaim" : "give", err);
 
 	kfree(req);
 }
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 					      optimal_reclaimed_pages(),
 					      &nclaimed);
 		if (err) {
-			mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+			mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+				       err);
 			return err;
 		}
 		if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	if (err) {
-		mlx5_core_warn(dev, "ret %d", err);
+		mlx5_core_warn(dev, "ret %d\n", err);
 		return err;
 	}
 
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 	err = radix_tree_insert(&table->tree, qp->qpn, qp);
 	spin_unlock_irq(&table->lock);
 	if (err) {
-		mlx5_core_warn(dev, "err %d", err);
+		mlx5_core_warn(dev, "err %d\n", err);
 		goto err_cmd;
 	}
 
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3cfa9f..6c7c78baedca 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
 	if (ksp->phyiface_regs && ksp->link_irq == -1) {
 		ks8695_init_switch(ksp);
 		ksp->dtype = KS8695_DTYPE_LAN;
-		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+		ndev->ethtool_ops = &ks8695_ethtool_ops;
 	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
 		ks8695_init_wan_phy(ksp);
 		ksp->dtype = KS8695_DTYPE_WAN;
-		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+		ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
 	} else {
 		/* No initialisation since HPNA does not have a PHY */
 		ksp->dtype = KS8695_DTYPE_HPNA;
-		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+		ndev->ethtool_ops = &ks8695_ethtool_ops;
 	}
 
 	/* And bring up the net_device with the net core */
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0e5e1d..66d4ab703f45 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
 #include <linux/regulator/consumer.h>
 
 #include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 
 #include "ks8851.h"
 
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
 * @vdd_reg:	Optional regulator supplying the chip
+* @vdd_io: Optional digital power supply for IO
+* @gpio: Optional reset_n gpio
 *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
 
 	struct eeprom_93cx6	eeprom;
 	struct regulator	*vdd_reg;
+	struct regulator	*vdd_io;
+	int			gpio;
 };
 
 static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
 	struct ks8851_net *ks;
 	int ret;
 	unsigned cider;
+	int gpio;
 
 	ndev = alloc_etherdev(sizeof(struct ks8851_net));
 	if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
 	ks->spidev = spi;
 	ks->tx_space = 6144;
 
-	ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
-	if (IS_ERR(ks->vdd_reg)) {
-		ret = PTR_ERR(ks->vdd_reg);
-		if (ret == -EPROBE_DEFER)
-			goto err_reg;
-	} else {
-		ret = regulator_enable(ks->vdd_reg);
+	gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
+				       0, NULL);
+	if (gpio == -EPROBE_DEFER) {
+		ret = gpio;
+		goto err_gpio;
+	}
+
+	ks->gpio = gpio;
+	if (gpio_is_valid(gpio)) {
+		ret = devm_gpio_request_one(&spi->dev, gpio,
+					    GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
 		if (ret) {
-			dev_err(&spi->dev, "regulator enable fail: %d\n",
-				ret);
-			goto err_reg_en;
+			dev_err(&spi->dev, "reset gpio request failed\n");
+			goto err_gpio;
 		}
 	}
 
+	ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+	if (IS_ERR(ks->vdd_io)) {
+		ret = PTR_ERR(ks->vdd_io);
+		goto err_reg_io;
+	}
+
+	ret = regulator_enable(ks->vdd_io);
+	if (ret) {
+		dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
+			ret);
+		goto err_reg_io;
+	}
+
+	ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+	if (IS_ERR(ks->vdd_reg)) {
+		ret = PTR_ERR(ks->vdd_reg);
+		goto err_reg;
+	}
+
+	ret = regulator_enable(ks->vdd_reg);
+	if (ret) {
+		dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
+			ret);
+		goto err_reg;
+	}
+
+	if (gpio_is_valid(gpio)) {
+		usleep_range(10000, 11000);
+		gpio_set_value(gpio, 1);
+	}
 
 	mutex_init(&ks->lock);
 	spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
 
 	skb_queue_head_init(&ks->txq);
 
-	SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+	ndev->ethtool_ops = &ks8851_ethtool_ops;
 	SET_NETDEV_DEV(ndev, &spi->dev);
 
 	spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
 	free_irq(ndev->irq, ks);
 
 err_irq:
+	if (gpio_is_valid(gpio))
+		gpio_set_value(gpio, 0);
 err_id:
-	if (!IS_ERR(ks->vdd_reg))
-		regulator_disable(ks->vdd_reg);
-err_reg_en:
-	if (!IS_ERR(ks->vdd_reg))
-		regulator_put(ks->vdd_reg);
+	regulator_disable(ks->vdd_reg);
 err_reg:
+	regulator_disable(ks->vdd_io);
+err_reg_io:
+err_gpio:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
 
 	unregister_netdev(priv->netdev);
 	free_irq(spi->irq, priv);
1550 if (!IS_ERR(priv->vdd_reg)) { 1591 if (gpio_is_valid(priv->gpio))
1551 regulator_disable(priv->vdd_reg); 1592 gpio_set_value(priv->gpio, 0);
1552 regulator_put(priv->vdd_reg); 1593 regulator_disable(priv->vdd_reg);
1553 } 1594 regulator_disable(priv->vdd_io);
1554 free_netdev(priv->netdev); 1595 free_netdev(priv->netdev);
1555 1596
1556 return 0; 1597 return 0;
1557} 1598}
1558 1599
1600static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" },
1602 { }
1603};
1604
1559static struct spi_driver ks8851_driver = { 1605static struct spi_driver ks8851_driver = {
1560 .driver = { 1606 .driver = {
1561 .name = "ks8851", 1607 .name = "ks8851",
1608 .of_match_table = ks8851_match_table,
1562 .owner = THIS_MODULE, 1609 .owner = THIS_MODULE,
1563 .pm = &ks8851_pm_ops, 1610 .pm = &ks8851_pm_ops,
1564 }, 1611 },
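
The ks8851 probe rework above does three things: it adds an optional reset GPIO and a second (vdd-io) supply, it moves both regulators to devm_regulator_get() so the unwind path no longer needs regulator_put(), and it adds an of_match_table so the driver can bind from devicetree. The -EPROBE_DEFER handling deserves a note: only that error aborts the probe, because a missing "reset-gpios" property is legal. A minimal sketch of the defer-aware lookup, assuming a DT node with an optional reset line:

    gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios", 0, NULL);
    if (gpio == -EPROBE_DEFER)
            return gpio;         /* GPIO controller not probed yet; retry later */
    if (gpio_is_valid(gpio))     /* property present: hold the chip in reset */
            ret = devm_gpio_request_one(&spi->dev, gpio,
                                        GPIOF_OUT_INIT_LOW, "ks8851_rst_n");

Note also that devm_regulator_get() typically hands back a dummy supply when the board describes none, which is why the new error paths can call regulator_disable() unconditionally instead of guarding every call with IS_ERR().
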
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2bc09f..064a48d0c368 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
4930 * Only reset the hardware if time between calls is long 4930 * Only reset the hardware if time between calls is long
4931 * enough. 4931 * enough.
4932 */ 4932 */
4933 if (jiffies - last_reset <= dev->watchdog_timeo) 4933 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
4934 hw_priv = NULL; 4934 hw_priv = NULL;
4935 } 4935 }
4936 4936
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7072 dev = alloc_etherdev(sizeof(struct dev_priv)); 7072 dev = alloc_etherdev(sizeof(struct dev_priv));
7073 if (!dev) 7073 if (!dev)
7074 goto pcidev_init_reg_err; 7074 goto pcidev_init_reg_err;
7075 SET_NETDEV_DEV(dev, &pdev->dev);
7075 info->netdev[i] = dev; 7076 info->netdev[i] = dev;
7076 7077
7077 priv = netdev_priv(dev); 7078 priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7106 } 7107 }
7107 7108
7108 dev->netdev_ops = &netdev_ops; 7109 dev->netdev_ops = &netdev_ops;
7109 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7110 dev->ethtool_ops = &netdev_ethtool_ops;
7110 if (register_netdev(dev)) 7111 if (register_netdev(dev))
7111 goto pcidev_init_reg_err; 7112 goto pcidev_init_reg_err;
7112 port_set_power_saving(port, true); 7113 port_set_power_saving(port, true);
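
Two small fixes in the ksz884x hunks above: SET_NETDEV_DEV() before register_netdev() gives the net_device a parent in sysfs, and the open-coded jiffies comparison becomes time_before_eq(), which stays correct when jiffies wraps because the time_*() macros compare via signed subtraction (roughly, time_before_eq(a, b) is ((long)((a) - (b)) <= 0)). Sketch of the wrap-safe form:

    #include <linux/jiffies.h>

    /* true while we are still inside the watchdog window, even across a wrap */
    if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
            hw_priv = NULL;      /* too soon since the last reset */
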
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa21f22..b1b5f66b8b69 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
1593 dev->irq = spi->irq; 1593 dev->irq = spi->irq;
1594 dev->netdev_ops = &enc28j60_netdev_ops; 1594 dev->netdev_ops = &enc28j60_netdev_ops;
1595 dev->watchdog_timeo = TX_TIMEOUT; 1595 dev->watchdog_timeo = TX_TIMEOUT;
1596 SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); 1596 dev->ethtool_ops = &enc28j60_ethtool_ops;
1597 1597
1598 enc28j60_lowpower(priv, true); 1598 enc28j60_lowpower(priv, true);
1599 1599
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b204efa..f3d5d79f1cd1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, 4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
4113 (unsigned long)mgp); 4113 (unsigned long)mgp);
4114 4114
4115 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 4115 netdev->ethtool_ops = &myri10ge_ethtool_ops;
4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); 4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
4117 status = register_netdev(netdev); 4117 status = register_netdev(netdev);
4118 if (status != 0) { 4118 if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a437f46..291fba8b9f07 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
927 dev->netdev_ops = &natsemi_netdev_ops; 927 dev->netdev_ops = &natsemi_netdev_ops;
928 dev->watchdog_timeo = TX_TIMEOUT; 928 dev->watchdog_timeo = TX_TIMEOUT;
929 929
930 SET_ETHTOOL_OPS(dev, &ethtool_ops); 930 dev->ethtool_ops = &ethtool_ops;
931 931
932 if (mtu) 932 if (mtu)
933 dev->mtu = mtu; 933 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1de49ec..19bb8244b9e3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device); 2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device);
2031 2031
2032 ndev->netdev_ops = &netdev_ops; 2032 ndev->netdev_ops = &netdev_ops;
2033 SET_ETHTOOL_OPS(ndev, &ops); 2033 ndev->ethtool_ops = &ops;
2034 ndev->watchdog_timeo = 5 * HZ; 2034 ndev->watchdog_timeo = 5 * HZ;
2035 pci_set_drvdata(pci_dev, ndev); 2035 pci_set_drvdata(pci_dev, ndev);
2036 2036
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..be587647c706 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
534 netif_tx_start_all_queues(sp->dev); 534 netif_tx_start_all_queues(sp->dev);
535} 535}
536 536
537static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
538{
539 if (!sp->config.multiq)
540 sp->mac_control.fifos[fifo_no].queue_state =
541 FIFO_QUEUE_START;
542
543 netif_tx_start_all_queues(sp->dev);
544}
545
546static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp) 537static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
547{ 538{
548 if (!sp->config.multiq) { 539 if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5369 ethtool_cmd_speed_set(info, SPEED_10000); 5360 ethtool_cmd_speed_set(info, SPEED_10000);
5370 info->duplex = DUPLEX_FULL; 5361 info->duplex = DUPLEX_FULL;
5371 } else { 5362 } else {
5372 ethtool_cmd_speed_set(info, -1); 5363 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
5373 info->duplex = -1; 5364 info->duplex = DUPLEX_UNKNOWN;
5374 } 5365 }
5375 5366
5376 info->autoneg = AUTONEG_DISABLE; 5367 info->autoneg = AUTONEG_DISABLE;
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7919 7910
7920 /* Driver entry points */ 7911 /* Driver entry points */
7921 dev->netdev_ops = &s2io_netdev_ops; 7912 dev->netdev_ops = &s2io_netdev_ops;
7922 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7913 dev->ethtool_ops = &netdev_ethtool_ops;
7923 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 7914 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7924 NETIF_F_TSO | NETIF_F_TSO6 | 7915 NETIF_F_TSO | NETIF_F_TSO6 |
7925 NETIF_F_RXCSUM | NETIF_F_LRO; 7916 NETIF_F_RXCSUM | NETIF_F_LRO;
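
The s2io ethtool hunk swaps raw -1 for the uapi constants SPEED_UNKNOWN ((__u32)-1) and DUPLEX_UNKNOWN (0xff). Since ethtool_cmd.duplex is a __u8, the old -1 already truncated to 0xff, so the change is behavior-preserving but self-documenting:

    } else {
            ethtool_cmd_speed_set(info, SPEED_UNKNOWN);  /* (__u32)-1 */
            info->duplex = DUPLEX_UNKNOWN;               /* 0xff */
    }
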
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713b9f7b..2bbd01fcb9b0 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{ 120{
121 u64 val64; 121 u64 val64;
122 u32 i = 0; 122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124 123
125 udelay(10); 124 udelay(10);
126 125
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
139 mdelay(1); 138 mdelay(1);
140 } while (++i <= max_millis); 139 } while (++i <= max_millis);
141 140
142 return ret; 141 return VXGE_HW_FAIL;
143} 142}
144 143
145static inline enum vxge_hw_status 144static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
1682 struct __vxge_hw_device *hldev, 1681 struct __vxge_hw_device *hldev,
1683 struct vxge_hw_device_stats_sw_info *sw_stats) 1682 struct vxge_hw_device_stats_sw_info *sw_stats)
1684{ 1683{
1685 enum vxge_hw_status status = VXGE_HW_OK;
1686
1687 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, 1684 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1688 sizeof(struct vxge_hw_device_stats_sw_info)); 1685 sizeof(struct vxge_hw_device_stats_sw_info));
1689 1686
1690 return status; 1687 return VXGE_HW_OK;
1691} 1688}
1692 1689
1693/* 1690/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
3228vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) 3225vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3229{ 3226{
3230 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 3227 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3231 enum vxge_hw_status status = VXGE_HW_OK;
3232 int i = 0, j = 0; 3228 int i = 0, j = 0;
3233 3229
3234 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3230 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3241 return VXGE_HW_FAIL; 3237 return VXGE_HW_FAIL;
3242 } 3238 }
3243 } 3239 }
3244 return status; 3240 return VXGE_HW_OK;
3245} 3241}
3246/* 3242/*
3247 * vxge_hw_mgmt_reg_Write - Write Titan register. 3243 * vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
3979{ 3975{
3980 u32 i, mtu = 0, max_pyld = 0; 3976 u32 i, mtu = 0, max_pyld = 0;
3981 u64 val64; 3977 u64 val64;
3982 enum vxge_hw_status status = VXGE_HW_OK;
3983 3978
3984 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { 3979 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3985 3980
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
4009 else 4004 else
4010 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); 4005 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4011 4006
4012 return status; 4007 return VXGE_HW_OK;
4013} 4008}
4014 4009
4015/* 4010/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
4039__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) 4034__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4040{ 4035{
4041 u64 val64; 4036 u64 val64;
4042 enum vxge_hw_status status = VXGE_HW_OK;
4043 4037
4044 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); 4038 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4045 4039
4046 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 4040 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4047 &hldev->common_reg->cmn_rsthdlr_cfg0); 4041 &hldev->common_reg->cmn_rsthdlr_cfg0);
4048 4042
4049 return status; 4043 return VXGE_HW_OK;
4050} 4044}
4051 4045
4052/* 4046/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
4227__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4221__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4228{ 4222{
4229 u64 val64; 4223 u64 val64;
4230 enum vxge_hw_status status = VXGE_HW_OK;
4231 struct __vxge_hw_virtualpath *vpath; 4224 struct __vxge_hw_virtualpath *vpath;
4232 struct vxge_hw_vp_config *vp_config; 4225 struct vxge_hw_vp_config *vp_config;
4233 struct vxge_hw_vpath_reg __iomem *vp_reg; 4226 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4283 4276
4284 writeq(val64, &vp_reg->rxmac_vcfg1); 4277 writeq(val64, &vp_reg->rxmac_vcfg1);
4285 } 4278 }
4286 return status; 4279 return VXGE_HW_OK;
4287} 4280}
4288 4281
4289/* 4282/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
4295__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4288__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4296{ 4289{
4297 u64 val64; 4290 u64 val64;
4298 enum vxge_hw_status status = VXGE_HW_OK;
4299 struct __vxge_hw_virtualpath *vpath; 4291 struct __vxge_hw_virtualpath *vpath;
4300 struct vxge_hw_vpath_reg __iomem *vp_reg; 4292 struct vxge_hw_vpath_reg __iomem *vp_reg;
4301 struct vxge_hw_vp_config *config; 4293 struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4545 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); 4537 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4546 writeq(val64, &vp_reg->tim_wrkld_clc); 4538 writeq(val64, &vp_reg->tim_wrkld_clc);
4547 4539
4548 return status; 4540 return VXGE_HW_OK;
4549} 4541}
4550 4542
4551/* 4543/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f073880f84..b07d552a27d4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
62 ethtool_cmd_speed_set(info, SPEED_10000); 62 ethtool_cmd_speed_set(info, SPEED_10000);
63 info->duplex = DUPLEX_FULL; 63 info->duplex = DUPLEX_FULL;
64 } else { 64 } else {
65 ethtool_cmd_speed_set(info, -1); 65 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
66 info->duplex = -1; 66 info->duplex = DUPLEX_UNKNOWN;
67 } 67 }
68 68
69 info->autoneg = AUTONEG_DISABLE; 69 info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1128 1128
1129void vxge_initialize_ethtool_ops(struct net_device *ndev) 1129void vxge_initialize_ethtool_ops(struct net_device *ndev)
1130{ 1130{
1131 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); 1131 ndev->ethtool_ops = &vxge_ethtool_ops;
1132} 1132}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) 2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2123{ 2123{
2124 fifo->interrupt_count++; 2124 fifo->interrupt_count++;
2125 if (jiffies > fifo->jiffies + HZ / 100) { 2125 if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle; 2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2127 2127
2128 fifo->jiffies = jiffies; 2128 fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) 2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2151{ 2151{
2152 ring->interrupt_count++; 2152 ring->interrupt_count++;
2153 if (jiffies > ring->jiffies + HZ / 100) { 2153 if (time_before(ring->jiffies + HZ / 100, jiffies)) {
2154 struct __vxge_hw_ring *hw_ring = ring->handle; 2154 struct __vxge_hw_ring *hw_ring = ring->handle;
2155 2155
2156 ring->jiffies = jiffies; 2156 ring->jiffies = jiffies;
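
On the argument order in the vxge hunks above: time_before(a, b) is true when a is the earlier timestamp, so the converted test is the same wrap-safe check as time_after(jiffies, stamp + HZ / 100), i.e. "at least HZ/100 jiffies (about 10 ms) have elapsed since the stamp". The open-coded comparison it replaces breaks across a jiffies wrap. Equivalent spellings:

    if (time_before(ring->jiffies + HZ / 100, jiffies))  /* as in the hunk */
    if (time_after(jiffies, ring->jiffies + HZ / 100))   /* identical test */
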
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464aeab3..9afc536c5734 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
406 406
407#define NV_RX_DESCRIPTORVALID (1<<16) 407#define NV_RX_DESCRIPTORVALID (1<<16)
408#define NV_RX_MISSEDFRAME (1<<17) 408#define NV_RX_MISSEDFRAME (1<<17)
409#define NV_RX_SUBSTRACT1 (1<<18) 409#define NV_RX_SUBTRACT1 (1<<18)
410#define NV_RX_ERROR1 (1<<23) 410#define NV_RX_ERROR1 (1<<23)
411#define NV_RX_ERROR2 (1<<24) 411#define NV_RX_ERROR2 (1<<24)
412#define NV_RX_ERROR3 (1<<25) 412#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
425#define NV_RX2_DESCRIPTORVALID (1<<29) 425#define NV_RX2_DESCRIPTORVALID (1<<29)
426#define NV_RX2_SUBSTRACT1 (1<<25) 426#define NV_RX2_SUBTRACT1 (1<<25)
427#define NV_RX2_ERROR1 (1<<18) 427#define NV_RX2_ERROR1 (1<<18)
428#define NV_RX2_ERROR2 (1<<19) 428#define NV_RX2_ERROR2 (1<<19)
429#define NV_RX2_ERROR3 (1<<20) 429#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2832 } 2832 }
2833 /* framing errors are soft errors */ 2833 /* framing errors are soft errors */
2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2835 if (flags & NV_RX_SUBSTRACT1) 2835 if (flags & NV_RX_SUBTRACT1)
2836 len--; 2836 len--;
2837 } 2837 }
2838 /* the rest are hard errors */ 2838 /* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2863 } 2863 }
2864 /* framing errors are soft errors */ 2864 /* framing errors are soft errors */
2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2866 if (flags & NV_RX2_SUBSTRACT1) 2866 if (flags & NV_RX2_SUBTRACT1)
2867 len--; 2867 len--;
2868 } 2868 }
2869 /* the rest are hard errors */ 2869 /* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2937 } 2937 }
2938 /* framing errors are soft errors */ 2938 /* framing errors are soft errors */
2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2940 if (flags & NV_RX2_SUBSTRACT1) 2940 if (flags & NV_RX2_SUBTRACT1)
2941 len--; 2941 len--;
2942 } 2942 }
2943 /* the rest are hard errors */ 2943 /* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4285 if (np->duplex) 4285 if (np->duplex)
4286 ecmd->duplex = DUPLEX_FULL; 4286 ecmd->duplex = DUPLEX_FULL;
4287 } else { 4287 } else {
4288 speed = -1; 4288 speed = SPEED_UNKNOWN;
4289 ecmd->duplex = -1; 4289 ecmd->duplex = DUPLEX_UNKNOWN;
4290 } 4290 }
4291 ethtool_cmd_speed_set(ecmd, speed); 4291 ethtool_cmd_speed_set(ecmd, speed);
4292 ecmd->autoneg = np->autoneg; 4292 ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5766 dev->netdev_ops = &nv_netdev_ops_optimized; 5766 dev->netdev_ops = &nv_netdev_ops_optimized;
5767 5767
5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5769 SET_ETHTOOL_OPS(dev, &ops); 5769 dev->ethtool_ops = &ops;
5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5771 5771
5772 pci_set_drvdata(pci_dev, dev); 5772 pci_set_drvdata(pci_dev, dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b51ac24..8706c0dbd0c3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1361 __lpc_eth_clock_enable(pldat, true); 1361 __lpc_eth_clock_enable(pldat, true);
1362 1362
1363 /* Map IO space */ 1363 /* Map IO space */
1364 pldat->net_base = ioremap(res->start, res->end - res->start + 1); 1364 pldat->net_base = ioremap(res->start, resource_size(res));
1365 if (!pldat->net_base) { 1365 if (!pldat->net_base) {
1366 dev_err(&pdev->dev, "failed to map registers\n"); 1366 dev_err(&pdev->dev, "failed to map registers\n");
1367 ret = -ENOMEM; 1367 ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1417 } 1417 }
1418 pldat->dma_buff_base_p = dma_handle; 1418 pldat->dma_buff_base_p = dma_handle;
1419 1419
1420 netdev_dbg(ndev, "IO address start :0x%08x\n", 1420 netdev_dbg(ndev, "IO address space :%pR\n", res);
1421 res->start); 1421 netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
1422 netdev_dbg(ndev, "IO address size :%d\n",
1423 res->end - res->start + 1);
1424 netdev_dbg(ndev, "IO address (mapped) :0x%p\n", 1422 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1425 pldat->net_base); 1423 pldat->net_base);
1426 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); 1424 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
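
The lpc_eth cleanups above replace the open-coded size computation with the resource_size() helper and print the region with the %pR format specifier, which renders a whole struct resource (range plus flags) in one go. Sketch:

    /* resource_size(res) == res->end - res->start + 1, without the
     * easy-to-forget "+ 1" */
    pldat->net_base = ioremap(res->start, resource_size(res));
    netdev_dbg(ndev, "IO address space :%pR\n", res);
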
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffde9700..44c8be1c6805 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
4 4
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI && (X86 || COMPILE_TEST) 7 depends on PCI && (X86_32 || COMPILE_TEST)
8 select MII 8 select MII
9 select PTP_1588_CLOCK_PCH 9 select PTP_1588_CLOCK_PCH
10 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0ccdc23c..4fe8ea96bd25 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); 91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
92 92
93 if (!netif_carrier_ok(adapter->netdev)) 93 if (!netif_carrier_ok(adapter->netdev))
94 ethtool_cmd_speed_set(ecmd, -1); 94 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
95 return ret; 95 return ret;
96} 96}
97 97
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
508 508
509void pch_gbe_set_ethtool_ops(struct net_device *netdev) 509void pch_gbe_set_ethtool_ops(struct net_device *netdev)
510{ 510{
511 SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops); 511 netdev->ethtool_ops = &pch_gbe_ethtool_ops;
512} 512}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3c1971..9a997e4c3e08 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
724 724
725 /* The Hamachi-specific entries in the device structure. */ 725 /* The Hamachi-specific entries in the device structure. */
726 dev->netdev_ops = &hamachi_netdev_ops; 726 dev->netdev_ops = &hamachi_netdev_ops;
727 if (chip_tbl[hmp->chip_id].flags & CanHaveMII) 727 dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
728 SET_ETHTOOL_OPS(dev, &ethtool_ops); 728 &ethtool_ops : &ethtool_ops_no_mii;
729 else
730 SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
731 dev->watchdog_timeo = TX_TIMEOUT; 729 dev->watchdog_timeo = TX_TIMEOUT;
732 if (mtu) 730 if (mtu)
733 dev->mtu = mtu; 731 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb482dcd0..69a8dc095072 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
472 472
473 /* The Yellowfin-specific entries in the device structure. */ 473 /* The Yellowfin-specific entries in the device structure. */
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 SET_ETHTOOL_OPS(dev, &ethtool_ops); 475 dev->ethtool_ops = &ethtool_ops;
476 dev->watchdog_timeo = TX_TIMEOUT; 476 dev->watchdog_timeo = TX_TIMEOUT;
477 477
478 if (mtu) 478 if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
66 Say Y here if you want to enable hardware offload support for 66 Say Y here if you want to enable hardware offload support for
67 Virtual eXtensible Local Area Network (VXLAN) in the driver. 67 Virtual eXtensible Local Area Network (VXLAN) in the driver.
68 68
69config QLCNIC_HWMON
70 bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
71 depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
72 default y
73 ---help---
74 This configuration parameter can be used to read the
75 board temperature in Converged Ethernet devices
76 supported by qlcnic.
77
78 This data is available via the hwmon sysfs interface.
79
69config QLGE 80config QLGE
70 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 81 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
71 depends on PCI 82 depends on PCI
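
A note on the new QLCNIC_HWMON option above: the clause "depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)" is the standard Kconfig guard for a bool feature bridging two subsystems. It excludes exactly the one illegal combination, a built-in (=y) qlcnic calling into a modular (=m) hwmon core, while leaving every other y/m pairing selectable.
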
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d669b3..5bf05818a12c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1373 1373
1374 netxen_nic_change_mtu(netdev, netdev->mtu); 1374 netxen_nic_change_mtu(netdev, netdev->mtu);
1375 1375
1376 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 1376 netdev->ethtool_ops = &netxen_nic_ethtool_ops;
1377 1377
1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1379 NETIF_F_RXCSUM; 1379 NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44f8914..b5d6bc1a8b00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
3838 3838
3839 /* Set driver entry points */ 3839 /* Set driver entry points */
3840 ndev->netdev_ops = &ql3xxx_netdev_ops; 3840 ndev->netdev_ops = &ql3xxx_netdev_ops;
3841 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3841 ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3842 ndev->watchdog_timeo = 5 * HZ; 3842 ndev->watchdog_timeo = 5 * HZ;
3843 3843
3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..be618b9e874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
39 39
40#define _QLCNIC_LINUX_MAJOR 5 40#define _QLCNIC_LINUX_MAJOR 5
41#define _QLCNIC_LINUX_MINOR 3 41#define _QLCNIC_LINUX_MINOR 3
42#define _QLCNIC_LINUX_SUBVERSION 57 42#define _QLCNIC_LINUX_SUBVERSION 60
43#define QLCNIC_LINUX_VERSIONID "5.3.57" 43#define QLCNIC_LINUX_VERSIONID "5.3.60"
44#define QLCNIC_DRV_IDC_VER 0x01 44#define QLCNIC_DRV_IDC_VER 0x01
45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
441 u32 rsvd1[0]; 441 u32 rsvd1[0];
442}; 442};
443 443
444#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
445
444struct qlcnic_fw_dump { 446struct qlcnic_fw_dump {
445 u8 clr; /* flag to indicate if dump is cleared */ 447 u8 clr; /* flag to indicate if dump is cleared */
446 bool enable; /* enable/disable dump */ 448 bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
537 u8 phys_port_id[ETH_ALEN]; 539 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 540 u8 lb_mode;
539 u16 vxlan_port; 541 u16 vxlan_port;
542 struct device *hwmon_dev;
540}; 543};
541 544
542struct qlcnic_adapter_stats { 545struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
1018#define QLCNIC_DEL_VXLAN_PORT 0x200000 1021#define QLCNIC_DEL_VXLAN_PORT 0x200000
1019#endif 1022#endif
1020 1023
1024#define QLCNIC_VLAN_FILTERING 0x800000
1025
1021#define QLCNIC_IS_MSI_FAMILY(adapter) \ 1026#define QLCNIC_IS_MSI_FAMILY(adapter) \
1022 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 1027 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
1023#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 1028#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
1316#define QL_STATUS_INVALID_PARAM -1 1321#define QL_STATUS_INVALID_PARAM -1
1317 1322
1318#define MAX_BW 100 /* % of link speed */ 1323#define MAX_BW 100 /* % of link speed */
1324#define MIN_BW 1 /* % of link speed */
1319#define MAX_VLAN_ID 4095 1325#define MAX_VLAN_ID 4095
1320#define MIN_VLAN_ID 2 1326#define MIN_VLAN_ID 2
1321#define DEFAULT_MAC_LEARN 1 1327#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1692int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); 1698int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1693void qlcnic_set_netdev_features(struct qlcnic_adapter *, 1699void qlcnic_set_netdev_features(struct qlcnic_adapter *,
1694 struct qlcnic_esw_func_cfg *); 1700 struct qlcnic_esw_func_cfg *);
1695void qlcnic_sriov_vf_schedule_multi(struct net_device *); 1701void qlcnic_sriov_vf_set_multi(struct net_device *);
1696int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8); 1702int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
1697int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *, 1703int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
1698 u16 *); 1704 u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
2338 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; 2344 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
2339} 2345}
2340 2346
2347static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
2348{
2349 bool status;
2350
2351 status = (qlcnic_sriov_pf_check(adapter) ||
2352 qlcnic_sriov_vf_check(adapter)) ? true : false;
2353
2354 return status;
2355}
2356
2341static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) 2357static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2342{ 2358{
2343 if (qlcnic_84xx_check(adapter)) 2359 if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2345 else 2361 else
2346 return QLC_DEFAULT_VNIC_COUNT; 2362 return QLC_DEFAULT_VNIC_COUNT;
2347} 2363}
2364
2365#ifdef CONFIG_QLCNIC_HWMON
2366void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
2367void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
2368#else
2369static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
2370{
2371 return;
2372}
2373static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
2374{
2375 return;
2376}
2377#endif
2348#endif /* __QLCNIC_H_ */ 2378#endif /* __QLCNIC_H_ */
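
The qlcnic.h hunk ends with the usual pattern for optional features: when CONFIG_QLCNIC_HWMON is disabled, the register/unregister calls compile to empty static inlines, so callers never need an #ifdef (the bare "return;" in a void inline is redundant but harmless). A generic sketch of the pattern, with hypothetical names:

    struct example_dev;

    #ifdef CONFIG_EXAMPLE_FEATURE
    void example_feature_register(struct example_dev *dev);
    #else
    static inline void example_feature_register(struct example_dev *dev) { }
    #endif
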
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
33#define RSS_HASHTYPE_IP_TCP 0x3 33#define RSS_HASHTYPE_IP_TCP 0x3
34#define QLC_83XX_FW_MBX_CMD 0 34#define QLC_83XX_FW_MBX_CMD 0
35#define QLC_SKIP_INACTIVE_PCI_REGS 7 35#define QLC_SKIP_INACTIVE_PCI_REGS 7
36#define QLC_MAX_LEGACY_FUNC_SUPP 8
36 37
37static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 38static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
38 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 39 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
357 if (!ahw->intr_tbl) 358 if (!ahw->intr_tbl)
358 return -ENOMEM; 359 return -ENOMEM;
359 360
360 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 361 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
362 if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
363 dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
364 ahw->pci_func);
365 return -EOPNOTSUPP;
366 }
367
361 qlcnic_83xx_enable_legacy(adapter); 368 qlcnic_83xx_enable_legacy(adapter);
369 }
362 370
363 for (i = 0; i < num_msix; i++) { 371 for (i = 0; i < num_msix; i++) {
364 if (adapter->flags & QLCNIC_MSIX_ENABLED) 372 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
879 return 0; 887 return 0;
880 } 888 }
881 } 889 }
890
891 dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
892 __func__, type);
882 return -EINVAL; 893 return -EINVAL;
883} 894}
884 895
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
3026 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); 3037 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
3027} 3038}
3028 3039
3029int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, 3040int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
3030 u32 *data, u32 count) 3041 u32 *data, u32 count)
3031{ 3042{
3032 int i, j, ret = 0; 3043 int i, j, ret = 0;
3033 u32 temp; 3044 u32 temp;
3034 int err = 0;
3035 3045
3036 /* Check alignment */ 3046 /* Check alignment */
3037 if (addr & 0xF) 3047 if (addr & 0xF)
3038 return -EIO; 3048 return -EIO;
3039 3049
3040 mutex_lock(&adapter->ahw->mem_lock); 3050 mutex_lock(&adapter->ahw->mem_lock);
3041 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0); 3051 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
3042 3052
3043 for (i = 0; i < count; i++, addr += 16) { 3053 for (i = 0; i < count; i++, addr += 16) {
3044 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, 3054 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
3049 return -EIO; 3059 return -EIO;
3050 } 3060 }
3051 3061
3052 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr); 3062 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
3053 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO, 3063 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
3054 *data++); 3064 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
3055 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI, 3065 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
3056 *data++); 3066 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
3057 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO, 3067 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
3058 *data++); 3068 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
3059 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
3060 *data++);
3061 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
3062 QLCNIC_TA_WRITE_ENABLE);
3063 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
3064 QLCNIC_TA_WRITE_START);
3065 3069
3066 for (j = 0; j < MAX_CTL_CHECK; j++) { 3070 for (j = 0; j < MAX_CTL_CHECK; j++) {
3067 temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err); 3071 temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
3068 if (err == -EIO) {
3069 mutex_unlock(&adapter->ahw->mem_lock);
3070 return err;
3071 }
3072 3072
3073 if ((temp & TA_CTL_BUSY) == 0) 3073 if ((temp & TA_CTL_BUSY) == 0)
3074 break; 3074 break;
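
The rename from qlcnic_83xx_ms_mem_write128() to qlcnic_ms_mem_write128() goes with the switch from the 83xx-only qlcnic_83xx_wrt_reg_indirect() to the chip-agnostic qlcnic_ind_wr()/qlcnic_ind_rd() helpers (qlcnic_ind_wr() gains an int return in the qlcnic_hw.c hunk below): the MS-memory writer can now serve 82xx adapters as well, which is presumably why the QLC_PEX_DMA_READ_SIZE buffer constant appears in qlcnic.h above.
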
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..2bf101a47d02 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
418#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) 418#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
419#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) 419#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
420#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) 420#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
421#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
422#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) 421#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
423#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) 422#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
424#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) 423#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
560void qlcnic_83xx_napi_enable(struct qlcnic_adapter *); 559void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
561void qlcnic_83xx_napi_disable(struct qlcnic_adapter *); 560void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
562int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32); 561int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
563void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32); 562int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
564int qlcnic_ind_rd(struct qlcnic_adapter *, u32); 563int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
565int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); 564int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
566int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, 565int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
617int qlcnic_83xx_lock_driver(struct qlcnic_adapter *); 616int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
618void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *); 617void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
619int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *); 618int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
620int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
621int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *); 619int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
622int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int); 620int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
623int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); 621int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
659u32 qlcnic_83xx_get_cap_size(void *, int); 657u32 qlcnic_83xx_get_cap_size(void *, int);
660void qlcnic_83xx_set_sys_info(void *, int, u32); 658void qlcnic_83xx_set_sys_info(void *, int, u32);
661void qlcnic_83xx_store_cap_mask(void *, u32); 659void qlcnic_83xx_store_cap_mask(void *, u32);
660int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
662#endif 661#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1363 return ret; 1363 return ret;
1364 } 1364 }
1365 /* 16 byte write to MS memory */ 1365 /* 16 byte write to MS memory */
1366 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, 1366 ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
1367 size / 16); 1367 size / 16);
1368 if (ret) { 1368 if (ret) {
1369 vfree(p_cache); 1369 vfree(p_cache);
1370 return ret; 1370 return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1389 p_cache = (u32 *)fw->data; 1389 p_cache = (u32 *)fw->data;
1390 addr = (u64)dest; 1390 addr = (u64)dest;
1391 1391
1392 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1392 ret = qlcnic_ms_mem_write128(adapter, addr,
1393 p_cache, size / 16); 1393 p_cache, size / 16);
1394 if (ret) { 1394 if (ret) {
1395 dev_err(&adapter->pdev->dev, "MS memory write failed\n"); 1395 dev_err(&adapter->pdev->dev, "MS memory write failed\n");
1396 release_firmware(fw); 1396 release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1405 data[i] = fw->data[size + i]; 1405 data[i] = fw->data[size + i];
1406 for (; i < 16; i++) 1406 for (; i < 16; i++)
1407 data[i] = 0; 1407 data[i] = 0;
1408 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1408 ret = qlcnic_ms_mem_write128(adapter, addr,
1409 (u32 *)data, 1); 1409 (u32 *)data, 1);
1410 if (ret) { 1410 if (ret) {
1411 dev_err(&adapter->pdev->dev, 1411 dev_err(&adapter->pdev->dev,
1412 "MS memory write failed\n"); 1412 "MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS; 2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2182 max_tx_rings = QLCNIC_MAX_TX_RINGS; 2182 max_tx_rings = QLCNIC_MAX_TX_RINGS;
2183 } else { 2183 } else {
2184 dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
2185 __func__, ret);
2184 return -EIO; 2186 return -EIO;
2185 } 2187 }
2186 2188
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
1027 u32 arg1; 1027 u32 arg1;
1028 1028
1029 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || 1029 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
1030 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) 1030 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
1031 dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
1032 __func__);
1031 return err; 1033 return err;
1034 }
1032 1035
1033 arg1 = id | (enable_mirroring ? BIT_4 : 0); 1036 arg1 = id | (enable_mirroring ? BIT_4 : 0);
1034 arg1 |= pci_func << 8; 1037 arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1318 u32 arg1, arg2 = 0; 1321 u32 arg1, arg2 = 0;
1319 u8 pci_func; 1322 u8 pci_func;
1320 1323
1321 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1324 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
1325 dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
1326 __func__);
1322 return err; 1327 return err;
1328 }
1329
1323 pci_func = esw_cfg->pci_func; 1330 pci_func = esw_cfg->pci_func;
1324 index = qlcnic_is_valid_nic_func(adapter, pci_func); 1331 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1325 if (index < 0) 1332 if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1363 arg1 &= ~(0x0ffff << 16); 1370 arg1 &= ~(0x0ffff << 16);
1364 break; 1371 break;
1365 default: 1372 default:
1373 dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
1374 __func__, esw_cfg->op_mode);
1366 return err; 1375 return err;
1367 } 1376 }
1368 1377
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf5210aed..1b7f3dbae289 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
726 struct qlcnic_adapter *adapter = netdev_priv(dev); 726 struct qlcnic_adapter *adapter = netdev_priv(dev);
727 int err; 727 int err;
728 728
729 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
730 netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
731 return -EINVAL;
732 }
733
729 if (channel->other_count || channel->combined_count) 734 if (channel->other_count || channel->combined_count)
730 return -EINVAL; 735 return -EINVAL;
731 736
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
734 if (err) 739 if (err)
735 return err; 740 return err;
736 741
737 if (channel->rx_count) { 742 if (adapter->drv_sds_rings != channel->rx_count) {
738 err = qlcnic_validate_rings(adapter, channel->rx_count, 743 err = qlcnic_validate_rings(adapter, channel->rx_count,
739 QLCNIC_RX_QUEUE); 744 QLCNIC_RX_QUEUE);
740 if (err) { 745 if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
745 adapter->drv_rss_rings = channel->rx_count; 750 adapter->drv_rss_rings = channel->rx_count;
746 } 751 }
747 752
748 if (channel->tx_count) { 753 if (adapter->drv_tx_rings != channel->tx_count) {
749 err = qlcnic_validate_rings(adapter, channel->tx_count, 754 err = qlcnic_validate_rings(adapter, channel->tx_count,
750 QLCNIC_TX_QUEUE); 755 QLCNIC_TX_QUEUE);
751 if (err) { 756 if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..851cb4a80d50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
373 return data; 373 return data;
374} 374}
375 375
376void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) 376int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
377{ 377{
378 int ret = 0;
379
378 if (qlcnic_82xx_check(adapter)) 380 if (qlcnic_82xx_check(adapter))
379 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data); 381 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
380 else 382 else
381 qlcnic_83xx_wrt_reg_indirect(adapter, addr, data); 383 ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
384
385 return ret;
382} 386}
383 387
384static int 388static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
567void qlcnic_set_multi(struct net_device *netdev) 571void qlcnic_set_multi(struct net_device *netdev)
568{ 572{
569 struct qlcnic_adapter *adapter = netdev_priv(netdev); 573 struct qlcnic_adapter *adapter = netdev_priv(netdev);
570 struct qlcnic_mac_vlan_list *cur;
571 struct netdev_hw_addr *ha;
572 size_t temp;
573 574
574 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 575 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
575 return; 576 return;
576 if (qlcnic_sriov_vf_check(adapter)) { 577
577 if (!netdev_mc_empty(netdev)) { 578 if (qlcnic_sriov_vf_check(adapter))
578 netdev_for_each_mc_addr(ha, netdev) { 579 qlcnic_sriov_vf_set_multi(netdev);
579 temp = sizeof(struct qlcnic_mac_vlan_list); 580 else
580 cur = kzalloc(temp, GFP_ATOMIC); 581 __qlcnic_set_multi(netdev, 0);
581 if (cur == NULL)
582 break;
583 memcpy(cur->mac_addr,
584 ha->addr, ETH_ALEN);
585 list_add_tail(&cur->list, &adapter->vf_mc_list);
586 }
587 }
588 qlcnic_sriov_vf_schedule_multi(adapter->netdev);
589 return;
590 }
591 __qlcnic_set_multi(netdev, 0);
592} 582}
593 583
594int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 584int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
630 struct hlist_node *n; 620 struct hlist_node *n;
631 struct hlist_head *head; 621 struct hlist_head *head;
632 int i; 622 int i;
633 unsigned long time; 623 unsigned long expires;
634 u8 cmd; 624 u8 cmd;
635 625
636 for (i = 0; i < adapter->fhash.fbucket_size; i++) { 626 for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
638 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { 628 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
639 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : 629 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
640 QLCNIC_MAC_DEL; 630 QLCNIC_MAC_DEL;
641 time = tmp_fil->ftime; 631 expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
642 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { 632 if (time_before(expires, jiffies)) {
643 qlcnic_sre_macaddr_change(adapter, 633 qlcnic_sre_macaddr_change(adapter,
644 tmp_fil->faddr, 634 tmp_fil->faddr,
645 tmp_fil->vlan_id, 635 tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
657 647
658 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) 648 hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
659 { 649 {
660 time = tmp_fil->ftime; 650 expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
661 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { 651 if (time_before(expires, jiffies)) {
662 spin_lock_bh(&adapter->rx_mac_learn_lock); 652 spin_lock_bh(&adapter->rx_mac_learn_lock);
663 adapter->rx_fhash.fnum--; 653 adapter->rx_fhash.fnum--;
664 hlist_del(&tmp_fil->fnode); 654 hlist_del(&tmp_fil->fnode);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
305{ 305{
306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); 306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
307 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 307 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
308 struct net_device *netdev = adapter->netdev;
309 u16 protocol = ntohs(skb->protocol); 308 u16 protocol = ntohs(skb->protocol);
310 struct qlcnic_filter *fil, *tmp_fil; 309 struct qlcnic_filter *fil, *tmp_fil;
311 struct hlist_head *head; 310 struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
314 u16 vlan_id = 0; 313 u16 vlan_id = 0;
315 u8 hindex, hval; 314 u8 hindex, hval;
316 315
317 if (!qlcnic_sriov_pf_check(adapter)) { 316 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
318 if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) 317 return;
319 return; 318
320 } else { 319 if (adapter->flags & QLCNIC_VLAN_FILTERING) {
321 if (protocol == ETH_P_8021Q) { 320 if (protocol == ETH_P_8021Q) {
322 vh = (struct vlan_ethhdr *)skb->data; 321 vh = (struct vlan_ethhdr *)skb->data;
323 vlan_id = ntohs(vh->h_vlan_TCI); 322 vlan_id = ntohs(vh->h_vlan_TCI);
324 } else if (vlan_tx_tag_present(skb)) { 323 } else if (vlan_tx_tag_present(skb)) {
325 vlan_id = vlan_tx_tag_get(skb); 324 vlan_id = vlan_tx_tag_get(skb);
326 } 325 }
327
328 if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
329 !vlan_id)
330 return;
331 }
332
333 if (adapter->fhash.fnum >= adapter->fhash.fmax) {
334 adapter->stats.mac_filter_limit_overrun++;
335 netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
336 adapter->fhash.fmax, adapter->fhash.fnum);
337 return;
338 } 326 }
339 327
340 memcpy(&src_addr, phdr->h_source, ETH_ALEN); 328 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
353 } 341 }
354 } 342 }
355 343
344 if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
345 adapter->stats.mac_filter_limit_overrun++;
346 return;
347 }
348
356 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); 349 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
357 if (!fil) 350 if (!fil)
358 return; 351 return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1216 if (!skb) 1209 if (!skb)
1217 return buffer; 1210 return buffer;
1218 1211
1219 if (adapter->drv_mac_learn && 1212 if (adapter->rx_mac_learn) {
1220 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1221 t_vid = 0; 1213 t_vid = 0;
1222 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1214 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1223 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); 1215 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1293 if (!skb) 1285 if (!skb)
1294 return buffer; 1286 return buffer;
1295 1287
1296 if (adapter->drv_mac_learn && 1288 if (adapter->rx_mac_learn) {
1297 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1298 t_vid = 0; 1289 t_vid = 0;
1299 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1290 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1300 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); 1291 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
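
The qlcnic_send_filter() rework above gates VLAN parsing on a driver flag (QLCNIC_VLAN_FILTERING) instead of qlcnic_sriov_pf_check(), and moves the fhash.fmax capacity check after the duplicate lookup. The ordering matters: an already-hashed filter still gets its timestamp refreshed when the table is full, only the allocation of a new entry is refused, and the per-packet netdev_info() that could flood the log is gone. A rough sketch of the new flow, with match() standing in for the real address/VLAN compare:

    hlist_for_each_entry(tmp_fil, head, fnode)
            if (match(tmp_fil, &src_addr, vlan_id)) {
                    tmp_fil->ftime = jiffies;       /* refresh existing entry */
                    return;
            }

    if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
            adapter->stats.mac_filter_limit_overrun++;
            return;                                 /* table full: skip new entry */
    }
    fil = kzalloc(sizeof(*fil), GFP_ATOMIC);
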
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..4fc186713b66 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
378 if (!adapter->fdb_mac_learn) 378 if (!adapter->fdb_mac_learn)
379 return ndo_dflt_fdb_del(ndm, tb, netdev, addr); 379 return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
380 380
381 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 381 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
382 qlcnic_sriov_check(adapter)) {
382 if (is_unicast_ether_addr(addr)) { 383 if (is_unicast_ether_addr(addr)) {
383 err = dev_uc_del(netdev, addr); 384 err = dev_uc_del(netdev, addr);
384 if (!err) 385 if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
402 if (!adapter->fdb_mac_learn) 403 if (!adapter->fdb_mac_learn)
403 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); 404 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
404 405
405 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 406 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
407 !qlcnic_sriov_check(adapter)) {
406 pr_info("%s: FDB e-switch is not enabled\n", __func__); 408 pr_info("%s: FDB e-switch is not enabled\n", __func__);
407 return -EOPNOTSUPP; 409 return -EOPNOTSUPP;
408 } 410 }
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
432 if (!adapter->fdb_mac_learn) 434 if (!adapter->fdb_mac_learn)
433 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 435 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
434 436
435 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 437 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
438 qlcnic_sriov_check(adapter))
436 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 439 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
437 440
438 return idx; 441 return idx;
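
All three FDB handlers now treat an SR-IOV-enabled PF the same as an e-switch-enabled one before falling back to the ndo_dflt_fdb_* helpers. A compilable sketch of the delete path's decision order, reduced to the unicast case and using stand-in types:

    #include <stdbool.h>
    #include <errno.h>

    struct adapter { unsigned long flags; bool sriov_enabled; bool fdb_mac_learn; };
    #define ESWITCH_ENABLED 0x1UL

    static int dflt_fdb_del(const unsigned char *addr) { (void)addr; return 0; }
    static int uc_del(const unsigned char *addr)       { (void)addr; return 0; }

    /* Same decision order as qlcnic_fdb_del(): software learning falls back
     * to the default handler; hardware removal requires e-switch or SR-IOV. */
    static int fdb_del(struct adapter *a, const unsigned char *addr)
    {
            if (!a->fdb_mac_learn)
                    return dflt_fdb_del(addr);
            if ((a->flags & ESWITCH_ENABLED) || a->sriov_enabled)
                    return uc_del(addr);
            return -EOPNOTSUPP;
    }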
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
522#endif 525#endif
523#ifdef CONFIG_QLCNIC_SRIOV 526#ifdef CONFIG_QLCNIC_SRIOV
524 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, 527 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
525 .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, 528 .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
526 .ndo_get_vf_config = qlcnic_sriov_get_vf_config, 529 .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
527 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, 530 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
528 .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, 531 .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
690 adapter->msix_entries[vector].entry = vector; 693 adapter->msix_entries[vector].entry = vector;
691 694
692restore: 695restore:
693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 696 err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
694 if (err > 0) { 697 if (err == -ENOSPC) {
695 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings) 698 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
696 return -ENOSPC; 699 return err;
697 700
698 netdev_info(adapter->netdev, 701 netdev_info(adapter->netdev,
699 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 702 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
1014 1017
1015 if (pfn >= ahw->max_vnic_func) { 1018 if (pfn >= ahw->max_vnic_func) {
1016 ret = QL_STATUS_INVALID_PARAM; 1019 ret = QL_STATUS_INVALID_PARAM;
1020 dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
1021 __func__, pfn, ahw->max_vnic_func);
1017 goto err_eswitch; 1022 goto err_eswitch;
1018 } 1023 }
1019 1024
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1915 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) 1920 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1916 return; 1921 return;
1917 1922
1918 if (qlcnic_sriov_vf_check(adapter))
1919 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1920 smp_mb(); 1923 smp_mb();
1921 netif_carrier_off(netdev); 1924 netif_carrier_off(netdev);
1922 adapter->ahw->linkup = 0; 1925 adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1928 qlcnic_delete_lb_filters(adapter); 1931 qlcnic_delete_lb_filters(adapter);
1929 1932
1930 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); 1933 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1934 if (qlcnic_sriov_vf_check(adapter))
1935 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1931 1936
1932 qlcnic_napi_disable(adapter); 1937 qlcnic_napi_disable(adapter);
1933 1938
@@ -2052,6 +2057,7 @@ out:
2052 2057
2053static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) 2058static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
2054{ 2059{
2060 struct qlcnic_hardware_context *ahw = adapter->ahw;
2055 int err = 0; 2061 int err = 0;
2056 2062
2057 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), 2063 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
2061 goto err_out; 2067 goto err_out;
2062 } 2068 }
2063 2069
2070 if (qlcnic_83xx_check(adapter)) {
2071 ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
2072 ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
2073 ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
2074 ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
2075 ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
2076 } else {
2077 ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
2078 ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
2079 ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
2080 }
2081
2064 /* clear stats */ 2082 /* clear stats */
2065 memset(&adapter->stats, 0, sizeof(adapter->stats)); 2083 memset(&adapter->stats, 0, sizeof(adapter->stats));
2066err_out: 2084err_out:
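
Seeding the coalescing defaults at allocation time means an ethtool -c read returns consistent values before any user configuration; 83xx gets the combined RX+TX type, 82xx stays RX-only. A compact sketch of the split, with illustrative numbers standing in for the QLCNIC_DEF_INTR_COALESCE_* constants:

    struct intr_coal {
            int type;
            unsigned int rx_time_us, rx_packets;
            unsigned int tx_time_us, tx_packets;
    };

    enum { COAL_TYPE_RX = 1, COAL_TYPE_RX_TX = 2 };  /* illustrative values */

    static void coal_defaults(struct intr_coal *c, int has_tx_coalescing)
    {
            c->rx_time_us = 3;      /* stand-in default values */
            c->rx_packets = 256;
            if (has_tx_coalescing) {        /* 83xx */
                    c->type = COAL_TYPE_RX_TX;
                    c->tx_time_us = 64;
                    c->tx_packets = 64;
            } else {                        /* 82xx */
                    c->type = COAL_TYPE_RX;
            }
    }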
@@ -2069,12 +2087,20 @@ err_out:
2069 2087
2070static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) 2088static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
2071{ 2089{
2090 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
2091
2072 kfree(adapter->recv_ctx); 2092 kfree(adapter->recv_ctx);
2073 adapter->recv_ctx = NULL; 2093 adapter->recv_ctx = NULL;
2074 2094
2075 if (adapter->ahw->fw_dump.tmpl_hdr) { 2095 if (fw_dump->tmpl_hdr) {
2076 vfree(adapter->ahw->fw_dump.tmpl_hdr); 2096 vfree(fw_dump->tmpl_hdr);
2077 adapter->ahw->fw_dump.tmpl_hdr = NULL; 2097 fw_dump->tmpl_hdr = NULL;
2098 }
2099
2100 if (fw_dump->dma_buffer) {
2101 dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
2102 fw_dump->dma_buffer, fw_dump->phys_addr);
2103 fw_dump->dma_buffer = NULL;
2078 } 2104 }
2079 2105
2080 kfree(adapter->ahw->reset.buff); 2106 kfree(adapter->ahw->reset.buff);
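
Together with the minidump hunks further down, the PEX DMA buffer stops being allocated per dump: it is created once when the dump template is fetched and released here at adapter teardown. The pairing that must hold, same device, size, and handle on both sides, as a kernel-style sketch:

    #include <linux/dma-mapping.h>

    /* 'sz' must match the allocation size (QLC_PEX_DMA_READ_SIZE here). */
    static void *dump_buf_get(struct device *dev, size_t sz, dma_addr_t *phys)
    {
            return dma_alloc_coherent(dev, sz, phys, GFP_KERNEL);
    }

    static void dump_buf_put(struct device *dev, size_t sz, void *buf,
                             dma_addr_t phys)
    {
            if (buf)
                    dma_free_coherent(dev, sz, buf, phys);
    }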
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2247 2273
2248 qlcnic_change_mtu(netdev, netdev->mtu); 2274 qlcnic_change_mtu(netdev, netdev->mtu);
2249 2275
2250 if (qlcnic_sriov_vf_check(adapter)) 2276 netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
2251 SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops); 2277 &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
2252 else
2253 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
2254 2278
2255 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2279 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2256 NETIF_F_IPV6_CSUM | NETIF_F_GRO | 2280 NETIF_F_IPV6_CSUM | NETIF_F_GRO |
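
This is the tree-wide SET_ETHTOOL_OPS() removal landing in the driver; the macro was nothing more than a one-line assignment, so the conversion is mechanical and the ternary can select the ops table directly:

    /* Before, via the macro being removed: */
    SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

    /* After, the macro's expansion written out: */
    netdev->ethtool_ops = &qlcnic_ethtool_ops;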
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2417 int err, pci_using_dac = -1; 2441 int err, pci_using_dac = -1;
2418 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 2442 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
2419 2443
2420 if (pdev->is_virtfn)
2421 return -ENODEV;
2422
2423 err = pci_enable_device(pdev); 2444 err = pci_enable_device(pdev);
2424 if (err) 2445 if (err)
2425 return err; 2446 return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2552 case -ENOMEM: 2573 case -ENOMEM:
2553 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); 2574 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
2554 goto err_out_free_hw; 2575 goto err_out_free_hw;
2576 case -EOPNOTSUPP:
2577 dev_err(&pdev->dev, "Adapter initialization failed\n");
2578 goto err_out_free_hw;
2555 default: 2579 default:
2556 dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n"); 2580 dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
2557 dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
2558 goto err_out_maintenance_mode; 2581 goto err_out_maintenance_mode;
2559 } 2582 }
2560 } 2583 }
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2628 qlcnic_alloc_lb_filters_mem(adapter); 2651 qlcnic_alloc_lb_filters_mem(adapter);
2629 2652
2630 qlcnic_add_sysfs(adapter); 2653 qlcnic_add_sysfs(adapter);
2631 2654 qlcnic_register_hwmon_dev(adapter);
2632 return 0; 2655 return 0;
2633 2656
2634err_out_disable_mbx_intr: 2657err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
2665err_out_maintenance_mode: 2688err_out_maintenance_mode:
2666 set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state); 2689 set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
2667 netdev->netdev_ops = &qlcnic_netdev_failed_ops; 2690 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2668 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); 2691 netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
2669 ahw->port_type = QLCNIC_XGBE; 2692 ahw->port_type = QLCNIC_XGBE;
2670 2693
2671 if (qlcnic_83xx_check(adapter)) 2694 if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
2698 return; 2721 return;
2699 2722
2700 netdev = adapter->netdev; 2723 netdev = adapter->netdev;
2701 qlcnic_sriov_pf_disable(adapter);
2702 2724
2703 qlcnic_cancel_idc_work(adapter); 2725 qlcnic_cancel_idc_work(adapter);
2726 qlcnic_sriov_pf_disable(adapter);
2704 ahw = adapter->ahw; 2727 ahw = adapter->ahw;
2705 2728
2706 unregister_netdev(netdev); 2729 unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
2735 2758
2736 qlcnic_remove_sysfs(adapter); 2759 qlcnic_remove_sysfs(adapter);
2737 2760
2761 qlcnic_unregister_hwmon_dev(adapter);
2762
2738 qlcnic_cleanup_pci_map(adapter->ahw); 2763 qlcnic_cleanup_pci_map(adapter->ahw);
2739 2764
2740 qlcnic_release_firmware(adapter); 2765 qlcnic_release_firmware(adapter);
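
The hwmon registration added to probe pairs with the unregistration added here. A kernel-style sketch of the expected pairing using the in-tree hwmon helpers; keeping the struct device returned at registration is required for the teardown side (the wrapper names are illustrative):

    #include <linux/hwmon.h>
    #include <linux/err.h>

    /* probe side */
    static struct device *hwmon_attach(struct device *parent, void *drvdata,
                                       const struct attribute_group **groups)
    {
            struct device *h;

            h = hwmon_device_register_with_groups(parent, "qlcnic",
                                                  drvdata, groups);
            return IS_ERR(h) ? NULL : h;
    }

    /* remove side */
    static void hwmon_detach(struct device *h)
    {
            if (h)
                    hwmon_device_unregister(h);
    }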
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
2828 return 0; 2853 return 0;
2829} 2854}
2830 2855
2856#define QLCNIC_VF_LB_BUCKET_SIZE 1
2857
2831void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) 2858void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2832{ 2859{
2833 void *head; 2860 void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2843 spin_lock_init(&adapter->mac_learn_lock); 2870 spin_lock_init(&adapter->mac_learn_lock);
2844 spin_lock_init(&adapter->rx_mac_learn_lock); 2871 spin_lock_init(&adapter->rx_mac_learn_lock);
2845 2872
2846 if (qlcnic_82xx_check(adapter)) { 2873 if (qlcnic_sriov_vf_check(adapter)) {
2874 filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
2875 adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
2876 } else if (qlcnic_82xx_check(adapter)) {
2847 filter_size = QLCNIC_LB_MAX_FILTERS; 2877 filter_size = QLCNIC_LB_MAX_FILTERS;
2848 adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE; 2878 adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
2849 } else { 2879 } else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3973 strcpy(buf, "Tx"); 4003 strcpy(buf, "Tx");
3974 } 4004 }
3975 4005
3976 if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
3977 netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
3978 return -EINVAL;
3979 }
3980
3981 if (adapter->flags & QLCNIC_MSI_ENABLED) {
3982 netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
3983 return -EINVAL;
3984 }
3985
3986 if (!is_power_of_2(ring_cnt)) { 4006 if (!is_power_of_2(ring_cnt)) {
3987 netdev_err(netdev, "%s rings value should be a power of 2\n", 4007 netdev_err(netdev, "%s rings value should be a power of 2\n",
3988 buf); 4008 buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4122 4142
4123 rcu_read_lock(); 4143 rcu_read_lock();
4124 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { 4144 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4125 dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid); 4145 dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
4126 if (!dev) 4146 if (!dev)
4127 continue; 4147 continue;
4128 qlcnic_config_indev_addr(adapter, dev, event); 4148 qlcnic_config_indev_addr(adapter, dev, event);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..e46fc39d425d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
238 238
239 hdr->drv_cap_mask = hdr->cap_mask; 239 hdr->drv_cap_mask = hdr->cap_mask;
240 fw_dump->cap_mask = hdr->cap_mask; 240 fw_dump->cap_mask = hdr->cap_mask;
241
242 fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
241} 243}
242 244
243inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index) 245inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
276 hdr->saved_state[index] = value; 278 hdr->saved_state[index] = value;
277} 279}
278 280
281#define QLCNIC_TEMPLATE_VERSION (0x20001)
282
279void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) 283void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
280{ 284{
281 struct qlcnic_83xx_dump_template_hdr *hdr; 285 struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
288 292
289 hdr->drv_cap_mask = hdr->cap_mask; 293 hdr->drv_cap_mask = hdr->cap_mask;
290 fw_dump->cap_mask = hdr->cap_mask; 294 fw_dump->cap_mask = hdr->cap_mask;
295
296 fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
297 QLCNIC_TEMPLATE_VERSION;
291} 298}
292 299
293inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index) 300inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
653#define QLC_DMA_CMD_BUFF_ADDR_HI 4 660#define QLC_DMA_CMD_BUFF_ADDR_HI 4
654#define QLC_DMA_CMD_STATUS_CTRL 8 661#define QLC_DMA_CMD_STATUS_CTRL 8
655 662
656#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
657
658static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, 663static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
659 struct __mem *mem) 664 struct __mem *mem)
660{ 665{
661 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
662 struct device *dev = &adapter->pdev->dev; 666 struct device *dev = &adapter->pdev->dev;
663 u32 dma_no, dma_base_addr, temp_addr; 667 u32 dma_no, dma_base_addr, temp_addr;
664 int i, ret, dma_sts; 668 int i, ret, dma_sts;
669 void *tmpl_hdr;
665 670
666 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; 671 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
667 dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; 672 dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
673 QLC_83XX_DMA_ENGINE_INDEX);
668 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); 674 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
669 675
670 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; 676 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
671 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 677 ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
672 mem->desc_card_addr);
673 if (ret) 678 if (ret)
674 return ret; 679 return ret;
675 680
676 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; 681 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
677 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0); 682 ret = qlcnic_ind_wr(adapter, temp_addr, 0);
678 if (ret) 683 if (ret)
679 return ret; 684 return ret;
680 685
681 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; 686 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
682 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 687 ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
683 mem->start_dma_cmd);
684 if (ret) 688 if (ret)
685 return ret; 689 return ret;
686 690
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
710 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 714 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
711 u32 temp, dma_base_addr, size = 0, read_size = 0; 715 u32 temp, dma_base_addr, size = 0, read_size = 0;
712 struct qlcnic_pex_dma_descriptor *dma_descr; 716 struct qlcnic_pex_dma_descriptor *dma_descr;
713 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
714 struct device *dev = &adapter->pdev->dev; 717 struct device *dev = &adapter->pdev->dev;
715 dma_addr_t dma_phys_addr; 718 dma_addr_t dma_phys_addr;
716 void *dma_buffer; 719 void *dma_buffer;
720 void *tmpl_hdr;
717 721
718 tmpl_hdr = fw_dump->tmpl_hdr; 722 tmpl_hdr = fw_dump->tmpl_hdr;
719 723
720 /* Check if DMA engine is available */ 724 /* Check if DMA engine is available */
721 temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; 725 temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
726 QLC_83XX_DMA_ENGINE_INDEX);
722 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); 727 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
723 temp = qlcnic_ind_rd(adapter, 728 temp = qlcnic_ind_rd(adapter,
724 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); 729 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
764 769
765 /* Write DMA descriptor to MS memory*/ 770 /* Write DMA descriptor to MS memory*/
766 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; 771 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
767 *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr, 772 *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
768 (u32 *)dma_descr, temp); 773 (u32 *)dma_descr, temp);
769 if (*ret) { 774 if (*ret) {
770 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", 775 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
771 mem->desc_card_addr); 776 mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
1141 return err; 1146 return err;
1142} 1147}
1143 1148
1144#define QLCNIC_TEMPLATE_VERSION (0x20001)
1145
1146int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) 1149int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1147{ 1150{
1148 struct qlcnic_hardware_context *ahw; 1151 struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1150 u32 version, csum, *tmp_buf; 1153 u32 version, csum, *tmp_buf;
1151 u8 use_flash_temp = 0; 1154 u8 use_flash_temp = 0;
1152 u32 temp_size = 0; 1155 u32 temp_size = 0;
1156 void *temp_buffer;
1153 int err; 1157 int err;
1154 1158
1155 ahw = adapter->ahw; 1159 ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
1199 1203
1200 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump); 1204 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
1201 1205
1206 if (fw_dump->use_pex_dma) {
1207 fw_dump->dma_buffer = NULL;
1208 temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
1209 QLC_PEX_DMA_READ_SIZE,
1210 &fw_dump->phys_addr,
1211 GFP_KERNEL);
1212 if (!temp_buffer)
1213 fw_dump->use_pex_dma = false;
1214 else
1215 fw_dump->dma_buffer = temp_buffer;
1216 }
1217
1218
1202 dev_info(&adapter->pdev->dev, 1219 dev_info(&adapter->pdev->dev,
1203 "Default minidump capture mask 0x%x\n", 1220 "Default minidump capture mask 0x%x\n",
1204 fw_dump->cap_mask); 1221 fw_dump->cap_mask);
1205 1222
1206 if (qlcnic_83xx_check(adapter) &&
1207 (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
1208 fw_dump->use_pex_dma = true;
1209 else
1210 fw_dump->use_pex_dma = false;
1211
1212 qlcnic_enable_fw_dump_state(adapter); 1223 qlcnic_enable_fw_dump_state(adapter);
1213 1224
1214 return 0; 1225 return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1224 struct device *dev = &adapter->pdev->dev; 1235 struct device *dev = &adapter->pdev->dev;
1225 struct qlcnic_hardware_context *ahw; 1236 struct qlcnic_hardware_context *ahw;
1226 struct qlcnic_dump_entry *entry; 1237 struct qlcnic_dump_entry *entry;
1227 void *temp_buffer, *tmpl_hdr; 1238 void *tmpl_hdr;
1228 u32 ocm_window; 1239 u32 ocm_window;
1229 __le32 *buffer; 1240 __le32 *buffer;
1230 char mesg[64]; 1241 char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1268 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION); 1279 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
1269 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version); 1280 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
1270 1281
1271 if (fw_dump->use_pex_dma) {
1272 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1273 &fw_dump->phys_addr,
1274 GFP_KERNEL);
1275 if (!temp_buffer)
1276 fw_dump->use_pex_dma = false;
1277 else
1278 fw_dump->dma_buffer = temp_buffer;
1279 }
1280
1281 if (qlcnic_82xx_check(adapter)) { 1282 if (qlcnic_82xx_check(adapter)) {
1282 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); 1283 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
1283 fw_dump_ops = qlcnic_fw_dump_ops; 1284 fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1335 /* Send a udev event to notify availability of FW dump */ 1336 /* Send a udev event to notify availability of FW dump */
1336 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); 1337 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1337 1338
1338 if (fw_dump->use_pex_dma)
1339 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1340 fw_dump->dma_buffer, fw_dump->phys_addr);
1341
1342 return 0; 1339 return 0;
1343} 1340}
1344 1341
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..4677b2edccca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
52 QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3, 52 QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
53}; 53};
54 54
55#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
55#define QLC_BC_CMD 1 56#define QLC_BC_CMD 1
56 57
57struct qlcnic_trans_list { 58struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
151 struct qlcnic_trans_list rcv_pend; 152 struct qlcnic_trans_list rcv_pend;
152 struct qlcnic_adapter *adapter; 153 struct qlcnic_adapter *adapter;
153 struct qlcnic_vport *vp; 154 struct qlcnic_vport *vp;
154 struct mutex vlan_list_lock; /* Lock for VLAN list */ 155 spinlock_t vlan_list_lock; /* Lock for VLAN list */
155}; 156};
156 157
157struct qlcnic_async_work_list { 158struct qlcnic_async_work_list {
158 struct list_head list; 159 struct list_head list;
159 struct work_struct work; 160 struct work_struct work;
160 void *ptr; 161 void *ptr;
162 struct qlcnic_cmd_args *cmd;
161}; 163};
162 164
163struct qlcnic_back_channel { 165struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
231void qlcnic_sriov_pf_reset(struct qlcnic_adapter *); 233void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
232int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *); 234int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
233int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *); 235int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
234int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int); 236int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
235int qlcnic_sriov_get_vf_config(struct net_device *, int , 237int qlcnic_sriov_get_vf_config(struct net_device *, int ,
236 struct ifla_vf_info *); 238 struct ifla_vf_info *);
237int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); 239int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
39static void qlcnic_sriov_process_bc_cmd(struct work_struct *); 39static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
40static int qlcnic_sriov_vf_shutdown(struct pci_dev *); 40static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); 41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
42static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
43 struct qlcnic_cmd_args *);
42 44
43static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { 45static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
44 .read_crb = qlcnic_83xx_read_crb, 46 .read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
181 vf->adapter = adapter; 183 vf->adapter = adapter;
182 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); 184 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
183 mutex_init(&vf->send_cmd_lock); 185 mutex_init(&vf->send_cmd_lock);
184 mutex_init(&vf->vlan_list_lock); 186 spin_lock_init(&vf->vlan_list_lock);
185 INIT_LIST_HEAD(&vf->rcv_act.wait_list); 187 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
186 INIT_LIST_HEAD(&vf->rcv_pend.wait_list); 188 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
187 spin_lock_init(&vf->rcv_act.lock); 189 spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
197 goto qlcnic_destroy_async_wq; 199 goto qlcnic_destroy_async_wq;
198 } 200 }
199 sriov->vf_info[i].vp = vp; 201 sriov->vf_info[i].vp = vp;
202 vp->vlan_mode = QLC_GUEST_VLAN_MODE;
200 vp->max_tx_bw = MAX_BW; 203 vp->max_tx_bw = MAX_BW;
201 vp->spoofchk = true; 204 vp->min_tx_bw = MIN_BW;
205 vp->spoofchk = false;
202 random_ether_addr(vp->mac); 206 random_ether_addr(vp->mac);
203 dev_info(&adapter->pdev->dev, 207 dev_info(&adapter->pdev->dev,
204 "MAC Address %pM is configured for VF %d\n", 208 "MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
454 struct qlcnic_cmd_args cmd; 458 struct qlcnic_cmd_args cmd;
455 int ret = 0; 459 int ret = 0;
456 460
461 memset(&cmd, 0, sizeof(cmd));
457 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); 462 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
458 if (ret) 463 if (ret)
459 return ret; 464 return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
515{ 520{
516 int err; 521 int err;
517 522
523 adapter->flags |= QLCNIC_VLAN_FILTERING;
524 adapter->ahw->total_nic_func = 1;
518 INIT_LIST_HEAD(&adapter->vf_mc_list); 525 INIT_LIST_HEAD(&adapter->vf_mc_list);
519 if (!qlcnic_use_msi_x && !!qlcnic_use_msi) 526 if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
520 dev_warn(&adapter->pdev->dev, 527 dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
770 cmd->req.arg = (u32 *)trans->req_pay; 777 cmd->req.arg = (u32 *)trans->req_pay;
771 cmd->rsp.arg = (u32 *)trans->rsp_pay; 778 cmd->rsp.arg = (u32 *)trans->rsp_pay;
772 cmd_op = cmd->req.arg[0] & 0xff; 779 cmd_op = cmd->req.arg[0] & 0xff;
780 cmd->cmd_op = cmd_op;
773 remainder = (trans->rsp_pay_size) % (bc_pay_sz); 781 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
774 num_frags = (trans->rsp_pay_size) / (bc_pay_sz); 782 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
775 if (remainder) 783 if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1356 return -EIO; 1364 return -EIO;
1357} 1365}
1358 1366
1359static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, 1367static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1360 struct qlcnic_cmd_args *cmd) 1368 struct qlcnic_cmd_args *cmd)
1361{ 1369{
1362 struct qlcnic_hardware_context *ahw = adapter->ahw; 1370 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
1408 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { 1416 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1409 rsp = QLCNIC_RCODE_SUCCESS; 1417 rsp = QLCNIC_RCODE_SUCCESS;
1410 } else { 1418 } else {
1411 rsp = mbx_err_code; 1419 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1412 if (!rsp) 1420 rsp = QLCNIC_RCODE_SUCCESS;
1413 rsp = 1; 1421 } else {
1414 dev_err(dev, 1422 rsp = mbx_err_code;
1415 "MBX command 0x%x failed with err:0x%x for VF %d\n", 1423 if (!rsp)
1416 opcode, mbx_err_code, func); 1424 rsp = 1;
1425
1426 dev_err(dev,
1427 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1428 opcode, mbx_err_code, func);
1429 }
1417 } 1430 }
1418 1431
1419err_out: 1432err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
1435 return rsp; 1448 return rsp;
1436} 1449}
1437 1450
1451
1452static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1453 struct qlcnic_cmd_args *cmd)
1454{
1455 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
1456 return qlcnic_sriov_async_issue_cmd(adapter, cmd);
1457 else
1458 return __qlcnic_sriov_issue_cmd(adapter, cmd);
1459}
1460
1438static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) 1461static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1439{ 1462{
1440 struct qlcnic_cmd_args cmd; 1463 struct qlcnic_cmd_args cmd;
1441 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; 1464 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1442 int ret; 1465 int ret;
1443 1466
1467 memset(&cmd, 0, sizeof(cmd));
1444 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) 1468 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1445 return -ENOMEM; 1469 return -ENOMEM;
1446 1470
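
Commands flagged QLC_83XX_MBX_CMD_NO_WAIT are now queued to the back-channel workqueue (each work item carries its qlcnic_cmd_args via the new field in qlcnic_sriov.h) and complete without a waiter, while every other command still issues synchronously. The dispatch shape, with illustrative names:

    enum cmd_type { CMD_WAIT, CMD_NO_WAIT };

    struct cmd_args { enum cmd_type type; /* request/response payload */ };

    static int issue_sync(struct cmd_args *c)   { (void)c; return 0; }
    static int issue_queued(struct cmd_args *c) { (void)c; return 0; }

    /* Mirrors the new qlcnic_sriov_issue_cmd() split */
    static int issue_cmd(struct cmd_args *c)
    {
            if (c->type == CMD_NO_WAIT)
                    return issue_queued(c); /* worker runs the __ variant later */
            return issue_sync(c);
    }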
@@ -1465,58 +1489,28 @@ out:
1465 return ret; 1489 return ret;
1466} 1490}
1467 1491
1468static void qlcnic_vf_add_mc_list(struct net_device *netdev) 1492static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
1469{ 1493{
1470 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1494 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1471 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1495 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1472 struct qlcnic_mac_vlan_list *cur;
1473 struct list_head *head, tmp_list;
1474 struct qlcnic_vf_info *vf; 1496 struct qlcnic_vf_info *vf;
1475 u16 vlan_id; 1497 u16 vlan_id;
1476 int i; 1498 int i;
1477 1499
1478 static const u8 bcast_addr[ETH_ALEN] = {
1479 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1480 };
1481
1482 vf = &adapter->ahw->sriov->vf_info[0]; 1500 vf = &adapter->ahw->sriov->vf_info[0];
1483 INIT_LIST_HEAD(&tmp_list);
1484 head = &adapter->vf_mc_list;
1485 netif_addr_lock_bh(netdev);
1486 1501
1487 while (!list_empty(head)) { 1502 if (!qlcnic_sriov_check_any_vlan(vf)) {
1488 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); 1503 qlcnic_nic_add_mac(adapter, mac, 0);
1489 list_move(&cur->list, &tmp_list); 1504 } else {
1490 } 1505 spin_lock(&vf->vlan_list_lock);
1491 1506 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1492 netif_addr_unlock_bh(netdev); 1507 vlan_id = vf->sriov_vlans[i];
1493 1508 if (vlan_id)
1494 while (!list_empty(&tmp_list)) { 1509 qlcnic_nic_add_mac(adapter, mac, vlan_id);
1495 cur = list_entry((&tmp_list)->next,
1496 struct qlcnic_mac_vlan_list, list);
1497 if (!qlcnic_sriov_check_any_vlan(vf)) {
1498 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1499 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1500 } else {
1501 mutex_lock(&vf->vlan_list_lock);
1502 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1503 vlan_id = vf->sriov_vlans[i];
1504 if (vlan_id) {
1505 qlcnic_nic_add_mac(adapter, bcast_addr,
1506 vlan_id);
1507 qlcnic_nic_add_mac(adapter,
1508 cur->mac_addr,
1509 vlan_id);
1510 }
1511 }
1512 mutex_unlock(&vf->vlan_list_lock);
1513 if (qlcnic_84xx_check(adapter)) {
1514 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1515 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1516 }
1517 } 1510 }
1518 list_del(&cur->list); 1511 spin_unlock(&vf->vlan_list_lock);
1519 kfree(cur); 1512 if (qlcnic_84xx_check(adapter))
1513 qlcnic_nic_add_mac(adapter, mac, 0);
1520 } 1514 }
1521} 1515}
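
With the private mc-list bookkeeping gone, qlcnic_vf_add_mc_list() programs each address directly: untagged when the VF holds no VLANs, otherwise once per allowed VLAN (plus an extra untagged entry on 84xx parts). A kernel-style sketch of that loop with simplified types; add_mac() stands in for qlcnic_nic_add_mac():

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct vf_info {
            int num_vlan, num_allowed_vlans;
            u16 *sriov_vlans;
            spinlock_t vlan_list_lock;
    };

    static void vf_add_addr(struct vf_info *vf, const u8 *mac,
                            void (*add_mac)(const u8 *mac, u16 vlan))
    {
            int i;

            if (!vf->num_vlan) {
                    add_mac(mac, 0);        /* no VLANs: untagged entry */
                    return;
            }

            spin_lock(&vf->vlan_list_lock); /* caller runs with BHs off */
            for (i = 0; i < vf->num_allowed_vlans; i++)
                    if (vf->sriov_vlans[i])
                            add_mac(mac, vf->sriov_vlans[i]);
            spin_unlock(&vf->vlan_list_lock);
    }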
1522 1516
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1525 struct list_head *head = &bc->async_list; 1519 struct list_head *head = &bc->async_list;
1526 struct qlcnic_async_work_list *entry; 1520 struct qlcnic_async_work_list *entry;
1527 1521
1522 flush_workqueue(bc->bc_async_wq);
1528 while (!list_empty(head)) { 1523 while (!list_empty(head)) {
1529 entry = list_entry(head->next, struct qlcnic_async_work_list, 1524 entry = list_entry(head->next, struct qlcnic_async_work_list,
1530 list); 1525 list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1534 } 1529 }
1535} 1530}
1536 1531
1537static void qlcnic_sriov_vf_set_multi(struct net_device *netdev) 1532void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1538{ 1533{
1539 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1534 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1540 struct qlcnic_hardware_context *ahw = adapter->ahw; 1535 struct qlcnic_hardware_context *ahw = adapter->ahw;
1536 static const u8 bcast_addr[ETH_ALEN] = {
1537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1538 };
1539 struct netdev_hw_addr *ha;
1541 u32 mode = VPORT_MISS_MODE_DROP; 1540 u32 mode = VPORT_MISS_MODE_DROP;
1542 1541
1543 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 1542 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1549 } else if ((netdev->flags & IFF_ALLMULTI) || 1548 } else if ((netdev->flags & IFF_ALLMULTI) ||
1550 (netdev_mc_count(netdev) > ahw->max_mc_count)) { 1549 (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1551 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 1550 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1551 } else {
1552 qlcnic_vf_add_mc_list(netdev, bcast_addr);
1553 if (!netdev_mc_empty(netdev)) {
1554 netdev_for_each_mc_addr(ha, netdev)
1555 qlcnic_vf_add_mc_list(netdev, ha->addr);
1556 }
1552 } 1557 }
1553 1558
	1554		if (qlcnic_sriov_vf_check(adapter))	1559	/* Configure unicast MAC addresses; if there is not sufficient
	1555			qlcnic_vf_add_mc_list(netdev);	1560	 * space to store all of them, enable promiscuous mode.
	1561	 */
1562 if (netdev_uc_count(netdev) > ahw->max_uc_count) {
1563 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1564 } else if (!netdev_uc_empty(netdev)) {
1565 netdev_for_each_uc_addr(ha, netdev)
1566 qlcnic_vf_add_mc_list(netdev, ha->addr);
1567 }
1568
1569 if (adapter->pdev->is_virtfn) {
1570 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
1571 !adapter->fdb_mac_learn) {
1572 qlcnic_alloc_lb_filters_mem(adapter);
1573 adapter->drv_mac_learn = 1;
1574 adapter->rx_mac_learn = true;
1575 } else {
1576 adapter->drv_mac_learn = 0;
1577 adapter->rx_mac_learn = false;
1578 }
1579 }
1556 1580
1557 qlcnic_nic_set_promisc(adapter, mode); 1581 qlcnic_nic_set_promisc(adapter, mode);
1558} 1582}
1559 1583
1560static void qlcnic_sriov_handle_async_multi(struct work_struct *work) 1584static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1561{ 1585{
1562 struct qlcnic_async_work_list *entry; 1586 struct qlcnic_async_work_list *entry;
1563 struct net_device *netdev; 1587 struct qlcnic_adapter *adapter;
1588 struct qlcnic_cmd_args *cmd;
1564 1589
1565 entry = container_of(work, struct qlcnic_async_work_list, work); 1590 entry = container_of(work, struct qlcnic_async_work_list, work);
1566 netdev = (struct net_device *)entry->ptr; 1591 adapter = entry->ptr;
1567 1592 cmd = entry->cmd;
1568 qlcnic_sriov_vf_set_multi(netdev); 1593 __qlcnic_sriov_issue_cmd(adapter, cmd);
1569 return; 1594 return;
1570} 1595}
1571 1596
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1595 return entry; 1620 return entry;
1596} 1621}
1597 1622
1598static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc, 1623static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1599 work_func_t func, void *data) 1624 work_func_t func, void *data,
1625 struct qlcnic_cmd_args *cmd)
1600{ 1626{
1601 struct qlcnic_async_work_list *entry = NULL; 1627 struct qlcnic_async_work_list *entry = NULL;
1602 1628
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1605 return; 1631 return;
1606 1632
1607 entry->ptr = data; 1633 entry->ptr = data;
1634 entry->cmd = cmd;
1608 INIT_WORK(&entry->work, func); 1635 INIT_WORK(&entry->work, func);
1609 queue_work(bc->bc_async_wq, &entry->work); 1636 queue_work(bc->bc_async_wq, &entry->work);
1610} 1637}
1611 1638
1612void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev) 1639static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1640 struct qlcnic_cmd_args *cmd)
1613{ 1641{
1614 1642
1615 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1616 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; 1643 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1617 1644
1618 if (adapter->need_fw_reset) 1645 if (adapter->need_fw_reset)
1619 return; 1646 return -EIO;
1620 1647
1621 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi, 1648 qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
1622 netdev); 1649 adapter, cmd);
1650 return 0;
1623} 1651}
1624 1652
1625static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) 1653static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1843 return 0; 1871 return 0;
1844} 1872}
1845 1873
1874static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1875{
1876 if (adapter->fhash.fnum)
1877 qlcnic_prune_lb_filters(adapter);
1878}
1879
1846static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) 1880static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1847{ 1881{
1848 struct qlcnic_adapter *adapter; 1882 struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1874 } 1908 }
1875 1909
1876 idc->prev_state = idc->curr_state; 1910 idc->prev_state = idc->curr_state;
1911 qlcnic_sriov_vf_periodic_tasks(adapter);
1912
1877 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) 1913 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1878 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 1914 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1879 idc->delay); 1915 idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1897 if (!vf->sriov_vlans) 1933 if (!vf->sriov_vlans)
1898 return err; 1934 return err;
1899 1935
1900 mutex_lock(&vf->vlan_list_lock); 1936 spin_lock_bh(&vf->vlan_list_lock);
1901 1937
1902 for (i = 0; i < sriov->num_allowed_vlans; i++) { 1938 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1903 if (vf->sriov_vlans[i] == vlan_id) { 1939 if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1906 } 1942 }
1907 } 1943 }
1908 1944
1909 mutex_unlock(&vf->vlan_list_lock); 1945 spin_unlock_bh(&vf->vlan_list_lock);
1910 return err; 1946 return err;
1911} 1947}
1912 1948
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1915{ 1951{
1916 int err = 0; 1952 int err = 0;
1917 1953
1918 mutex_lock(&vf->vlan_list_lock); 1954 spin_lock_bh(&vf->vlan_list_lock);
1919 1955
1920 if (vf->num_vlan >= sriov->num_allowed_vlans) 1956 if (vf->num_vlan >= sriov->num_allowed_vlans)
1921 err = -EINVAL; 1957 err = -EINVAL;
1922 1958
1923 mutex_unlock(&vf->vlan_list_lock); 1959 spin_unlock_bh(&vf->vlan_list_lock);
1924 return err; 1960 return err;
1925} 1961}
1926 1962
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1973 if (!vf->sriov_vlans) 2009 if (!vf->sriov_vlans)
1974 return; 2010 return;
1975 2011
1976 mutex_lock(&vf->vlan_list_lock); 2012 spin_lock_bh(&vf->vlan_list_lock);
1977 2013
1978 switch (opcode) { 2014 switch (opcode) {
1979 case QLC_VLAN_ADD: 2015 case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1986 netdev_err(adapter->netdev, "Invalid VLAN operation\n"); 2022 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1987 } 2023 }
1988 2024
1989 mutex_unlock(&vf->vlan_list_lock); 2025 spin_unlock_bh(&vf->vlan_list_lock);
1990 return; 2026 return;
1991} 2027}
1992 2028
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1994 u16 vid, u8 enable) 2030 u16 vid, u8 enable)
1995{ 2031{
1996 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 2032 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2033 struct net_device *netdev = adapter->netdev;
1997 struct qlcnic_vf_info *vf; 2034 struct qlcnic_vf_info *vf;
1998 struct qlcnic_cmd_args cmd; 2035 struct qlcnic_cmd_args cmd;
1999 int ret; 2036 int ret;
2000 2037
2038 memset(&cmd, 0, sizeof(cmd));
2001 if (vid == 0) 2039 if (vid == 0)
2002 return 0; 2040 return 0;
2003 2041
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
2019 dev_err(&adapter->pdev->dev, 2057 dev_err(&adapter->pdev->dev,
2020 "Failed to configure guest VLAN, err=%d\n", ret); 2058 "Failed to configure guest VLAN, err=%d\n", ret);
2021 } else { 2059 } else {
2060 netif_addr_lock_bh(netdev);
2022 qlcnic_free_mac_list(adapter); 2061 qlcnic_free_mac_list(adapter);
2062 netif_addr_unlock_bh(netdev);
2023 2063
2024 if (enable) 2064 if (enable)
2025 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD); 2065 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2026 else 2066 else
2027 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE); 2067 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2028 2068
2029 qlcnic_set_multi(adapter->netdev); 2069 netif_addr_lock_bh(netdev);
2070 qlcnic_set_multi(netdev);
2071 netif_addr_unlock_bh(netdev);
2030 } 2072 }
2031 2073
2032 qlcnic_free_mbx_args(&cmd); 2074 qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2157{ 2199{
2158 bool err = false; 2200 bool err = false;
2159 2201
2160 mutex_lock(&vf->vlan_list_lock); 2202 spin_lock_bh(&vf->vlan_list_lock);
2161 2203
2162 if (vf->num_vlan) 2204 if (vf->num_vlan)
2163 err = true; 2205 err = true;
2164 2206
2165 mutex_unlock(&vf->vlan_list_lock); 2207 spin_unlock_bh(&vf->vlan_list_lock);
2166 return err; 2208 return err;
2167} 2209}
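
The mutex-to-spinlock conversions in this file follow from the rx-mode rework above: the VLAN table is now walked from paths that run with bottom halves disabled, where a mutex may not sleep. Plain spin_lock() is used where BHs are already off, spin_lock_bh() from process context that can race with those paths; a sketch of the latter pattern:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static bool vf_has_vlan(spinlock_t *lock, const u16 *vlans, int n)
    {
            bool hit = false;
            int i;

            spin_lock_bh(lock);     /* safe against the BH-context users */
            for (i = 0; i < n; i++)
                    if (vlans[i]) {
                            hit = true;
                            break;
                    }
            spin_unlock_bh(lock);
            return hit;
    }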
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..a29538b86edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
16#define QLC_VF_FLOOD_BIT BIT_16 16#define QLC_VF_FLOOD_BIT BIT_16
17#define QLC_FLOOD_MODE 0x5 17#define QLC_FLOOD_MODE 0x5
18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19 18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
19#define QLC_INTR_COAL_TYPE_MASK 0x7
19 20
20static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); 21static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
21 22
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
83 info->max_tx_ques = res->num_tx_queues / max; 84 info->max_tx_ques = res->num_tx_queues / max;
84 85
85 if (qlcnic_83xx_pf_check(adapter)) 86 if (qlcnic_83xx_pf_check(adapter))
86 num_macs = 1; 87 num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
87 88
88 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 89 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
89 90
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
337 338
338 cmd.req.arg[1] = 0x4; 339 cmd.req.arg[1] = 0x4;
339 if (enable) { 340 if (enable) {
341 adapter->flags |= QLCNIC_VLAN_FILTERING;
340 cmd.req.arg[1] |= BIT_16; 342 cmd.req.arg[1] |= BIT_16;
341 if (qlcnic_84xx_check(adapter)) 343 if (qlcnic_84xx_check(adapter))
342 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; 344 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
345 } else {
346 adapter->flags &= ~QLCNIC_VLAN_FILTERING;
343 } 347 }
344 348
345 err = qlcnic_issue_cmd(adapter, &cmd); 349 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
471 return -EPERM; 475 return -EPERM;
472 } 476 }
473 477
478 qlcnic_sriov_pf_disable(adapter);
479
474 rtnl_lock(); 480 rtnl_lock();
475 if (netif_running(netdev)) 481 if (netif_running(netdev))
476 __qlcnic_down(adapter, netdev); 482 __qlcnic_down(adapter, netdev);
477 483
478 qlcnic_sriov_pf_disable(adapter);
479
480 qlcnic_sriov_free_vlans(adapter); 484 qlcnic_sriov_free_vlans(adapter);
481 485
482 qlcnic_sriov_pf_cleanup(adapter); 486 qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
595 599
596 qlcnic_sriov_alloc_vlans(adapter); 600 qlcnic_sriov_alloc_vlans(adapter);
597 601
598 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
599 return err; 602 return err;
600 603
601del_flr_queue: 604del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
626 __qlcnic_down(adapter, netdev); 629 __qlcnic_down(adapter, netdev);
627 630
628 err = __qlcnic_pci_sriov_enable(adapter, num_vfs); 631 err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
629 if (err) { 632 if (err)
630 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", 633 goto error;
631 adapter->portnum);
632 634
633 err = -EIO; 635 if (netif_running(netdev))
634 if (qlcnic_83xx_configure_opmode(adapter)) 636 __qlcnic_up(adapter, netdev);
635 goto error; 637
636 } else { 638 rtnl_unlock();
639 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
640 if (!err) {
637 netdev_info(netdev, 641 netdev_info(netdev,
638 "SR-IOV is enabled successfully on port %d\n", 642 "SR-IOV is enabled successfully on port %d\n",
639 adapter->portnum); 643 adapter->portnum);
640 /* Return number of vfs enabled */ 644 /* Return number of vfs enabled */
641 err = num_vfs; 645 return num_vfs;
642 } 646 }
647
648 rtnl_lock();
643 if (netif_running(netdev)) 649 if (netif_running(netdev))
644 __qlcnic_up(adapter, netdev); 650 __qlcnic_down(adapter, netdev);
645 651
646error: 652error:
653 if (!qlcnic_83xx_configure_opmode(adapter)) {
654 if (netif_running(netdev))
655 __qlcnic_up(adapter, netdev);
656 }
657
647 rtnl_unlock(); 658 rtnl_unlock();
659 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
660 adapter->portnum);
661
648 return err; 662 return err;
649} 663}
650 664
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
773 struct qlcnic_vf_info *vf, 787 struct qlcnic_vf_info *vf,
774 u16 vlan, u8 op) 788 u16 vlan, u8 op)
775{ 789{
776 struct qlcnic_cmd_args cmd; 790 struct qlcnic_cmd_args *cmd;
777 struct qlcnic_macvlan_mbx mv; 791 struct qlcnic_macvlan_mbx mv;
778 struct qlcnic_vport *vp; 792 struct qlcnic_vport *vp;
779 u8 *addr; 793 u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
783 797
784 vp = vf->vp; 798 vp = vf->vp;
785 799
786 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN)) 800 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
801 if (!cmd)
787 return -ENOMEM; 802 return -ENOMEM;
788 803
804 err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
805 if (err)
806 goto free_cmd;
807
808 cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
789 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); 809 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
790 if (vpid < 0) { 810 if (vpid < 0) {
791 err = -EINVAL; 811 err = -EINVAL;
792 goto out; 812 goto free_args;
793 } 813 }
794 814
795 if (vlan) 815 if (vlan)
796 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 816 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
797 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); 817 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
798 818
799 cmd.req.arg[1] = op | (1 << 8) | (3 << 6); 819 cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
800 cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; 820 cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
801 821
802 addr = vp->mac; 822 addr = vp->mac;
803 mv.vlan = vlan; 823 mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
807 mv.mac_addr3 = addr[3]; 827 mv.mac_addr3 = addr[3];
808 mv.mac_addr4 = addr[4]; 828 mv.mac_addr4 = addr[4];
809 mv.mac_addr5 = addr[5]; 829 mv.mac_addr5 = addr[5];
810 buf = &cmd.req.arg[2]; 830 buf = &cmd->req.arg[2];
811 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 831 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
812 832
813 err = qlcnic_issue_cmd(adapter, &cmd); 833 err = qlcnic_issue_cmd(adapter, cmd);
814 834
815 if (err) 835 if (!err)
816 dev_err(&adapter->pdev->dev, 836 return err;
817 "MAC-VLAN %s to CAM failed, err=%d.\n",
818 ((op == 1) ? "add " : "delete "), err);
819 837
820out: 838free_args:
821 qlcnic_free_mbx_args(&cmd); 839 qlcnic_free_mbx_args(cmd);
840free_cmd:
841 kfree(cmd);
822 return err; 842 return err;
823} 843}
824 844
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
840 860
841 sriov = adapter->ahw->sriov; 861 sriov = adapter->ahw->sriov;
842 862
843 mutex_lock(&vf->vlan_list_lock); 863 spin_lock_bh(&vf->vlan_list_lock);
844 if (vf->num_vlan) { 864 if (vf->num_vlan) {
845 for (i = 0; i < sriov->num_allowed_vlans; i++) { 865 for (i = 0; i < sriov->num_allowed_vlans; i++) {
846 vlan = vf->sriov_vlans[i]; 866 vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
849 opcode); 869 opcode);
850 } 870 }
851 } 871 }
852 mutex_unlock(&vf->vlan_list_lock); 872 spin_unlock_bh(&vf->vlan_list_lock);
853 873
854 if (vf->vp->vlan_mode != QLC_PVID_MODE) { 874 if (vf->vp->vlan_mode != QLC_PVID_MODE) {
855 if (qlcnic_83xx_pf_check(adapter) && 875 if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
1178{ 1198{
1179 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; 1199 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
1180 u16 ctx_id, pkts, time; 1200 u16 ctx_id, pkts, time;
1201 int err = -EINVAL;
1202 u8 type;
1181 1203
1204 type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
1182 ctx_id = cmd->req.arg[1] >> 16; 1205 ctx_id = cmd->req.arg[1] >> 16;
1183 pkts = cmd->req.arg[2] & 0xffff; 1206 pkts = cmd->req.arg[2] & 0xffff;
1184 time = cmd->req.arg[2] >> 16; 1207 time = cmd->req.arg[2] >> 16;
1185 1208
1186 if (ctx_id != vf->rx_ctx_id) 1209 switch (type) {
1187 return -EINVAL; 1210 case QLCNIC_INTR_COAL_TYPE_RX:
1188 if (pkts > coal->rx_packets) 1211 if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
1189 return -EINVAL; 1212 time < coal->rx_time_us)
1190 if (time < coal->rx_time_us) 1213 goto err_label;
1191 return -EINVAL; 1214 break;
1215 case QLCNIC_INTR_COAL_TYPE_TX:
1216 if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
1217 time < coal->tx_time_us)
1218 goto err_label;
1219 break;
1220 default:
1221 netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
1222 type);
1223 return err;
1224 }
1192 1225
1193 return 0; 1226 return 0;
1227
1228err_label:
1229 netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
1230 vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
1231 vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
1232 netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
1233 ctx_id, pkts, time, type);
1234
1235 return err;
1194} 1236}
1195 1237
1196static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, 1238static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1214 struct qlcnic_vf_info *vf, 1256 struct qlcnic_vf_info *vf,
1215 struct qlcnic_cmd_args *cmd) 1257 struct qlcnic_cmd_args *cmd)
1216{ 1258{
1217 struct qlcnic_macvlan_mbx *macvlan;
1218 struct qlcnic_vport *vp = vf->vp; 1259 struct qlcnic_vport *vp = vf->vp;
1219 u8 op, new_op; 1260 u8 op, new_op;
1220 1261
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1224 cmd->req.arg[1] |= (vf->vp->handle << 16); 1265 cmd->req.arg[1] |= (vf->vp->handle << 16);
1225 cmd->req.arg[1] |= BIT_31; 1266 cmd->req.arg[1] |= BIT_31;
1226 1267
1227 macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
1228 if (!(macvlan->mac_addr0 & BIT_0)) {
1229 dev_err(&adapter->pdev->dev,
1230 "MAC address change is not allowed from VF %d",
1231 vf->pci_func);
1232 return -EINVAL;
1233 }
1234
1235 if (vp->vlan_mode == QLC_PVID_MODE) { 1268 if (vp->vlan_mode == QLC_PVID_MODE) {
1236 op = cmd->req.arg[1] & 0x7; 1269 op = cmd->req.arg[1] & 0x7;
1237 cmd->req.arg[1] &= ~0x7; 1270 cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1815 return 0; 1848 return 0;
1816} 1849}
1817 1850
1818int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) 1851int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
1852 int min_tx_rate, int max_tx_rate)
1819{ 1853{
1820 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1854 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1821 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1855 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
1830 if (vf >= sriov->num_vfs) 1864 if (vf >= sriov->num_vfs)
1831 return -EINVAL; 1865 return -EINVAL;
1832 1866
1833 if (tx_rate >= 10000 || tx_rate < 100) { 1867 vf_info = &sriov->vf_info[vf];
1868 vp = vf_info->vp;
1869 vpid = vp->handle;
1870
1871 if (!min_tx_rate)
1872 min_tx_rate = QLC_VF_MIN_TX_RATE;
1873
1874 if (max_tx_rate &&
1875 (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
1834 netdev_err(netdev, 1876 netdev_err(netdev,
1835 "Invalid Tx rate, allowed range is [%d - %d]", 1877 "Invalid max Tx rate, allowed range is [%d - %d]",
1836 QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE); 1878 min_tx_rate, QLC_VF_MAX_TX_RATE);
1837 return -EINVAL; 1879 return -EINVAL;
1838 } 1880 }
1839 1881
1840 if (tx_rate == 0) 1882 if (!max_tx_rate)
1841 tx_rate = 10000; 1883 max_tx_rate = 10000;
1842 1884
1843 vf_info = &sriov->vf_info[vf]; 1885 if (min_tx_rate &&
1844 vp = vf_info->vp; 1886 (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
1845 vpid = vp->handle; 1887 netdev_err(netdev,
1888 "Invalid min Tx rate, allowed range is [%d - %d]",
1889 QLC_VF_MIN_TX_RATE, max_tx_rate);
1890 return -EINVAL;
1891 }
1846 1892
1847 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { 1893 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1848 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) 1894 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
1849 return -EIO; 1895 return -EIO;
1850 1896
1851 nic_info.max_tx_bw = tx_rate / 100; 1897 nic_info.max_tx_bw = max_tx_rate / 100;
1898 nic_info.min_tx_bw = min_tx_rate / 100;
1852 nic_info.bit_offsets = BIT_0; 1899 nic_info.bit_offsets = BIT_0;
1853 1900
1854 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) 1901 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
1855 return -EIO; 1902 return -EIO;
1856 } 1903 }
1857 1904
1858 vp->max_tx_bw = tx_rate / 100; 1905 vp->max_tx_bw = max_tx_rate / 100;
1859 netdev_info(netdev, 1906 netdev_info(netdev,
1860 "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", 1907 "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1861 tx_rate, vp->max_tx_bw, vf); 1908 max_tx_rate, vp->max_tx_bw, vf);
1909 vp->min_tx_bw = min_tx_rate / 100;
1910 netdev_info(netdev,
1911 "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1912 min_tx_rate, vp->min_tx_bw, vf);
1862 return 0; 1913 return 0;
1863} 1914}
1864 1915
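The rework above tracks the conversion from .ndo_set_vf_tx_rate to the two-parameter .ndo_set_vf_rate, so both a floor and a ceiling are validated before being programmed in units of 100 Mbps. A condensed sketch of the invariants; qlc_validate_rates() is a made-up helper name and the QLC_VF_* constants are assumed from the driver headers:

        /* Sketch only: mirrors the checks in qlcnic_sriov_set_vf_tx_rate().
         * 0 means "unset" for both parameters; rates are in Mbps.
         */
        static int qlc_validate_rates(int min_tx_rate, int max_tx_rate)
        {
                if (!min_tx_rate)
                        min_tx_rate = QLC_VF_MIN_TX_RATE;       /* default floor */

                /* a non-zero max must stay below 10G and above the floor */
                if (max_tx_rate &&
                    (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate))
                        return -EINVAL;

                if (!max_tx_rate)
                        max_tx_rate = 10000;    /* 0 == unlimited (line rate) */

                /* the min must sit inside [QLC_VF_MIN_TX_RATE, max] */
                if (min_tx_rate > max_tx_rate ||
                    min_tx_rate < QLC_VF_MIN_TX_RATE)
                        return -EINVAL;

                return 0;
        }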
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1957 ivi->qos = vp->qos; 2008 ivi->qos = vp->qos;
1958 ivi->spoofchk = vp->spoofchk; 2009 ivi->spoofchk = vp->spoofchk;
1959 if (vp->max_tx_bw == MAX_BW) 2010 if (vp->max_tx_bw == MAX_BW)
1960 ivi->tx_rate = 0; 2011 ivi->max_tx_rate = 0;
2012 else
2013 ivi->max_tx_rate = vp->max_tx_bw * 100;
2014 if (vp->min_tx_bw == MIN_BW)
2015 ivi->min_tx_rate = 0;
1961 else 2016 else
1962 ivi->tx_rate = vp->max_tx_bw * 100; 2017 ivi->min_tx_rate = vp->min_tx_bw * 100;
1963 2018
1964 ivi->vf = vf; 2019 ivi->vf = vf;
1965 return 0; 2020 return 0;
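The get_vf_config side now reports both directions through ifla_vf_info. Restated compactly (same fields as the hunk above; MAX_BW/MIN_BW are assumed to be the driver's "unlimited/unset" sentinels, with bandwidth stored internally in units of 100 Mbps):

        /* sketch: translate internal 100 Mbps units back to Mbps for netlink */
        ivi->max_tx_rate = (vp->max_tx_bw == MAX_BW) ? 0 : vp->max_tx_bw * 100;
        ivi->min_tx_rate = (vp->min_tx_bw == MIN_BW) ? 0 : vp->min_tx_bw * 100;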
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
19#include <linux/sysfs.h> 19#include <linux/sysfs.h>
20#include <linux/aer.h> 20#include <linux/aer.h>
21#include <linux/log2.h> 21#include <linux/log2.h>
22#ifdef CONFIG_QLCNIC_HWMON
23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h>
25#endif
22 26
23#define QLC_STATUS_UNSUPPORTED_CMD -2 27#define QLC_STATUS_UNSUPPORTED_CMD -2
24 28
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
358 if (adapter->npars[i].pci_func == pci_func) 362 if (adapter->npars[i].pci_func == pci_func)
359 return i; 363 return i;
360 } 364 }
365
366 dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
361 return -EINVAL; 367 return -EINVAL;
362} 368}
363 369
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
1243 .write = qlcnic_83xx_sysfs_flash_write_handler, 1249 .write = qlcnic_83xx_sysfs_flash_write_handler,
1244}; 1250};
1245 1251
1252#ifdef CONFIG_QLCNIC_HWMON
1253
1254static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
1255 struct device_attribute *dev_attr,
1256 char *buf)
1257{
1258 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1259 unsigned int temperature = 0, value = 0;
1260
1261 if (qlcnic_83xx_check(adapter))
1262 value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
1263 else if (qlcnic_82xx_check(adapter))
1264 value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
1265
1266 temperature = qlcnic_get_temp_val(value);
1267 /* display millidegrees Celsius */
1268 temperature *= 1000;
1269 return sprintf(buf, "%u\n", temperature);
1270}
1271
1272/* hwmon-sysfs attributes */
1273static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
1274 qlcnic_hwmon_show_temp, NULL, 1);
1275
1276static struct attribute *qlcnic_hwmon_attrs[] = {
1277 &sensor_dev_attr_temp1_input.dev_attr.attr,
1278 NULL
1279};
1280
1281ATTRIBUTE_GROUPS(qlcnic_hwmon);
1282
1283void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
1284{
1285 struct device *dev = &adapter->pdev->dev;
1286 struct device *hwmon_dev;
1287
1288 /* Skip hwmon registration for a VF device */
1289 if (qlcnic_sriov_vf_check(adapter)) {
1290 adapter->ahw->hwmon_dev = NULL;
1291 return;
1292 }
1293 hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
1294 adapter,
1295 qlcnic_hwmon_groups);
1296 if (IS_ERR(hwmon_dev)) {
1297 dev_err(dev, "Cannot register with hwmon, err=%ld\n",
1298 PTR_ERR(hwmon_dev));
1299 hwmon_dev = NULL;
1300 }
1301 adapter->ahw->hwmon_dev = hwmon_dev;
1302}
1303
1304void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
1305{
1306 struct device *hwmon_dev = adapter->ahw->hwmon_dev;
1307 if (hwmon_dev) {
1308 hwmon_device_unregister(hwmon_dev);
1309 adapter->ahw->hwmon_dev = NULL;
1310 }
1311}
1312#endif
1313
1246void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) 1314void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
1247{ 1315{
1248 struct device *dev = &adapter->pdev->dev; 1316 struct device *dev = &adapter->pdev->dev;
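The hwmon hunk above follows the stock groups-based registration pattern: one sensor attribute, one attribute array, ATTRIBUTE_GROUPS() to generate the *_groups table, then hwmon_device_register_with_groups(). A minimal sketch of the same pattern (my_* names are placeholders):

        #include <linux/hwmon.h>
        #include <linux/hwmon-sysfs.h>

        static ssize_t my_show_temp(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                /* hwmon expects temperatures in millidegrees Celsius */
                return sprintf(buf, "%d\n", 42000);
        }

        static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, my_show_temp, NULL, 1);

        static struct attribute *my_attrs[] = {
                &sensor_dev_attr_temp1_input.dev_attr.attr,
                NULL
        };
        ATTRIBUTE_GROUPS(my);   /* generates my_groups */

        /* in probe():
         *      hwmon = hwmon_device_register_with_groups(dev, "mydrv",
         *                                                drvdata, my_groups);
         */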
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..b40050e03a56 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
3595 } 3595 }
3596 return status; 3596 return status;
3597err_irq: 3597err_irq:
3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); 3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3599 ql_free_irq(qdev); 3599 ql_free_irq(qdev);
3600 return status; 3600 return status;
3601} 3601}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
4770 ndev->irq = pdev->irq; 4770 ndev->irq = pdev->irq;
4771 4771
4772 ndev->netdev_ops = &qlge_netdev_ops; 4772 ndev->netdev_ops = &qlge_netdev_ops;
4773 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); 4773 ndev->ethtool_ops = &qlge_ethtool_ops;
4774 ndev->watchdog_timeo = 10 * HZ; 4774 ndev->watchdog_timeo = 10 * HZ;
4775 4775
4776 err = register_netdev(ndev); 4776 err = register_netdev(ndev);
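This and the matching conversions below are one tree-wide cleanup: SET_ETHTOOL_OPS() was only a wrapper around a pointer assignment, so callers now assign the const ops table directly:

        /* equivalent of SET_ETHTOOL_OPS(dev, &my_ethtool_ops) */
        dev->ethtool_ops = &my_ethtool_ops;     /* const struct ethtool_ops */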
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079f231d..be425ad5e824 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7125 for (i = 0; i < ETH_ALEN; i++) 7125 for (i = 0; i < ETH_ALEN; i++)
7126 dev->dev_addr[i] = RTL_R8(MAC0 + i); 7126 dev->dev_addr[i] = RTL_R8(MAC0 + i);
7127 7127
7128 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 7128 dev->ethtool_ops = &rtl8169_ethtool_ops;
7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
7130 7130
7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509ccd33b..7622213beef1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
307}; 307};
308 308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { 309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [EDMR] = 0x0000,
311 [EDTRR] = 0x0004,
312 [EDRRR] = 0x0008,
313 [TDLAR] = 0x000c,
314 [RDLAR] = 0x0010,
315 [EESR] = 0x0014,
316 [EESIPR] = 0x0018,
317 [TRSCER] = 0x001c,
318 [RMFCR] = 0x0020,
319 [TFTR] = 0x0024,
320 [FDR] = 0x0028,
321 [RMCR] = 0x002c,
322 [EDOCR] = 0x0030,
323 [FCFTR] = 0x0034,
324 [RPADIR] = 0x0038,
325 [TRIMD] = 0x003c,
326 [RBWAR] = 0x0040,
327 [RDFAR] = 0x0044,
328 [TBRAR] = 0x004c,
329 [TDFAR] = 0x0050,
330
310 [ECMR] = 0x0160, 331 [ECMR] = 0x0160,
311 [ECSR] = 0x0164, 332 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168, 333 [ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
546 .register_type = SH_ETH_REG_FAST_SH4, 567 .register_type = SH_ETH_REG_FAST_SH4,
547 568
548 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 569 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549 .rmcr_value = RMCR_RNC,
550 570
551 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 571 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
552 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 572 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
624 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 644 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
625 EESR_TDE | EESR_ECI, 645 EESR_TDE | EESR_ECI,
626 .fdr_value = 0x0000072f, 646 .fdr_value = 0x0000072f,
627 .rmcr_value = RMCR_RNC,
628 647
629 .irq_flags = IRQF_SHARED, 648 .irq_flags = IRQF_SHARED,
630 .apr = 1, 649 .apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
752 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 771 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
753 EESR_TDE | EESR_ECI, 772 EESR_TDE | EESR_ECI,
754 .fdr_value = 0x0000070f, 773 .fdr_value = 0x0000070f,
755 .rmcr_value = RMCR_RNC,
756 774
757 .apr = 1, 775 .apr = 1,
758 .mpr = 1, 776 .mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
784 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 802 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
785 EESR_TDE | EESR_ECI, 803 EESR_TDE | EESR_ECI,
786 .fdr_value = 0x0000070f, 804 .fdr_value = 0x0000070f,
787 .rmcr_value = RMCR_RNC,
788 805
789 .no_psr = 1, 806 .no_psr = 1,
790 .apr = 1, 807 .apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
833 if (!cd->fdr_value) 850 if (!cd->fdr_value)
834 cd->fdr_value = DEFAULT_FDR_INIT; 851 cd->fdr_value = DEFAULT_FDR_INIT;
835 852
836 if (!cd->rmcr_value)
837 cd->rmcr_value = DEFAULT_RMCR_VALUE;
838
839 if (!cd->tx_check) 853 if (!cd->tx_check)
840 cd->tx_check = DEFAULT_TX_CHECK; 854 cd->tx_check = DEFAULT_TX_CHECK;
841 855
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1287 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 1301 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1288 sh_eth_write(ndev, 0, TFTR); 1302 sh_eth_write(ndev, 0, TFTR);
1289 1303
1290 /* Frame recv control */ 1304 /* Frame recv control (enable multiple-packets per rx irq) */
1291 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 1305 sh_eth_write(ndev, RMCR_RNC, RMCR);
1292 1306
1293 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1307 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1294 1308
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1385 int entry = mdp->cur_rx % mdp->num_rx_ring; 1399 int entry = mdp->cur_rx % mdp->num_rx_ring;
1386 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1400 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1387 struct sk_buff *skb; 1401 struct sk_buff *skb;
1388 int exceeded = 0;
1389 u16 pkt_len = 0; 1402 u16 pkt_len = 0;
1390 u32 desc_status; 1403 u32 desc_status;
1391 1404
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1397 if (--boguscnt < 0) 1410 if (--boguscnt < 0)
1398 break; 1411 break;
1399 1412
1400 if (*quota <= 0) { 1413 if (*quota <= 0)
1401 exceeded = 1;
1402 break; 1414 break;
1403 } 1415
1404 (*quota)--; 1416 (*quota)--;
1405 1417
1406 if (!(desc_status & RDFEND)) 1418 if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1448 ndev->stats.rx_packets++; 1460 ndev->stats.rx_packets++;
1449 ndev->stats.rx_bytes += pkt_len; 1461 ndev->stats.rx_bytes += pkt_len;
1450 } 1462 }
1451 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1452 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1463 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1453 rxdesc = &mdp->rx_ring[entry]; 1464 rxdesc = &mdp->rx_ring[entry];
1454 } 1465 }
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1505 sh_eth_write(ndev, EDRRR_R, EDRRR);
1495 } 1506 }
1496 1507
1497 return exceeded; 1508 return *quota <= 0;
1498} 1509}
1499 1510
1500static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1511static void sh_eth_rcv_snd_disable(struct net_device *ndev)
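Dropping the local "exceeded" flag above leaves the remaining *quota as the single source of truth for budget accounting. A sketch of how a NAPI poll routine consumes it (my_poll()/my_rx() are made-up names; sh_eth's actual poll logic is analogous):

        static int my_poll(struct napi_struct *napi, int budget)
        {
                int quota = budget;

                /* my_rx() decrements quota per packet, returns quota <= 0 */
                if (!my_rx(napi, &quota)) {
                        napi_complete(napi);
                        /* re-enable rx interrupts here */
                }
                return budget - quota;  /* packets processed this round */
        }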
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
2627 pdev->name, pdev->id); 2638 pdev->name, pdev->id);
2628 2639
2629 /* PHY IRQ */ 2640 /* PHY IRQ */
2630 mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR, 2641 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2631 GFP_KERNEL); 2642 GFP_KERNEL);
2632 if (!mdp->mii_bus->irq) { 2643 if (!mdp->mii_bus->irq) {
2633 ret = -ENOMEM; 2644 ret = -ENOMEM;
2634 goto out_free_bus; 2645 goto out_free_bus;
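devm_kmalloc_array(dev, n, size, flags) performs the n * size multiplication with overflow checking; unlike devm_kzalloc() it does not zero the buffer, which is fine here because the IRQ table is fully initialized before use. Usage sketch:

        int *irqs = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
                                       GFP_KERNEL);
        if (!irqs)
                return -ENOMEM;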
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2843 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; 2854 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2844 else 2855 else
2845 ndev->netdev_ops = &sh_eth_netdev_ops; 2856 ndev->netdev_ops = &sh_eth_netdev_ops;
2846 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2857 ndev->ethtool_ops = &sh_eth_ethtool_ops;
2847 ndev->watchdog_timeo = TX_TIMEOUT; 2858 ndev->watchdog_timeo = TX_TIMEOUT;
2848 2859
2849 /* debug message level */ 2860 /* debug message level */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
319enum RMCR_BIT { 319enum RMCR_BIT {
320 RMCR_RNC = 0x00000001, 320 RMCR_RNC = 0x00000001,
321}; 321};
322#define DEFAULT_RMCR_VALUE 0x00000000
323 322
324/* ECMR */ 323/* ECMR */
325enum FELIC_MODE_BIT { 324enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
466 unsigned long fdr_value; 465 unsigned long fdr_value;
467 unsigned long fcftr_value; 466 unsigned long fcftr_value;
468 unsigned long rpadir_value; 467 unsigned long rpadir_value;
469 unsigned long rmcr_value;
470 468
471 /* interrupt checking mask */ 469 /* interrupt checking mask */
472 unsigned long tx_check; 470 unsigned long tx_check;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa50eeb7..c0981ae45874 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
520 520
521void sxgbe_set_ethtool_ops(struct net_device *netdev) 521void sxgbe_set_ethtool_ops(struct net_device *netdev)
522{ 522{
523 SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops); 523 netdev->ethtool_ops = &sxgbe_ethtool_ops;
524} 524}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
425 * @rx_rsize: ring size 425 * @rx_rsize: ring size
426 * Description: this function initializes the DMA RX descriptor 426 * Description: this function initializes the DMA RX descriptor
427 */ 427 */
428void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, 428static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
429 int rx_rsize) 429 int rx_rsize)
430{ 430{
431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
432 rx_ring->dma_rx, rx_ring->dma_rx_phy); 432 rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
519 * @tx_rsize: ring size 519 * @tx_rsize: ring size
520 * Description: this function initializes the DMA TX descriptor 520 * Description: this function initializes the DMA TX descriptor
521 */ 521 */
522void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring, 522static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
523 int tx_rsize) 523 int tx_rsize)
524{ 524{
525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
526 tx_ring->dma_tx, tx_ring->dma_tx_phy); 526 tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
1221 1221
1222 return 0; 1222 return 0;
1223} 1223}
1224
1225/* Prepare first Tx descriptor for doing TSO operation */ 1224/* Prepare first Tx descriptor for doing TSO operation */
1226void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, 1225static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
1227 struct sxgbe_tx_norm_desc *first_desc, 1226 struct sxgbe_tx_norm_desc *first_desc,
1228 struct sk_buff *skb) 1227 struct sk_buff *skb)
1229{ 1228{
1230 unsigned int total_hdr_len, tcp_hdr_len; 1229 unsigned int total_hdr_len, tcp_hdr_len;
1231 1230
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
1914 readl(ioaddr + SXGBE_HASH_LOW)); 1913 readl(ioaddr + SXGBE_HASH_LOW));
1915} 1914}
1916 1915
1917/**
1918 * sxgbe_config - entry point for changing configuration mode passed on by
1919 * ifconfig
1920 * @dev : pointer to the device structure
1921 * @map : pointer to the device mapping structure
1922 * Description:
1923 * This function is a driver entry point which gets called by the kernel
1924 * whenever some device configuration is changed.
1925 * Return value:
1926 * This function returns 0 if success and appropriate error otherwise.
1927 */
1928static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1929{
1930 struct sxgbe_priv_data *priv = netdev_priv(dev);
1931
1932 /* Can't act on a running interface */
1933 if (dev->flags & IFF_UP)
1934 return -EBUSY;
1935
1936 /* Don't allow changing the I/O address */
1937 if (map->base_addr != (unsigned long)priv->ioaddr) {
1938 netdev_warn(dev, "can't change I/O address\n");
1939 return -EOPNOTSUPP;
1940 }
1941
1942 /* Don't allow changing the IRQ */
1943 if (map->irq != priv->irq) {
1944 netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
1945 return -EOPNOTSUPP;
1946 }
1947
1948 return 0;
1949}
1950
1951#ifdef CONFIG_NET_POLL_CONTROLLER 1916#ifdef CONFIG_NET_POLL_CONTROLLER
1952/** 1917/**
1953 * sxgbe_poll_controller - entry point for polling receive by device 1918 * sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
2009 .ndo_set_rx_mode = sxgbe_set_rx_mode, 1974 .ndo_set_rx_mode = sxgbe_set_rx_mode,
2010 .ndo_tx_timeout = sxgbe_tx_timeout, 1975 .ndo_tx_timeout = sxgbe_tx_timeout,
2011 .ndo_do_ioctl = sxgbe_ioctl, 1976 .ndo_do_ioctl = sxgbe_ioctl,
2012 .ndo_set_config = sxgbe_config,
2013#ifdef CONFIG_NET_POLL_CONTROLLER 1977#ifdef CONFIG_NET_POLL_CONTROLLER
2014 .ndo_poll_controller = sxgbe_poll_controller, 1978 .ndo_poll_controller = sxgbe_poll_controller,
2015#endif 1979#endif
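sxgbe_config() (and the stmmac/cpmac equivalents removed below) only ever refused ifmap changes, which is what the core already does when .ndo_set_config is absent. A sketch of the effective SIOCSIFMAP behavior these drivers now rely on (dev_set_map() is a made-up name for the relevant dev_ifsioc() logic):

        static int dev_set_map(struct net_device *dev, struct ifmap *map)
        {
                if (!dev->netdev_ops->ndo_set_config)
                        return -EOPNOTSUPP;     /* no handler: refuse changes */
                return dev->netdev_ops->ndo_set_config(dev, map);
        }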
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5a3f1b..81437d91df99 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
188 188
189/* L3/L4 function registers */ 189/* L3/L4 function registers */
190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
191#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
192#define SXGBE_CORE_L34_DATA_REG 0x0C04 191#define SXGBE_CORE_L34_DATA_REG 0x0C04
193 192
194/* ARP registers */ 193/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595fd3cc5..1e274045970f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
2248 } else { 2248 } else {
2249 net_dev->netdev_ops = &efx_farch_netdev_ops; 2249 net_dev->netdev_ops = &efx_farch_netdev_ops;
2250 } 2250 }
2251 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2251 net_dev->ethtool_ops = &efx_ethtool_ops;
2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2253 2253
2254 rtnl_lock(); 2254 rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07c24c2..74739c4b9997 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1033 0 : ARRAY_SIZE(efx->rx_indir_table)); 1033 0 : ARRAY_SIZE(efx->rx_indir_table));
1034} 1034}
1035 1035
1036static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) 1036static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
1037{ 1037{
1038 struct efx_nic *efx = netdev_priv(net_dev); 1038 struct efx_nic *efx = netdev_priv(net_dev);
1039 1039
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, 1044static int efx_ethtool_set_rxfh(struct net_device *net_dev,
1045 const u32 *indir) 1045 const u32 *indir, const u8 *key)
1046{ 1046{
1047 struct efx_nic *efx = netdev_priv(net_dev); 1047 struct efx_nic *efx = netdev_priv(net_dev);
1048 1048
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
1125 .get_rxnfc = efx_ethtool_get_rxnfc, 1125 .get_rxnfc = efx_ethtool_get_rxnfc,
1126 .set_rxnfc = efx_ethtool_set_rxnfc, 1126 .set_rxnfc = efx_ethtool_set_rxnfc,
1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1128 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1128 .get_rxfh = efx_ethtool_get_rxfh,
1129 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1129 .set_rxfh = efx_ethtool_set_rxfh,
1130 .get_ts_info = efx_ethtool_get_ts_info, 1130 .get_ts_info = efx_ethtool_get_ts_info,
1131 .get_module_info = efx_ethtool_get_module_info, 1131 .get_module_info = efx_ethtool_get_module_info,
1132 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1132 .get_module_eeprom = efx_ethtool_get_module_eeprom,
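The rename above tracks the ethtool core consolidating its RSS accessors: .get_rxfh/.set_rxfh take both the indirection table and the hash key, and a driver that only supports the table simply ignores key (which may be NULL). Sketch, assuming a hypothetical priv->rss_table:

        static int my_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
        {
                struct my_priv *priv = netdev_priv(dev);

                if (indir)
                        memcpy(indir, priv->rss_table, sizeof(priv->rss_table));
                /* no hash key exposed: leave *key untouched */
                return 0;
        }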
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119b67b3..afb94aa2c15e 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
66#define EFX_USE_QWORD_IO 1 66#define EFX_USE_QWORD_IO 1
67#endif 67#endif
68 68
69/* Hardware issue requires that only 64-bit naturally aligned writes
70 * are seen by hardware. It's not strictly necessary to restrict to
71 * x86_64 arch, but done for safety since unusual write combining behaviour
72 * can break PIO.
73 */
74#ifdef CONFIG_X86_64
69/* PIO is a win only if write-combining is possible */ 75/* PIO is a win only if write-combining is possible */
70#ifdef ARCH_HAS_IOREMAP_WC 76#ifdef ARCH_HAS_IOREMAP_WC
71#define EFX_USE_PIO 1 77#define EFX_USE_PIO 1
72#endif 78#endif
79#endif
73 80
74#ifdef EFX_USE_QWORD_IO 81#ifdef EFX_USE_QWORD_IO
75static inline void _efx_writeq(struct efx_nic *efx, __le64 value, 82static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e77896..43d2e64546ed 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1633 1633
1634 ivi->vf = vf_i; 1634 ivi->vf = vf_i;
1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr); 1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1636 ivi->tx_rate = 0; 1636 ivi->max_tx_rate = 0;
1637 ivi->min_tx_rate = 0;
1637 tci = ntohs(vf->addr.tci); 1638 tci = ntohs(vf->addr.tci);
1638 ivi->vlan = tci & VLAN_VID_MASK; 1639 ivi->vlan = tci & VLAN_VID_MASK;
1639 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; 1640 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
189 u8 buf[L1_CACHE_BYTES]; 189 u8 buf[L1_CACHE_BYTES];
190}; 190};
191 191
192/* Copy in explicit 64-bit writes. */
193static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
194{
195 u64 *src64 = src;
196 u64 __iomem *dest64 = dest;
197 size_t l64 = len / 8;
198 size_t i;
199
200 for (i = 0; i < l64; i++)
201 writeq(src64[i], &dest64[i]);
202}
203
192/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 204/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
193 * Advances piobuf pointer. Leaves additional data in the copy buffer. 205 * Advances piobuf pointer. Leaves additional data in the copy buffer.
194 */ 206 */
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
198{ 210{
199 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 211 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
200 212
201 memcpy_toio(*piobuf, data, block_len); 213 efx_memcpy_64(*piobuf, data, block_len);
202 *piobuf += block_len; 214 *piobuf += block_len;
203 len -= block_len; 215 len -= block_len;
204 216
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
230 if (copy_buf->used < sizeof(copy_buf->buf)) 242 if (copy_buf->used < sizeof(copy_buf->buf))
231 return; 243 return;
232 244
233 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 245 efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
234 *piobuf += sizeof(copy_buf->buf); 246 *piobuf += sizeof(copy_buf->buf);
235 data += copy_to_buf; 247 data += copy_to_buf;
236 len -= copy_to_buf; 248 len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
245{ 257{
246 /* if there's anything in it, write the whole buffer, including junk */ 258 /* if there's anything in it, write the whole buffer, including junk */
247 if (copy_buf->used) 259 if (copy_buf->used)
248 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 260 efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
249} 261}
250 262
251/* Traverse skb structure and copy fragments in to PIO buffer. 263/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
304 */ 316 */
305 BUILD_BUG_ON(L1_CACHE_BYTES > 317 BUILD_BUG_ON(L1_CACHE_BYTES >
306 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
307 memcpy_toio(tx_queue->piobuf, skb->data, 319 efx_memcpy_64(tx_queue->piobuf, skb->data,
308 ALIGN(skb->len, L1_CACHE_BYTES)); 320 ALIGN(skb->len, L1_CACHE_BYTES));
309 } 321 }
310 322
311 EFX_POPULATE_QWORD_5(buffer->option, 323 EFX_POPULATE_QWORD_5(buffer->option,
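efx_memcpy_64() above exists because memcpy_toio() may fall back to sub-64-bit accesses, while this hardware only accepts naturally aligned 64-bit writes. The helper's implicit contract, made explicit in a sketch (pio_copy64() is a made-up name):

        static void pio_copy64(u64 __iomem *dst, const u64 *src, size_t len)
        {
                size_t i;

                WARN_ON_ONCE(len & 7);  /* callers pad to whole qwords */
                for (i = 0; i < len / 8; i++)
                        writeq(src[i], &dst[i]);
        }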
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48a519c..a86339903b9b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
1877 1877
1878 dev->netdev_ops = &sis190_netdev_ops; 1878 dev->netdev_ops = &sis190_netdev_ops;
1879 1879
1880 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1880 dev->ethtool_ops = &sis190_ethtool_ops;
1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1882 1882
1883 spin_lock_init(&tp->lock); 1883 spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868571f9..6b33127ab352 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
318 318
319 /* The SMC91c92-specific entries in the device structure. */ 319 /* The SMC91c92-specific entries in the device structure. */
320 dev->netdev_ops = &smc_netdev_ops; 320 dev->netdev_ops = &smc_netdev_ops;
321 SET_ETHTOOL_OPS(dev, &ethtool_ops); 321 dev->ethtool_ops = &ethtool_ops;
322 dev->watchdog_timeo = TX_TIMEOUT; 322 dev->watchdog_timeo = TX_TIMEOUT;
323 323
324 smc->mii_if.dev = dev; 324 smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2477 goto out_disable_resources; 2477 goto out_disable_resources;
2478 } 2478 }
2479 2479
2480 netif_carrier_off(dev);
2481
2480 retval = register_netdev(dev); 2482 retval = register_netdev(dev);
2481 if (retval) { 2483 if (retval) {
2482 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2484 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
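Calling netif_carrier_off() before register_netdev() closes the window where the stack could treat a freshly registered interface as "link up" before the PHY has reported anything. Probe-ordering sketch:

        netif_carrier_off(ndev);        /* start with carrier down */
        ret = register_netdev(ndev);    /* now visible to userspace */
        if (ret)
                goto out_err;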
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb85c8ef..c62e67f3c2f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
322 return -EBUSY; 322 return -EBUSY;
323 } 323 }
324 cmd->transceiver = XCVR_INTERNAL; 324 cmd->transceiver = XCVR_INTERNAL;
325 spin_lock_irq(&priv->lock);
326 rc = phy_ethtool_gset(phy, cmd); 325 rc = phy_ethtool_gset(phy, cmd);
327 spin_unlock_irq(&priv->lock);
328 return rc; 326 return rc;
329} 327}
330 328
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
431 if (priv->pcs) /* FIXME */ 429 if (priv->pcs) /* FIXME */
432 return; 430 return;
433 431
434 spin_lock(&priv->lock);
435
436 pause->rx_pause = 0; 432 pause->rx_pause = 0;
437 pause->tx_pause = 0; 433 pause->tx_pause = 0;
438 pause->autoneg = priv->phydev->autoneg; 434 pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
442 if (priv->flow_ctrl & FLOW_TX) 438 if (priv->flow_ctrl & FLOW_TX)
443 pause->tx_pause = 1; 439 pause->tx_pause = 1;
444 440
445 spin_unlock(&priv->lock);
446} 441}
447 442
448static int 443static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
457 if (priv->pcs) /* FIXME */ 452 if (priv->pcs) /* FIXME */
458 return -EOPNOTSUPP; 453 return -EOPNOTSUPP;
459 454
460 spin_lock(&priv->lock);
461
462 if (pause->rx_pause) 455 if (pause->rx_pause)
463 new_pause |= FLOW_RX; 456 new_pause |= FLOW_RX;
464 if (pause->tx_pause) 457 if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
473 } else 466 } else
474 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex, 467 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
475 priv->flow_ctrl, priv->pause); 468 priv->flow_ctrl, priv->pause);
476 spin_unlock(&priv->lock);
477 return ret; 469 return ret;
478} 470}
479 471
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
784 776
785void stmmac_set_ethtool_ops(struct net_device *netdev) 777void stmmac_set_ethtool_ops(struct net_device *netdev)
786{ 778{
787 SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); 779 netdev->ethtool_ops = &stmmac_ethtool_ops;
788} 780}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d2e8dc..057a1208e594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
1753 } 1753 }
1754 1754
1755 /* Request the IRQ lines */ 1755 /* Request the IRQ lines */
1756 if (priv->lpi_irq != -ENXIO) { 1756 if (priv->lpi_irq > 0) {
1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1758 dev->name, dev); 1758 dev->name, dev);
1759 if (unlikely(ret < 0)) { 1759 if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
1813 free_irq(dev->irq, dev); 1813 free_irq(dev->irq, dev);
1814 if (priv->wol_irq != dev->irq) 1814 if (priv->wol_irq != dev->irq)
1815 free_irq(priv->wol_irq, dev); 1815 free_irq(priv->wol_irq, dev);
1816 if (priv->lpi_irq != -ENXIO) 1816 if (priv->lpi_irq > 0)
1817 free_irq(priv->lpi_irq, dev); 1817 free_irq(priv->lpi_irq, dev);
1818 1818
1819 /* Stop TX/RX DMA and clear the descriptors */ 1819 /* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
2212 stmmac_tx_err(priv); 2212 stmmac_tx_err(priv);
2213} 2213}
2214 2214
2215/* Configuration changes (passed on by ifconfig) */
2216static int stmmac_config(struct net_device *dev, struct ifmap *map)
2217{
2218 if (dev->flags & IFF_UP) /* can't act on a running interface */
2219 return -EBUSY;
2220
2221 /* Don't allow changing the I/O address */
2222 if (map->base_addr != dev->base_addr) {
2223 pr_warn("%s: can't change I/O address\n", dev->name);
2224 return -EOPNOTSUPP;
2225 }
2226
2227 /* Don't allow changing the IRQ */
2228 if (map->irq != dev->irq) {
2229 pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
2230 return -EOPNOTSUPP;
2231 }
2232
2233 return 0;
2234}
2235
2236/** 2215/**
2237 * stmmac_set_rx_mode - entry point for multicast addressing 2216 * stmmac_set_rx_mode - entry point for multicast addressing
2238 * @dev : pointer to the device structure 2217 * @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
2598 .ndo_set_rx_mode = stmmac_set_rx_mode, 2577 .ndo_set_rx_mode = stmmac_set_rx_mode,
2599 .ndo_tx_timeout = stmmac_tx_timeout, 2578 .ndo_tx_timeout = stmmac_tx_timeout,
2600 .ndo_do_ioctl = stmmac_ioctl, 2579 .ndo_do_ioctl = stmmac_ioctl,
2601 .ndo_set_config = stmmac_config,
2602#ifdef CONFIG_NET_POLL_CONTROLLER 2580#ifdef CONFIG_NET_POLL_CONTROLLER
2603 .ndo_poll_controller = stmmac_poll_controller, 2581 .ndo_poll_controller = stmmac_poll_controller,
2604#endif 2582#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
205 if (new_bus == NULL) 205 if (new_bus == NULL)
206 return -ENOMEM; 206 return -ENOMEM;
207 207
208 if (mdio_bus_data->irqs) 208 if (mdio_bus_data->irqs) {
209 irqlist = mdio_bus_data->irqs; 209 irqlist = mdio_bus_data->irqs;
210 else 210 } else {
211 for (addr = 0; addr < PHY_MAX_ADDR; addr++)
212 priv->mii_irq[addr] = PHY_POLL;
211 irqlist = priv->mii_irq; 213 irqlist = priv->mii_irq;
214 }
212 215
213#ifdef CONFIG_OF 216#ifdef CONFIG_OF
214 if (priv->device->of_node) 217 if (priv->device->of_node)
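Without the loop added above, a zeroed priv->mii_irq[] would be handed to the MDIO core as "every PHY uses IRQ 0"; PHY_POLL (-1) from <linux/phy.h> is the explicit "no interrupt, poll the PHY" marker:

        for (addr = 0; addr < PHY_MAX_ADDR; addr++)
                irqlist[addr] = PHY_POLL;       /* poll unless platform says otherwise */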
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef5108bea..ea7a65be1f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
237 237
238 /* Get the MAC information */ 238 /* Get the MAC information */
239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); 239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
240 if (priv->dev->irq == -ENXIO) { 240 if (priv->dev->irq < 0) {
241 pr_err("%s: ERROR: MAC IRQ configuration " 241 if (priv->dev->irq != -EPROBE_DEFER) {
242 "information not found\n", __func__); 242 netdev_err(priv->dev,
243 return -ENXIO; 243 "MAC IRQ configuration information not found\n");
244 }
245 return priv->dev->irq;
244 } 246 }
245 247
246 /* 248 /*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
252 * so the driver will continue to use the mac irq (ndev->irq) 254 * so the driver will continue to use the mac irq (ndev->irq)
253 */ 255 */
254 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); 256 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
255 if (priv->wol_irq == -ENXIO) 257 if (priv->wol_irq < 0) {
258 if (priv->wol_irq == -EPROBE_DEFER)
259 return -EPROBE_DEFER;
256 priv->wol_irq = priv->dev->irq; 260 priv->wol_irq = priv->dev->irq;
261 }
257 262
258 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); 263 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
264 if (priv->lpi_irq == -EPROBE_DEFER)
265 return -EPROBE_DEFER;
259 266
260 platform_set_drvdata(pdev, priv->dev); 267 platform_set_drvdata(pdev, priv->dev);
261 268
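The probe now propagates any negative value from platform_get_irq_byname() instead of special-casing -ENXIO; in particular -EPROBE_DEFER must be passed up unmodified (and without an error message) so the probe is retried once the interrupt parent appears. Pattern sketch:

        irq = platform_get_irq_byname(pdev, "macirq");
        if (irq < 0) {
                if (irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "macirq not found\n");
                return irq;     /* may be -EPROBE_DEFER */
        }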
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead87759ab4..38da73a2a886 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
2413 .get_ethtool_stats = bdx_get_ethtool_stats, 2413 .get_ethtool_stats = bdx_get_ethtool_stats,
2414 }; 2414 };
2415 2415
2416 SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); 2416 netdev->ethtool_ops = &bdx_ethtool_ops;
2417} 2417}
2418 2418
2419/** 2419/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
313 313
314static struct mii_bus *cpmac_mii; 314static struct mii_bus *cpmac_mii;
315 315
316static int cpmac_config(struct net_device *dev, struct ifmap *map)
317{
318 if (dev->flags & IFF_UP)
319 return -EBUSY;
320
321 /* Don't allow changing the I/O address */
322 if (map->base_addr != dev->base_addr)
323 return -EOPNOTSUPP;
324
325 /* ignore other fields */
326 return 0;
327}
328
329static void cpmac_set_multicast_list(struct net_device *dev) 316static void cpmac_set_multicast_list(struct net_device *dev)
330{ 317{
331 struct netdev_hw_addr *ha; 318 struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
1100 .ndo_tx_timeout = cpmac_tx_timeout, 1087 .ndo_tx_timeout = cpmac_tx_timeout,
1101 .ndo_set_rx_mode = cpmac_set_multicast_list, 1088 .ndo_set_rx_mode = cpmac_set_multicast_list,
1102 .ndo_do_ioctl = cpmac_ioctl, 1089 .ndo_do_ioctl = cpmac_ioctl,
1103 .ndo_set_config = cpmac_config,
1104 .ndo_change_mtu = eth_change_mtu, 1090 .ndo_change_mtu = eth_change_mtu,
1105 .ndo_validate_addr = eth_validate_addr, 1091 .ndo_validate_addr = eth_validate_addr,
1106 .ndo_set_mac_address = eth_mac_addr, 1092 .ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9ae8366..aa8bf45e53dc 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) 29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) 30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
31 31
32#define GMII_SEL_MODE_MASK 0x3
33
32struct cpsw_phy_sel_priv { 34struct cpsw_phy_sel_priv {
33 struct device *dev; 35 struct device *dev;
34 u32 __iomem *gmii_sel; 36 u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
65 break; 67 break;
66 }; 68 };
67 69
68 mask = 0x3 << (slave * 2) | BIT(slave + 6); 70 mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
69 mode <<= slave * 2; 71 mode <<= slave * 2;
70 72
71 if (priv->rmii_clock_external) { 73 if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
81 writel(reg, priv->gmii_sel); 83 writel(reg, priv->gmii_sel);
82} 84}
83 85
86static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
87 phy_interface_t phy_mode, int slave)
88{
89 u32 reg;
90 u32 mask;
91 u32 mode = 0;
92
93 reg = readl(priv->gmii_sel);
94
95 switch (phy_mode) {
96 case PHY_INTERFACE_MODE_RMII:
97 mode = AM33XX_GMII_SEL_MODE_RMII;
98 break;
99
100 case PHY_INTERFACE_MODE_RGMII:
101 case PHY_INTERFACE_MODE_RGMII_ID:
102 case PHY_INTERFACE_MODE_RGMII_RXID:
103 case PHY_INTERFACE_MODE_RGMII_TXID:
104 mode = AM33XX_GMII_SEL_MODE_RGMII;
105 break;
106
107 case PHY_INTERFACE_MODE_MII:
108 default:
109 mode = AM33XX_GMII_SEL_MODE_MII;
110 break;
111 };
112
113 switch (slave) {
114 case 0:
115 mask = GMII_SEL_MODE_MASK;
116 break;
117 case 1:
118 mask = GMII_SEL_MODE_MASK << 4;
119 mode <<= 4;
120 break;
121 default:
122 dev_err(priv->dev, "invalid slave number...\n");
123 return;
124 }
125
126 if (priv->rmii_clock_external)
127 dev_err(priv->dev, "RMII External clock is not supported\n");
128
129 reg &= ~mask;
130 reg |= mode;
131
132 writel(reg, priv->gmii_sel);
133}
134
84static struct platform_driver cpsw_phy_sel_driver; 135static struct platform_driver cpsw_phy_sel_driver;
85static int match(struct device *dev, void *data) 136static int match(struct device *dev, void *data)
86{ 137{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
112 .compatible = "ti,am3352-cpsw-phy-sel", 163 .compatible = "ti,am3352-cpsw-phy-sel",
113 .data = &cpsw_gmii_sel_am3352, 164 .data = &cpsw_gmii_sel_am3352,
114 }, 165 },
166 {
167 .compatible = "ti,dra7xx-cpsw-phy-sel",
168 .data = &cpsw_gmii_sel_dra7xx,
169 },
170 {
171 .compatible = "ti,am43xx-cpsw-phy-sel",
172 .data = &cpsw_gmii_sel_am3352,
173 },
115 {} 174 {}
116}; 175};
117MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); 176MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
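The match table maps each compatible string to a per-SoC configuration function through .data, so probe can dispatch without an if/else chain on compatibles. Lookup sketch (mirrors what cpsw_phy_sel_probe() does):

        const struct of_device_id *of_id;

        of_id = of_match_device(cpsw_phy_sel_id_table, &pdev->dev);
        if (!of_id)
                return -EINVAL;
        priv->cpsw_phy_sel = of_id->data;       /* e.g. cpsw_gmii_sel_dra7xx */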
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
132 return -ENOMEM; 191 return -ENOMEM;
133 } 192 }
134 193
194 priv->dev = &pdev->dev;
135 priv->cpsw_phy_sel = of_id->data; 195 priv->cpsw_phy_sel = of_id->data;
136 196
137 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); 197 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..ff380dac6629 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
143 u32 i; \ 143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \ 144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \ 145 enable_irq(priv->irqs_table[i]); \
146 } while (0); 146 } while (0)
147#define cpsw_disable_irq(priv) \ 147#define cpsw_disable_irq(priv) \
148 do { \ 148 do { \
149 u32 i; \ 149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \ 150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \ 151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0); 152 } while (0)
153 153
154#define cpsw_slave_index(priv) \ 154#define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 155 ((priv->data.dual_emac) ? priv->emac_port : \
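The dropped semicolons matter because "do { ... } while (0);" inside a macro expands to two statements, which breaks any if/else caller; without the trailing semicolon the macro behaves like a single statement:

        /* only compiles once the macros end in "while (0)" without ';' */
        if (priv->irq_enabled)
                cpsw_disable_irq(priv);
        else
                cpsw_enable_irq(priv);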
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ 248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ 249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ 250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
251#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */ 251#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
252#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
252#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ 253#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
253#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ 254#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
254#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ 255#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
255#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ 256#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
256#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ 257#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
257 258
258#define CTRL_TS_BITS \ 259#define CTRL_V2_TS_BITS \
259 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \ 260 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
260 TS_ANNEX_D_EN | TS_LTYPE1_EN) 261 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
262
263#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
264#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
265#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
266
261 267
262#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN) 268#define CTRL_V3_TS_BITS \
263#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN) 269 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
264#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN) 270 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
271 TS_LTYPE1_EN)
272
273#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
274#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
275#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
265 276
266/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ 277/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
267#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ 278#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1376 slave = &priv->slaves[priv->data.active_slave]; 1387 slave = &priv->slaves[priv->data.active_slave];
1377 1388
1378 ctrl = slave_read(slave, CPSW2_CONTROL); 1389 ctrl = slave_read(slave, CPSW2_CONTROL);
1379 ctrl &= ~CTRL_ALL_TS_MASK; 1390 switch (priv->version) {
1391 case CPSW_VERSION_2:
1392 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1380 1393
1381 if (priv->cpts->tx_enable) 1394 if (priv->cpts->tx_enable)
1382 ctrl |= CTRL_TX_TS_BITS; 1395 ctrl |= CTRL_V2_TX_TS_BITS;
1383 1396
1384 if (priv->cpts->rx_enable) 1397 if (priv->cpts->rx_enable)
1385 ctrl |= CTRL_RX_TS_BITS; 1398 ctrl |= CTRL_V2_RX_TS_BITS;
1399 break;
1400 case CPSW_VERSION_3:
1401 default:
1402 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1403
1404 if (priv->cpts->tx_enable)
1405 ctrl |= CTRL_V3_TX_TS_BITS;
1406
1407 if (priv->cpts->rx_enable)
1408 ctrl |= CTRL_V3_RX_TS_BITS;
1409 break;
1410 }
1386 1411
1387 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; 1412 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1388 1413
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1398 struct hwtstamp_config cfg; 1423 struct hwtstamp_config cfg;
1399 1424
1400 if (priv->version != CPSW_VERSION_1 && 1425 if (priv->version != CPSW_VERSION_1 &&
1401 priv->version != CPSW_VERSION_2) 1426 priv->version != CPSW_VERSION_2 &&
1427 priv->version != CPSW_VERSION_3)
1402 return -EOPNOTSUPP; 1428 return -EOPNOTSUPP;
1403 1429
1404 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1430 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1443 cpsw_hwtstamp_v1(priv); 1469 cpsw_hwtstamp_v1(priv);
1444 break; 1470 break;
1445 case CPSW_VERSION_2: 1471 case CPSW_VERSION_2:
1472 case CPSW_VERSION_3:
1446 cpsw_hwtstamp_v2(priv); 1473 cpsw_hwtstamp_v2(priv);
1447 break; 1474 break;
1448 default: 1475 default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1459 struct hwtstamp_config cfg; 1486 struct hwtstamp_config cfg;
1460 1487
1461 if (priv->version != CPSW_VERSION_1 && 1488 if (priv->version != CPSW_VERSION_1 &&
1462 priv->version != CPSW_VERSION_2) 1489 priv->version != CPSW_VERSION_2 &&
1490 priv->version != CPSW_VERSION_3)
1463 return -EOPNOTSUPP; 1491 return -EOPNOTSUPP;
1464 1492
1465 cfg.flags = 0; 1493 cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1780 return -EINVAL; 1808 return -EINVAL;
1781 1809
1782 if (of_property_read_u32(node, "slaves", &prop)) { 1810 if (of_property_read_u32(node, "slaves", &prop)) {
1783 pr_err("Missing slaves property in the DT.\n"); 1811 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
1784 return -EINVAL; 1812 return -EINVAL;
1785 } 1813 }
1786 data->slaves = prop; 1814 data->slaves = prop;
1787 1815
1788 if (of_property_read_u32(node, "active_slave", &prop)) { 1816 if (of_property_read_u32(node, "active_slave", &prop)) {
1789 pr_err("Missing active_slave property in the DT.\n"); 1817 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
1790 return -EINVAL; 1818 return -EINVAL;
1791 } 1819 }
1792 data->active_slave = prop; 1820 data->active_slave = prop;
1793 1821
1794 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1822 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1795 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1823 dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
1796 return -EINVAL; 1824 return -EINVAL;
1797 } 1825 }
1798 data->cpts_clock_mult = prop; 1826 data->cpts_clock_mult = prop;
1799 1827
1800 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { 1828 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1801 pr_err("Missing cpts_clock_shift property in the DT.\n"); 1829 dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
1802 return -EINVAL; 1830 return -EINVAL;
1803 } 1831 }
1804 data->cpts_clock_shift = prop; 1832 data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1810 return -ENOMEM; 1838 return -ENOMEM;
1811 1839
1812 if (of_property_read_u32(node, "cpdma_channels", &prop)) { 1840 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
1813 pr_err("Missing cpdma_channels property in the DT.\n"); 1841 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
1814 return -EINVAL; 1842 return -EINVAL;
1815 } 1843 }
1816 data->channels = prop; 1844 data->channels = prop;
1817 1845
1818 if (of_property_read_u32(node, "ale_entries", &prop)) { 1846 if (of_property_read_u32(node, "ale_entries", &prop)) {
1819 pr_err("Missing ale_entries property in the DT.\n"); 1847 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
1820 return -EINVAL; 1848 return -EINVAL;
1821 } 1849 }
1822 data->ale_entries = prop; 1850 data->ale_entries = prop;
1823 1851
1824 if (of_property_read_u32(node, "bd_ram_size", &prop)) { 1852 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
1825 pr_err("Missing bd_ram_size property in the DT.\n"); 1853 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
1826 return -EINVAL; 1854 return -EINVAL;
1827 } 1855 }
1828 data->bd_ram_size = prop; 1856 data->bd_ram_size = prop;
1829 1857
1830 if (of_property_read_u32(node, "rx_descs", &prop)) { 1858 if (of_property_read_u32(node, "rx_descs", &prop)) {
1831 pr_err("Missing rx_descs property in the DT.\n"); 1859 dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
1832 return -EINVAL; 1860 return -EINVAL;
1833 } 1861 }
1834 data->rx_descs = prop; 1862 data->rx_descs = prop;
1835 1863
1836 if (of_property_read_u32(node, "mac_control", &prop)) { 1864 if (of_property_read_u32(node, "mac_control", &prop)) {
1837 pr_err("Missing mac_control property in the DT.\n"); 1865 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
1838 return -EINVAL; 1866 return -EINVAL;
1839 } 1867 }
1840 data->mac_control = prop; 1868 data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1848 ret = of_platform_populate(node, NULL, NULL, &pdev->dev); 1876 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
1849 /* We do not want to force this, as in some cases may not have child */ 1877 /* We do not want to force this, as in some cases may not have child */
1850 if (ret) 1878 if (ret)
1851 pr_warn("Doesn't have any child node\n"); 1879 dev_warn(&pdev->dev, "Doesn't have any child node\n");
1852 1880
1853 for_each_child_of_node(node, slave_node) { 1881 for_each_child_of_node(node, slave_node) {
1854 struct cpsw_slave_data *slave_data = data->slave_data + i; 1882 struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1893,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1865 1893
1866 parp = of_get_property(slave_node, "phy_id", &lenp); 1894 parp = of_get_property(slave_node, "phy_id", &lenp);
1867 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1895 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1868 pr_err("Missing slave[%d] phy_id property\n", i); 1896 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
1869 return -EINVAL; 1897 return -EINVAL;
1870 } 1898 }
1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1899 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1885 1913
1886 slave_data->phy_if = of_get_phy_mode(slave_node); 1914 slave_data->phy_if = of_get_phy_mode(slave_node);
1887 if (slave_data->phy_if < 0) { 1915 if (slave_data->phy_if < 0) {
1888 pr_err("Missing or malformed slave[%d] phy-mode property\n", 1916 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
1889 i); 1917 i);
1890 return slave_data->phy_if; 1918 return slave_data->phy_if;
1891 } 1919 }
1892 1920
1893 if (data->dual_emac) { 1921 if (data->dual_emac) {
1894 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1922 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
1895 &prop)) { 1923 &prop)) {
1896 pr_err("Missing dual_emac_res_vlan in DT.\n"); 1924 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
1897 slave_data->dual_emac_res_vlan = i+1; 1925 slave_data->dual_emac_res_vlan = i+1;
1898 pr_err("Using %d as Reserved VLAN for %d slave\n", 1926 dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
1899 slave_data->dual_emac_res_vlan, i); 1927 slave_data->dual_emac_res_vlan, i);
1900 } else { 1928 } else {
1901 slave_data->dual_emac_res_vlan = prop; 1929 slave_data->dual_emac_res_vlan = prop;
1902 } 1930 }
@@ -1920,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1920 1948
1921 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 1949 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1922 if (!ndev) { 1950 if (!ndev) {
1923 pr_err("cpsw: error allocating net_device\n"); 1951 dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
1924 return -ENOMEM; 1952 return -ENOMEM;
1925 } 1953 }
1926 1954
@@ -1936,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1936 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1964 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
1937 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, 1965 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
1938 ETH_ALEN); 1966 ETH_ALEN);
1939 pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); 1967 dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
1940 } else { 1968 } else {
1941 random_ether_addr(priv_sl2->mac_addr); 1969 random_ether_addr(priv_sl2->mac_addr);
1942 pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); 1970 dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
1943 } 1971 }
1944 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); 1972 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
1945 1973
@@ -1970,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1970 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1998 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1971 1999
1972 ndev->netdev_ops = &cpsw_netdev_ops; 2000 ndev->netdev_ops = &cpsw_netdev_ops;
1973 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 2001 ndev->ethtool_ops = &cpsw_ethtool_ops;
1974 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); 2002 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
1975 2003
1976 /* register the network device */ 2004 /* register the network device */
1977 SET_NETDEV_DEV(ndev, &pdev->dev); 2005 SET_NETDEV_DEV(ndev, &pdev->dev);
1978 ret = register_netdev(ndev); 2006 ret = register_netdev(ndev);
1979 if (ret) { 2007 if (ret) {
1980 pr_err("cpsw: error registering net device\n"); 2008 dev_err(&pdev->dev, "cpsw: error registering net device\n");
1981 free_netdev(ndev); 2009 free_netdev(ndev);
1982 ret = -ENODEV; 2010 ret = -ENODEV;
1983 } 2011 }
@@ -1999,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
 	if (!ndev) {
-		pr_err("error allocating net_device\n");
+		dev_err(&pdev->dev, "error allocating net_device\n");
 		return -ENOMEM;
 	}
 
@@ -2014,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
 	priv->irq_enabled = true;
 	if (!priv->cpts) {
-		pr_err("error allocating cpts\n");
+		dev_err(&pdev->dev, "error allocating cpts\n");
 		goto clean_ndev_ret;
 	}
 
@@ -2027,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	pinctrl_pm_select_default_state(&pdev->dev);
 
 	if (cpsw_probe_dt(&priv->data, pdev)) {
-		pr_err("cpsw: platform data missing\n");
+		dev_err(&pdev->dev, "cpsw: platform data missing\n");
 		ret = -ENODEV;
 		goto clean_runtime_disable_ret;
 	}
@@ -2035,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
-		pr_info("Detected MACID = %pM\n", priv->mac_addr);
+		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
 	} else {
 		eth_random_addr(priv->mac_addr);
-		pr_info("Random MACID = %pM\n", priv->mac_addr);
+		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
 	}
 
 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
 	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
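
The cpsw hunks above all make the same substitution: bare pr_err()/pr_info() calls become dev_err()/dev_info() so every message is prefixed with the driver and device name. A minimal sketch of the idiom (the probe function and its checks are hypothetical, not from the patch):

	#include <linux/device.h>
	#include <linux/platform_device.h>

	/* dev_err() prints "driver-name device-name: message", so the log
	 * identifies which instance failed; a bare pr_err() cannot. */
	static int example_probe(struct platform_device *pdev)
	{
		if (!pdev->dev.of_node) {
			dev_err(&pdev->dev, "missing device tree node\n");
			return -ENODEV;
		}
		dev_info(&pdev->dev, "probed\n");
		return 0;
	}
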
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
 	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
 {
-	cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+	cpts->refclk = devm_clk_get(dev, "cpts");
 	if (IS_ERR(cpts->refclk)) {
-		pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+		dev_err(dev, "Failed to get cpts refclk\n");
 		cpts->refclk = NULL;
 		return;
 	}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
 static void cpts_clk_release(struct cpts *cpts)
 {
 	clk_disable(cpts->refclk);
-	clk_put(cpts->refclk);
 }
 
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
 	for (i = 0; i < CPTS_MAX_EVENTS; i++)
 		list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-	cpts_clk_init(cpts);
+	cpts_clk_init(dev, cpts);
 	cpts_write32(cpts, CPTS_EN, control);
 	cpts_write32(cpts, TS_PEND_EN, int_enable);
 
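
devm_clk_get() ties the clock reference to the device's lifetime, which is why the release path above can drop clk_put() entirely. A sketch of the managed pattern, assuming a device whose clock is named "cpts" (the helper name is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_clk_setup(struct device *dev)
	{
		struct clk *refclk;

		/* Released automatically on unbind; no clk_put() needed. */
		refclk = devm_clk_get(dev, "cpts");
		if (IS_ERR(refclk))
			return PTR_ERR(refclk);

		return clk_prepare_enable(refclk);
	}
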
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef27067bf2..4a000f6dd6fc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 	int bitmap_size;
 	struct cpdma_desc_pool *pool;
 
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
 	if (!pool)
-		return NULL;
+		goto fail;
 
 	spin_lock_init(&pool->lock);
 
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 	pool->num_desc = size / pool->desc_size;
 
 	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!pool->bitmap)
 		goto fail;
 
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
 	if (pool->iomap)
 		return pool;
-
 fail:
-	kfree(pool->bitmap);
-	kfree(pool);
 	return NULL;
 }
 
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	WARN_ON(pool->used_desc);
-	kfree(pool->bitmap);
 	if (pool->cpumap) {
 		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
 				  pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 		iounmap(pool->iomap);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
-	kfree(pool);
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 {
 	struct cpdma_ctlr *ctlr;
 
-	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
 	if (!ctlr)
 		return NULL;
 
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 					      ctlr->params.desc_hw_addr,
 					      ctlr->params.desc_mem_size,
 					      ctlr->params.desc_align);
-	if (!ctlr->pool) {
-		kfree(ctlr);
+	if (!ctlr->pool)
 		return NULL;
-	}
 
 	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 
 	cpdma_desc_pool_destroy(ctlr->pool);
 	spin_unlock_irqrestore(&ctlr->lock, flags);
-	kfree(ctlr);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 		      cpdma_handler_fn handler)
 {
 	struct cpdma_chan *chan;
-	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
 	unsigned long flags;
 
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
 		return NULL;
 
-	ret = -ENOMEM;
-	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
-		goto err_chan_alloc;
+		return ERR_PTR(-ENOMEM);
 
 	spin_lock_irqsave(&ctlr->lock, flags);
-	ret = -EBUSY;
-	if (ctlr->channels[chan_num])
-		goto err_chan_busy;
+	if (ctlr->channels[chan_num]) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		devm_kfree(ctlr->dev, chan);
+		return ERR_PTR(-EBUSY);
+	}
 
 	chan->ctlr	= ctlr;
 	chan->state	= CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	ctlr->channels[chan_num] = chan;
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return chan;
-
-err_chan_busy:
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-	kfree(chan);
-err_chan_alloc:
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
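
The cpdma conversion shows the main payoff of devm_kzalloc(): error paths collapse to plain returns because the driver core frees device-managed allocations on probe failure or unbind. A condensed sketch of the pattern (structure and function names hypothetical):

	#include <linux/bitops.h>
	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_pool {
		unsigned long *bitmap;
	};

	static struct example_pool *example_pool_create(struct device *dev,
							int num_desc)
	{
		struct example_pool *pool;

		pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
		if (!pool)
			return NULL;

		pool->bitmap = devm_kzalloc(dev,
					    BITS_TO_LONGS(num_desc) * sizeof(long),
					    GFP_KERNEL);
		if (!pool->bitmap)
			return NULL;	/* pool itself is freed by devres */

		return pool;
	}

Note the one wrinkle visible in cpdma_chan_create() above: an allocation that turns out to be unneeded (the -EBUSY path) is handed back explicitly with devm_kfree() so it does not accumulate for the life of the device.
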
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69ce07ca..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
 					    res_num))) {
 		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
-			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
 			if (request_irq(irq_num, emac_irq, 0, ndev->name,
 					ndev)) {
 				dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	struct emac_priv *priv;
 	unsigned long hw_ram_addr;
 	struct emac_platform_data *pdata;
-	struct device *emac_dev;
 	struct cpdma_params dma_params;
 	struct clk *emac_clk;
 	unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	priv->coal_intvl = 0;
 	priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
 
-	emac_dev = &ndev->dev;
 	/* Get EMAC platform data */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
 
 	memset(&dma_params, 0, sizeof(dma_params));
-	dma_params.dev = emac_dev;
+	dma_params.dev = &pdev->dev;
 	dma_params.dmaregs = priv->emac_base;
 	dma_params.rxthresh = priv->emac_base + 0x120;
 	dma_params.rxfree = priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	}
 
 	ndev->netdev_ops = &emac_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+	ndev->ethtool_ops = &ethtool_ops;
 	netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
 
 	/* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 
 	if (netif_msg_probe(priv)) {
-		dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+		dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
 			   "(regs: %p, irq: %d)\n",
 			   (void *)priv->emac_base_phys, ndev->irq);
 	}
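
SET_ETHTOOL_OPS() was a one-line wrapper around a pointer assignment and is being removed tree-wide, so both cpsw and davinci_emac now assign ndev->ethtool_ops directly. A sketch of the resulting form (the ops table contents are illustrative):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const struct ethtool_ops example_ethtool_ops = {
		/* .get_drvinfo = ..., .get_link = ethtool_op_get_link, ... */
	};

	static void example_setup(struct net_device *ndev)
	{
		/* formerly: SET_ETHTOOL_OPS(ndev, &example_ethtool_ops); */
		ndev->ethtool_ops = &example_ethtool_ops;
	}
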
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..735dc53d4b01 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
 		return -EINVAL;
 
 	if (of_property_read_u32(node, "bus_freq", &prop)) {
-		pr_err("Missing bus_freq property in the DT.\n");
+		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
 		return -EINVAL;
 	}
 	data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	struct phy_device *phy;
 	int ret, addr;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	data->bus = mdiobus_alloc();
+	data->bus = devm_mdiobus_alloc(dev);
 	if (!data->bus) {
 		dev_err(dev, "failed to alloc mii bus\n");
-		ret = -ENOMEM;
-		goto bail_out;
+		return -ENOMEM;
 	}
 
 	if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	data->bus->parent	= dev;
 	data->bus->priv		= data;
 
-	/* Select default pin state */
-	pinctrl_pm_select_default_state(&pdev->dev);
-
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
-	data->clk = clk_get(&pdev->dev, "fck");
+	data->clk = devm_clk_get(dev, "fck");
 	if (IS_ERR(data->clk)) {
 		dev_err(dev, "failed to get device clock\n");
 		ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	spin_lock_init(&data->lock);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "could not find register map resource\n");
-		ret = -ENOENT;
-		goto bail_out;
-	}
-
-	res = devm_request_mem_region(dev, res->start, resource_size(res),
-				      dev_name(dev));
-	if (!res) {
-		dev_err(dev, "could not allocate register map resource\n");
-		ret = -ENXIO;
-		goto bail_out;
-	}
-
-	data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (!data->regs) {
-		dev_err(dev, "could not map mdio registers\n");
-		ret = -ENOMEM;
+	data->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->regs)) {
+		ret = PTR_ERR(data->regs);
 		goto bail_out;
 	}
 
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	return 0;
 
 bail_out:
-	if (data->bus)
-		mdiobus_free(data->bus);
-
-	if (data->clk)
-		clk_put(data->clk);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	kfree(data);
-
 	return ret;
 }
 
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
 {
 	struct davinci_mdio_data *data = platform_get_drvdata(pdev);
 
-	if (data->bus) {
+	if (data->bus)
 		mdiobus_unregister(data->bus);
-		mdiobus_free(data->bus);
-	}
 
-	if (data->clk)
-		clk_put(data->clk);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	kfree(data);
-
 	return 0;
 }
 
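
devm_ioremap_resource() folds the NULL check, devm_request_mem_region() and the ioremap into one call and prints its own diagnostics, which is what lets the probe above delete three separate error branches and shrink the bail_out unwinding to the PM calls alone. A sketch of the resulting idiom (the wrapper name is hypothetical):

	#include <linux/io.h>
	#include <linux/platform_device.h>

	static void __iomem *example_map_regs(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* Validates res, claims the region, maps it, and logs on
		 * failure; returns an ERR_PTR value on error. */
		return devm_ioremap_resource(&pdev->dev, res);
	}
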
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b0e007..14389f841d43 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
 	int ret;
 	int i;
-	int nz_addr = 0;
 	struct net_device *dev;
 	struct tile_net_priv *priv;
 
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 
 	/* Initialize "priv". */
 	priv = netdev_priv(dev);
-	memset(priv, 0, sizeof(*priv));
 	priv->dev = dev;
 	priv->channel = -1;
 	priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 	 * be done before the device is opened. If the MAC is all zeroes,
 	 * we use a random address, since we're probably on the simulator.
 	 */
-	for (i = 0; i < 6; i++)
-		nz_addr |= mac[i];
-
-	if (nz_addr) {
-		memcpy(dev->dev_addr, mac, ETH_ALEN);
-		dev->addr_len = 6;
-	} else {
+	if (!is_zero_ether_addr(mac))
+		ether_addr_copy(dev->dev_addr, mac);
+	else
 		eth_hw_addr_random(dev);
-	}
 
 	/* Register the network device. */
 	ret = register_netdev(dev);
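
is_zero_ether_addr() and ether_addr_copy() replace the hand-rolled OR loop and memcpy(); dropping the dev->addr_len store is safe because ether_setup() already sets it to ETH_ALEN. The helper-based form, reduced to a sketch:

	#include <linux/etherdevice.h>

	static void example_set_mac(struct net_device *dev, const u8 *mac)
	{
		if (!is_zero_ether_addr(mac))
			ether_addr_copy(dev->dev_addr, mac);	/* fixed ETH_ALEN copy */
		else
			eth_hw_addr_random(dev);	/* all-zero MAC: pick a random one */
	}
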
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d0072ae0..bb7992804664 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
 	 * alloc netdev
 	 */
 	*netdev = alloc_etherdev(sizeof(struct gelic_port));
-	if (!netdev) {
+	if (!*netdev) {
 		kfree(card->unalign);
 		return NULL;
 	}
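
The one-character gelic fix is worth pausing on: the function takes struct net_device **netdev, so `!netdev` tested the (always non-NULL) output parameter while the allocation result went unchecked. The corrected shape, reduced to its essentials:

	#include <linux/etherdevice.h>

	static int example_alloc(struct net_device **netdev)
	{
		*netdev = alloc_etherdev(0);
		if (!*netdev)	/* test the allocation, not the argument */
			return -ENOMEM;
		return 0;
	}
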
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
 
 config VIA_RHINE
 	tristate "VIA Rhine support"
-	depends on PCI
+	depends on (PCI || USE_OF)
 	select CRC32
 	select MII
 	---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..2d72f96a9e2c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
 static const char version[] =
 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 
-/* This driver was written to use PCI memory space. Some early versions
-   of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
 	rqRhineI	= 0x0100,	/* See comment below */
+	rqIntPHY	= 0x0200,	/* Integrated PHY */
+	rqMgmt		= 0x0400,	/* Management adapter */
+	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
+					 * switched from PIO mode to MMIO
+					 * (only applies to PCI)
+					 */
 };
 /*
  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 
 /* Offsets to the device registers. */
 enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
 	BCR1_MED1=0x80,		/* for VT6102 */
 };
 
-#ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 	0
 };
-#endif
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
 	unsigned char *tx_bufs;
 	dma_addr_t tx_bufs_dma;
 
-	struct pci_dev *pdev;
+	int irq;
 	long pioaddr;
 	struct net_device *dev;
 	struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
649 "failed" : "succeeded"); 659 "failed" : "succeeded");
650} 660}
651 661
652#ifdef USE_MMIO
653static void enable_mmio(long pioaddr, u32 quirks) 662static void enable_mmio(long pioaddr, u32 quirks)
654{ 663{
655 int n; 664 int n;
656 if (quirks & rqRhineI) { 665
657 /* More recent docs say that this bit is reserved ... */ 666 if (quirks & rqNeedEnMMIO) {
658 n = inb(pioaddr + ConfigA) | 0x20; 667 if (quirks & rqRhineI) {
659 outb(n, pioaddr + ConfigA); 668 /* More recent docs say that this bit is reserved */
660 } else { 669 n = inb(pioaddr + ConfigA) | 0x20;
661 n = inb(pioaddr + ConfigD) | 0x80; 670 outb(n, pioaddr + ConfigA);
662 outb(n, pioaddr + ConfigD); 671 } else {
672 n = inb(pioaddr + ConfigD) | 0x80;
673 outb(n, pioaddr + ConfigD);
674 }
663 } 675 }
664} 676}
665#endif 677
678static inline int verify_mmio(struct device *hwdev,
679 long pioaddr,
680 void __iomem *ioaddr,
681 u32 quirks)
682{
683 if (quirks & rqNeedEnMMIO) {
684 int i = 0;
685
686 /* Check that selected MMIO registers match the PIO ones */
687 while (mmio_verify_registers[i]) {
688 int reg = mmio_verify_registers[i++];
689 unsigned char a = inb(pioaddr+reg);
690 unsigned char b = readb(ioaddr+reg);
691
692 if (a != b) {
693 dev_err(hwdev,
694 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
695 reg, a, b);
696 return -EIO;
697 }
698 }
699 }
700 return 0;
701}
666 702
667/* 703/*
668 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM 704 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 	if (i > 512)
 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 
-#ifdef USE_MMIO
 	/*
 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
 	 * it is not known if that still works with the "win98-reboot" problem.
 	 */
 	enable_mmio(pioaddr, rp->quirks);
-#endif
 
 	/* Turn off EEPROM-controlled wake-up (magic packet) */
 	if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 static void rhine_poll(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-	const int irq = rp->pdev->irq;
+	const int irq = rp->irq;
 
 	disable_irq(irq);
 	rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
 	msleep(5);
 
 	/* Reload EEPROM controlled bytes cleared by soft reset */
-	rhine_reload_eeprom(pioaddr, dev);
+	if (dev_is_pci(dev->dev.parent))
+		rhine_reload_eeprom(pioaddr, dev);
 }
 
 static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
 #endif
 };
 
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+				 long pioaddr, void __iomem *ioaddr, int irq)
 {
 	struct net_device *dev;
 	struct rhine_private *rp;
-	int i, rc;
-	u32 quirks;
-	long pioaddr;
-	long memaddr;
-	void __iomem *ioaddr;
-	int io_size, phy_id;
+	int i, rc, phy_id;
 	const char *name;
-#ifdef USE_MMIO
-	int bar = 1;
-#else
-	int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-	pr_info_once("%s\n", version);
-#endif
-
-	io_size = 256;
-	phy_id = 0;
-	quirks = 0;
-	name = "Rhine";
-	if (pdev->revision < VTunknown0) {
-		quirks = rqRhineI;
-		io_size = 128;
-	}
-	else if (pdev->revision >= VT6102) {
-		quirks = rqWOL | rqForceReset;
-		if (pdev->revision < VT6105) {
-			name = "Rhine II";
-			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
-		}
-		else {
-			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
-			if (pdev->revision >= VT6105_B0)
-				quirks |= rq6patterns;
-			if (pdev->revision < VT6105M)
-				name = "Rhine III";
-			else
-				name = "Rhine III (Management Adapter)";
-		}
-	}
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		goto err_out;
 
 	/* this should always be supported */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 	if (rc) {
-		dev_err(&pdev->dev,
-			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out_pci_disable;
-	}
-
-	/* sanity check */
-	if ((pci_resource_len(pdev, 0) < io_size) ||
-	    (pci_resource_len(pdev, 1) < io_size)) {
-		rc = -EIO;
-		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out_pci_disable;
+		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+		goto err_out;
 	}
 
-	pioaddr = pci_resource_start(pdev, 0);
-	memaddr = pci_resource_start(pdev, 1);
-
-	pci_set_master(pdev);
-
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out_pci_disable;
+		goto err_out;
 	}
-	SET_NETDEV_DEV(dev, &pdev->dev);
+	SET_NETDEV_DEV(dev, hwdev);
 
 	rp = netdev_priv(dev);
 	rp->dev = dev;
 	rp->quirks = quirks;
 	rp->pioaddr = pioaddr;
-	rp->pdev = pdev;
+	rp->base = ioaddr;
+	rp->irq = irq;
 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_out_free_netdev;
-
-	ioaddr = pci_iomap(pdev, bar, io_size);
-	if (!ioaddr) {
-		rc = -EIO;
-		dev_err(&pdev->dev,
-			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
-			pci_name(pdev), io_size, memaddr);
-		goto err_out_free_res;
-	}
-
-#ifdef USE_MMIO
-	enable_mmio(pioaddr, quirks);
-
-	/* Check that selected MMIO registers match the PIO ones */
-	i = 0;
-	while (mmio_verify_registers[i]) {
-		int reg = mmio_verify_registers[i++];
-		unsigned char a = inb(pioaddr+reg);
-		unsigned char b = readb(ioaddr+reg);
-		if (a != b) {
-			rc = -EIO;
-			dev_err(&pdev->dev,
-				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
-				reg, a, b);
-			goto err_out_unmap;
-		}
-	}
-#endif /* USE_MMIO */
-
-	rp->base = ioaddr;
+	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 
 	u64_stats_init(&rp->tx_stats.syncp);
 	u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	if (pdev->revision >= VT6105M)
+	if (rp->quirks & rqMgmt)
 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 				 NETIF_F_HW_VLAN_CTAG_RX |
 				 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
-		goto err_out_unmap;
+		goto err_out_free_netdev;
+
+	if (rp->quirks & rqRhineI)
+		name = "Rhine";
+	else if (rp->quirks & rqStatusWBRace)
+		name = "Rhine II";
+	else if (rp->quirks & rqMgmt)
+		name = "Rhine III (Management Adapter)";
+	else
+		name = "Rhine III";
 
 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-		    name,
-#ifdef USE_MMIO
-		    memaddr,
-#else
-		    (long)ioaddr,
-#endif
-		    dev->dev_addr, pdev->irq);
+		    name, (long)ioaddr, dev->dev_addr, rp->irq);
 
-	pci_set_drvdata(pdev, dev);
+	dev_set_drvdata(hwdev, dev);
 
 	{
 		u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+err_out_free_netdev:
+	free_netdev(dev);
+err_out:
+	return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct device *hwdev = &pdev->dev;
+	int rc;
+	long pioaddr, memaddr;
+	void __iomem *ioaddr;
+	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ *	 in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+	u32 quirks = rqNeedEnMMIO;
+#else
+	u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	pr_info_once("%s\n", version);
+#endif
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out;
+
+	if (pdev->revision < VTunknown0) {
+		quirks |= rqRhineI;
+	} else if (pdev->revision >= VT6102) {
+		quirks |= rqWOL | rqForceReset;
+		if (pdev->revision < VT6105) {
+			quirks |= rqStatusWBRace;
+		} else {
+			quirks |= rqIntPHY;
+			if (pdev->revision >= VT6105_B0)
+				quirks |= rq6patterns;
+			if (pdev->revision >= VT6105M)
+				quirks |= rqMgmt;
+		}
+	}
+
+	/* sanity check */
+	if ((pci_resource_len(pdev, 0) < io_size) ||
+	    (pci_resource_len(pdev, 1) < io_size)) {
+		rc = -EIO;
+		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+		goto err_out_pci_disable;
+	}
+
+	pioaddr = pci_resource_start(pdev, 0);
+	memaddr = pci_resource_start(pdev, 1);
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_pci_disable;
+
+	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+	if (!ioaddr) {
+		rc = -EIO;
+		dev_err(hwdev,
+			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+			dev_name(hwdev), io_size, memaddr);
+		goto err_out_free_res;
+	}
+
+	enable_mmio(pioaddr, quirks);
+
+	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+	if (rc)
+		goto err_out_unmap;
+
+	rc = rhine_init_one_common(&pdev->dev, quirks,
+				   pioaddr, ioaddr, pdev->irq);
+	if (!rc)
+		return 0;
+
 err_out_unmap:
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
-err_out_free_netdev:
-	free_netdev(dev);
 err_out_pci_disable:
 	pci_disable_device(pdev);
 err_out:
 	return rc;
 }
 
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const u32 *quirks;
+	int irq;
+	struct resource *res;
+	void __iomem *ioaddr;
+
+	match = of_match_device(rhine_of_tbl, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq)
+		return -EINVAL;
+
+	quirks = match->data;
+	if (!quirks)
+		return -EINVAL;
+
+	return rhine_init_one_common(&pdev->dev, *quirks,
+				     (long)ioaddr, ioaddr, irq);
+}
+
 static int alloc_ring(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	void *ring;
 	dma_addr_t ring_dma;
 
-	ring = pci_alloc_consistent(rp->pdev,
+	ring = dma_alloc_coherent(hwdev,
 				    RX_RING_SIZE * sizeof(struct rx_desc) +
 				    TX_RING_SIZE * sizeof(struct tx_desc),
-				    &ring_dma);
+				    &ring_dma,
+				    GFP_ATOMIC);
 	if (!ring) {
 		netdev_err(dev, "Could not allocate DMA memory\n");
 		return -ENOMEM;
 	}
 	if (rp->quirks & rqRhineI) {
-		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
+		rp->tx_bufs = dma_alloc_coherent(hwdev,
 						   PKT_BUF_SZ * TX_RING_SIZE,
-						   &rp->tx_bufs_dma);
+						   &rp->tx_bufs_dma,
+						   GFP_ATOMIC);
 		if (rp->tx_bufs == NULL) {
-			pci_free_consistent(rp->pdev,
+			dma_free_coherent(hwdev,
 					    RX_RING_SIZE * sizeof(struct rx_desc) +
 					    TX_RING_SIZE * sizeof(struct tx_desc),
 					    ring, ring_dma);
 			return -ENOMEM;
 		}
 	}
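
From here on the rhine conversion swaps the PCI-only DMA wrappers (pci_alloc_consistent(), pci_map_single(), ...) for the generic DMA API, keyed off dev->dev.parent so the same code path serves PCI and platform devices. A minimal sketch of the substitution (function name hypothetical):

	#include <linux/dma-mapping.h>

	static void *example_alloc_ring(struct device *hwdev, size_t size,
					dma_addr_t *dma)
	{
		/* pci_alloc_consistent(pdev, size, dma) becomes: */
		return dma_alloc_coherent(hwdev, size, dma, GFP_ATOMIC);
	}
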
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
 static void free_ring(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 
-	pci_free_consistent(rp->pdev,
+	dma_free_coherent(hwdev,
 			    RX_RING_SIZE * sizeof(struct rx_desc) +
 			    TX_RING_SIZE * sizeof(struct tx_desc),
 			    rp->rx_ring, rp->rx_ring_dma);
 	rp->tx_ring = NULL;
 
 	if (rp->tx_bufs)
-		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
 				    rp->tx_bufs, rp->tx_bufs_dma);
 
 	rp->tx_bufs = NULL;
 
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
 static void alloc_rbufs(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	dma_addr_t next;
 	int i;
 
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
 			break;
 
 		rp->rx_skbuff_dma[i] =
-			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
-				       PCI_DMA_FROMDEVICE);
-		if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
 			rp->rx_skbuff_dma[i] = 0;
 			dev_kfree_skb(skb);
 			break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
 static void free_rbufs(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int i;
 
 	/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
 		rp->rx_ring[i].rx_status = 0;
 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (rp->rx_skbuff[i]) {
-			pci_unmap_single(rp->pdev,
+			dma_unmap_single(hwdev,
 					 rp->rx_skbuff_dma[i],
-					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+					 rp->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(rp->rx_skbuff[i]);
 		}
 		rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
 static void free_tbufs(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int i;
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (rp->tx_skbuff[i]) {
 			if (rp->tx_skbuff_dma[i]) {
-				pci_unmap_single(rp->pdev,
+				dma_unmap_single(hwdev,
 						 rp->tx_skbuff_dma[i],
 						 rp->tx_skbuff[i]->len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			}
 			dev_kfree_skb(rp->tx_skbuff[i]);
 		}
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		/* autoneg is off: Link is always assumed to be up */
 		if (!netif_carrier_ok(dev))
 			netif_carrier_on(dev);
-	} else	/* Let MMI library update carrier status */
-		rhine_check_media(dev, 0);
+	}
+
+	rhine_check_media(dev, 0);
 
 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
 		   mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
-	if (rp->pdev->revision >= VT6105M)
+	if (rp->quirks & rqMgmt)
 		rhine_init_cam_filter(dev);
 
 	napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
 	void __iomem *ioaddr = rp->base;
 	int rc;
 
-	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
-			dev);
+	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		return rc;
 
-	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
 	rc = alloc_ring(dev);
 	if (rc) {
-		free_irq(rp->pdev->irq, dev);
+		free_irq(rp->irq, dev);
 		return rc;
 	}
 	alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 			    struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
 
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 						       rp->tx_bufs));
 	} else {
 		rp->tx_skbuff_dma[entry] =
-			pci_map_single(rp->pdev, skb->data, skb->len,
-				       PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+			dma_map_single(hwdev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
 			dev_kfree_skb_any(skb);
 			rp->tx_skbuff_dma[entry] = 0;
 			dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 static void rhine_tx(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
 	/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
 		}
 		/* Free the original skb. */
 		if (rp->tx_skbuff_dma[entry]) {
-			pci_unmap_single(rp->pdev,
+			dma_unmap_single(hwdev,
 					 rp->tx_skbuff_dma[entry],
 					 rp->tx_skbuff[entry]->len,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		}
 		dev_consume_skb_any(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
 static int rhine_rx(struct net_device *dev, int limit)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int count;
 	int entry = rp->cur_rx % RX_RING_SIZE;
 
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (pkt_len < rx_copybreak)
 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 			if (skb) {
-				pci_dma_sync_single_for_cpu(rp->pdev,
+				dma_sync_single_for_cpu(hwdev,
 							rp->rx_skbuff_dma[entry],
 							rp->rx_buf_sz,
-							PCI_DMA_FROMDEVICE);
+							DMA_FROM_DEVICE);
 
 				skb_copy_to_linear_data(skb,
 							rp->rx_skbuff[entry]->data,
 							pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(rp->pdev,
+				dma_sync_single_for_device(hwdev,
 							   rp->rx_skbuff_dma[entry],
 							   rp->rx_buf_sz,
-							   PCI_DMA_FROMDEVICE);
+							   DMA_FROM_DEVICE);
 			} else {
 				skb = rp->rx_skbuff[entry];
 				if (skb == NULL) {
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 				rp->rx_skbuff[entry] = NULL;
 				skb_put(skb, pkt_len);
-				pci_unmap_single(rp->pdev,
+				dma_unmap_single(hwdev,
 						 rp->rx_skbuff_dma[entry],
 						 rp->rx_buf_sz,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			}
 
 			if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			rp->rx_skbuff_dma[entry] =
-				pci_map_single(rp->pdev, skb->data,
+				dma_map_single(hwdev, skb->data,
 					       rp->rx_buf_sz,
-					       PCI_DMA_FROMDEVICE);
-			if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+					       DMA_FROM_DEVICE);
+			if (dma_mapping_error(hwdev,
+					      rp->rx_skbuff_dma[entry])) {
 				dev_kfree_skb(skb);
 				rp->rx_skbuff_dma[entry] = 0;
 				break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-	} else if (rp->pdev->revision >= VT6105M) {
+	} else if (rp->quirks & rqMgmt) {
 		int i = 0;
 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
 		netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
 	}
 	/* enable/disable VLAN receive filtering */
-	if (rp->pdev->revision >= VT6105M) {
+	if (rp->quirks & rqMgmt) {
 		if (dev->flags & IFF_PROMISC)
 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
 		else
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(CmdStop, ioaddr + ChipCmd);
 
-	free_irq(rp->pdev->irq, dev);
+	free_irq(rp->irq, dev);
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
 }
 
 
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct rhine_private *rp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	iounmap(rp->base);
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int rhine_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
 
 	netif_device_detach(dev);
 
-	rhine_shutdown(pdev);
+	if (dev_is_pci(device))
+		rhine_shutdown_pci(to_pci_dev(device));
 
 	return 0;
 }
 
 static int rhine_resume(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
 		return 0;
 
-#ifdef USE_MMIO
 	enable_mmio(rp->pioaddr, rp->quirks);
-#endif
 	rhine_power_init(dev);
 	free_tbufs(dev);
 	free_rbufs(dev);
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
 
 #endif /* !CONFIG_PM_SLEEP */
 
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
 	.name		= DRV_NAME,
 	.id_table	= rhine_pci_tbl,
-	.probe		= rhine_init_one,
-	.remove		= rhine_remove_one,
-	.shutdown	= rhine_shutdown,
+	.probe		= rhine_init_one_pci,
+	.remove		= rhine_remove_one_pci,
+	.shutdown	= rhine_shutdown_pci,
 	.driver.pm	= RHINE_PM_OPS,
 };
 
+static struct platform_driver rhine_driver_platform = {
+	.probe		= rhine_init_one_platform,
+	.remove		= rhine_remove_one_platform,
+	.driver = {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table	= rhine_of_tbl,
+		.pm		= RHINE_PM_OPS,
+	}
+};
+
 static struct dmi_system_id rhine_dmi_table[] __initdata = {
 	{
 		.ident = "EPIA-M",
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
2437 2534
2438static int __init rhine_init(void) 2535static int __init rhine_init(void)
2439{ 2536{
2537 int ret_pci, ret_platform;
2538
2440/* when a module, this is printed whether or not devices are found in probe */ 2539/* when a module, this is printed whether or not devices are found in probe */
2441#ifdef MODULE 2540#ifdef MODULE
2442 pr_info("%s\n", version); 2541 pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
2449 else if (avoid_D3) 2548 else if (avoid_D3)
2450 pr_info("avoid_D3 set\n"); 2549 pr_info("avoid_D3 set\n");
2451 2550
2452 return pci_register_driver(&rhine_driver); 2551 ret_pci = pci_register_driver(&rhine_driver_pci);
2552 ret_platform = platform_driver_register(&rhine_driver_platform);
2553 if ((ret_pci < 0) && (ret_platform < 0))
2554 return ret_pci;
2555
2556 return 0;
2453} 2557}
2454 2558
2455 2559
2456static void __exit rhine_cleanup(void) 2560static void __exit rhine_cleanup(void)
2457{ 2561{
2458 pci_unregister_driver(&rhine_driver); 2562 platform_driver_unregister(&rhine_driver_platform);
2563 pci_unregister_driver(&rhine_driver_pci);
2459} 2564}
2460 2565
2461 2566
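Note on the dual registration above: rhine_init() now registers the driver on both buses and only fails when both pci_register_driver() and platform_driver_register() fail, while rhine_cleanup() unregisters in reverse order. A minimal, self-contained sketch of the same pattern (hypothetical foo_* names, not from this driver):

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/platform_device.h>

	static struct pci_driver foo_driver_pci;           /* assume filled in elsewhere */
	static struct platform_driver foo_driver_platform; /* assume filled in elsewhere */

	static int __init foo_init(void)
	{
		int ret_pci, ret_platform;

		ret_pci = pci_register_driver(&foo_driver_pci);
		ret_platform = platform_driver_register(&foo_driver_platform);

		/* Succeed if at least one bus registration worked. */
		if (ret_pci < 0 && ret_platform < 0)
			return ret_pci;
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* Unregister in reverse order of registration. */
		platform_driver_unregister(&foo_driver_platform);
		pci_unregister_driver(&foo_driver_pci);
	}

	module_init(foo_init);
	module_exit(foo_exit);

As in the patch, a registration that failed is still passed to the matching unregister at exit; a stricter variant would track which of the two registrations actually succeeded.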
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4688da..4ef818a7a6c6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
75 long end = jiffies + 2; 75 long end = jiffies + 2;
76 76
77 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) { 77 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
78 if (end - jiffies <= 0) { 78 if (time_before_eq(end, jiffies)) {
79 WARN_ON(1); 79 WARN_ON(1);
80 return -ETIMEDOUT; 80 return -ETIMEDOUT;
81 } 81 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639f43b6..d4abf478e2bb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
22 long end = jiffies + 2; 22 long end = jiffies + 2;
23 while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) & 23 while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
24 XAE_MDIO_MCR_READY_MASK)) { 24 XAE_MDIO_MCR_READY_MASK)) {
25 if (end - jiffies <= 0) { 25 if (time_before_eq(end, jiffies)) {
26 WARN_ON(1); 26 WARN_ON(1);
27 return -ETIMEDOUT; 27 return -ETIMEDOUT;
28 } 28 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67a5ff7..8c4aed3053eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
702 */ 702 */
703 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & 703 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
704 XEL_MDIOCTRL_MDIOSTS_MASK) { 704 XEL_MDIOCTRL_MDIOSTS_MASK) {
705 if (end - jiffies <= 0) { 705 if (time_before_eq(end, jiffies)) {
706 WARN_ON(1); 706 WARN_ON(1);
707 return -ETIMEDOUT; 707 return -ETIMEDOUT;
708 } 708 }
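All three busy-wait loops above (ll_temac, axienet_mdio, emaclite) switch from the open-coded `end - jiffies <= 0` to time_before_eq(end, jiffies), which compares through a signed difference and so stays correct when jiffies wraps around. A minimal sketch of the idiom, with a hypothetical ready() predicate:

	#include <linux/jiffies.h>
	#include <linux/errno.h>
	#include <asm/processor.h>	/* cpu_relax() */

	/* Hypothetical helper: poll ready() for up to 2 jiffies. */
	static int wait_ready(bool (*ready)(void))
	{
		unsigned long end = jiffies + 2;

		while (!ready()) {
			/* Wraparound-safe: expands to a signed difference test. */
			if (time_before_eq(end, jiffies))
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}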
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d18f711d0b0c..6cc37c15e0bf 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -28,50 +28,119 @@
28#include <linux/hyperv.h> 28#include <linux/hyperv.h>
29#include <linux/rndis.h> 29#include <linux/rndis.h>
30 30
31/* Fwd declaration */ 31/* RSS related */
32struct hv_netvsc_packet; 32#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
33struct ndis_tcp_ip_checksum_info; 33#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
34 34
35/* Represent the xfer page packet which contains 1 or more netvsc packet */ 35#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
36struct xferpage_packet { 36#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
37 struct list_head list_ent;
38 u32 status;
39 37
40 /* # of netvsc packets this xfer packet contains */ 38#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
41 u32 count; 39#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
40
41struct ndis_obj_header {
42 u8 type;
43 u8 rev;
44 u16 size;
45} __packed;
46
47/* ndis_recv_scale_cap/cap_flag */
48#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
49#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
50#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
51#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
52#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
53#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
54#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
55#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
56#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
57
58struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
59 struct ndis_obj_header hdr;
60 u32 cap_flag;
61 u32 num_int_msg;
62 u32 num_recv_que;
63 u16 num_indirect_tabent;
64} __packed;
65
66
67/* ndis_recv_scale_param flags */
68#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
69#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
70#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
71#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
72#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
73
74/* Hash info bits */
75#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
76#define NDIS_HASH_IPV4 0x00000100
77#define NDIS_HASH_TCP_IPV4 0x00000200
78#define NDIS_HASH_IPV6 0x00000400
79#define NDIS_HASH_IPV6_EX 0x00000800
80#define NDIS_HASH_TCP_IPV6 0x00001000
81#define NDIS_HASH_TCP_IPV6_EX 0x00002000
82
83#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
84#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
85
86#define ITAB_NUM 128
87#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
88extern u8 netvsc_hash_key[];
89
90struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
91 struct ndis_obj_header hdr;
92
93 /* Qualifies the rest of the information */
94 u16 flag;
95
96 /* The base CPU number to do receive processing. not used */
97 u16 base_cpu_number;
98
99 /* This describes the hash function and type being enabled */
100 u32 hashinfo;
101
102 /* The size of indirection table array */
103 u16 indirect_tabsize;
104
105 /* The offset of the indirection table from the beginning of this
106 * structure
107 */
108 u32 indirect_taboffset;
109
110 /* The size of the hash secret key */
111 u16 hashkey_size;
112
113 /* The offset of the secret key from the beginning of this structure */
114 u32 kashkey_offset;
115
116 u32 processor_masks_offset;
117 u32 num_processor_masks;
118 u32 processor_masks_entry_size;
42}; 119};
43 120
121/* Fwd declaration */
122struct ndis_tcp_ip_checksum_info;
123
44/* 124/*
45 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame 125 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
46 * within the RNDIS 126 * within the RNDIS
47 */ 127 */
48struct hv_netvsc_packet { 128struct hv_netvsc_packet {
49 /* Bookkeeping stuff */ 129 /* Bookkeeping stuff */
50 struct list_head list_ent;
51 u32 status; 130 u32 status;
52 131
53 struct hv_device *device; 132 struct hv_device *device;
54 bool is_data_pkt; 133 bool is_data_pkt;
55 u16 vlan_tci; 134 u16 vlan_tci;
56 135
57 /* 136 u16 q_idx;
58 * Valid only for receives when we break a xfer page packet 137 struct vmbus_channel *channel;
59 * into multiple netvsc packets
60 */
61 struct xferpage_packet *xfer_page_pkt;
62 138
63 union { 139 u64 send_completion_tid;
64 struct { 140 void *send_completion_ctx;
65 u64 recv_completion_tid; 141 void (*send_completion)(void *context);
66 void *recv_completion_ctx; 142
67 void (*recv_completion)(void *context); 143 u32 send_buf_index;
68 } recv;
69 struct {
70 u64 send_completion_tid;
71 void *send_completion_ctx;
72 void (*send_completion)(void *context);
73 } send;
74 } completion;
75 144
76 /* This points to the memory after page_buf */ 145 /* This points to the memory after page_buf */
77 struct rndis_message *rndis_msg; 146 struct rndis_message *rndis_msg;
@@ -120,6 +189,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
120int netvsc_recv_callback(struct hv_device *device_obj, 189int netvsc_recv_callback(struct hv_device *device_obj,
121 struct hv_netvsc_packet *packet, 190 struct hv_netvsc_packet *packet,
122 struct ndis_tcp_ip_checksum_info *csum_info); 191 struct ndis_tcp_ip_checksum_info *csum_info);
192void netvsc_channel_cb(void *context);
123int rndis_filter_open(struct hv_device *dev); 193int rndis_filter_open(struct hv_device *dev);
124int rndis_filter_close(struct hv_device *dev); 194int rndis_filter_close(struct hv_device *dev);
125int rndis_filter_device_add(struct hv_device *dev, 195int rndis_filter_device_add(struct hv_device *dev,
@@ -514,14 +584,16 @@ struct nvsp_message {
514 584
515#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ 585#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
516#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ 586#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
587#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024) /* 1MB */
588#define NETVSC_INVALID_INDEX -1
517 589
518#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
519 590
520/* Preallocated receive packets */ 591#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
521#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
522 592
523#define NETVSC_PACKET_SIZE 2048 593#define NETVSC_PACKET_SIZE 2048
524 594
595#define VRSS_SEND_TAB_SIZE 16
596
525/* Per netvsc channel-specific */ 597/* Per netvsc channel-specific */
526struct netvsc_device { 598struct netvsc_device {
527 struct hv_device *dev; 599 struct hv_device *dev;
@@ -532,12 +604,6 @@ struct netvsc_device {
532 wait_queue_head_t wait_drain; 604 wait_queue_head_t wait_drain;
533 bool start_remove; 605 bool start_remove;
534 bool destroy; 606 bool destroy;
535 /*
536 * List of free preallocated hv_netvsc_packet to represent receive
537 * packet
538 */
539 struct list_head recv_pkt_list;
540 spinlock_t recv_pkt_list_lock;
541 607
 542 /* Receive buffer allocated by us but managed by NetVSP */ 608
543 void *recv_buf; 609 void *recv_buf;
@@ -546,6 +612,15 @@ struct netvsc_device {
546 u32 recv_section_cnt; 612 u32 recv_section_cnt;
547 struct nvsp_1_receive_buffer_section *recv_section; 613 struct nvsp_1_receive_buffer_section *recv_section;
548 614
615 /* Send buffer allocated by us */
616 void *send_buf;
617 u32 send_buf_size;
618 u32 send_buf_gpadl_handle;
619 u32 send_section_cnt;
620 u32 send_section_size;
621 unsigned long *send_section_map;
622 int map_words;
623
549 /* Used for NetVSP initialization protocol */ 624 /* Used for NetVSP initialization protocol */
550 struct completion channel_init_wait; 625 struct completion channel_init_wait;
551 struct nvsp_message channel_init_pkt; 626 struct nvsp_message channel_init_pkt;
@@ -555,10 +630,20 @@ struct netvsc_device {
555 630
556 struct net_device *ndev; 631 struct net_device *ndev;
557 632
633 struct vmbus_channel *chn_table[NR_CPUS];
634 u32 send_table[VRSS_SEND_TAB_SIZE];
635 u32 num_chn;
636 atomic_t queue_sends[NR_CPUS];
637
558 /* Holds rndis device info */ 638 /* Holds rndis device info */
559 void *extension; 639 void *extension;
560 /* The recive buffer for this device */ 640
641 int ring_size;
642
643 /* The primary channel callback buffer */
561 unsigned char cb_buffer[NETVSC_PACKET_SIZE]; 644 unsigned char cb_buffer[NETVSC_PACKET_SIZE];
645 /* The sub channel callback buffer */
646 unsigned char *sub_cb_buf;
562}; 647};
563 648
564/* NdisInitialize message */ 649/* NdisInitialize message */
@@ -706,6 +791,7 @@ enum ndis_per_pkt_info_type {
706 IEEE_8021Q_INFO, 791 IEEE_8021Q_INFO,
707 ORIGINAL_PKTINFO, 792 ORIGINAL_PKTINFO,
708 PACKET_CANCEL_ID, 793 PACKET_CANCEL_ID,
794 NBL_HASH_VALUE = PACKET_CANCEL_ID,
709 ORIGINAL_NET_BUFLIST, 795 ORIGINAL_NET_BUFLIST,
710 CACHED_NET_BUFLIST, 796 CACHED_NET_BUFLIST,
711 SHORT_PKT_PADINFO, 797 SHORT_PKT_PADINFO,
@@ -852,6 +938,9 @@ struct ndis_tcp_lso_info {
852#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \ 938#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
853 sizeof(struct ndis_tcp_lso_info)) 939 sizeof(struct ndis_tcp_lso_info))
854 940
941#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
942 sizeof(u32))
943
855/* Format of Information buffer passed in a SetRequest for the OID */ 944/* Format of Information buffer passed in a SetRequest for the OID */
856/* OID_GEN_RNDIS_CONFIG_PARAMETER. */ 945/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
857struct rndis_config_parameter_info { 946struct rndis_config_parameter_info {
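NDIS_HASH_PPI_SIZE above reserves room for one per-packet-info header plus a bare u32 hash value; netvsc_start_xmit() (further down in this patch) appends that PPI so the host sees the Toeplitz hash the guest used for queue selection. A rough sketch of the layout, assuming the size/type/ppi_offset fields of the driver's rndis_per_packet_info:

	/* Sketch: append a 4-byte hash PPI at ppi inside an RNDIS message. */
	static void append_hash_ppi(struct rndis_per_packet_info *ppi, u32 hash)
	{
		ppi->size = sizeof(*ppi) + sizeof(u32);	/* == NDIS_HASH_PPI_SIZE */
		ppi->type = NBL_HASH_VALUE;		/* aliases PACKET_CANCEL_ID above */
		ppi->ppi_offset = sizeof(*ppi);		/* value follows the header */
		*(u32 *)((u8 *)ppi + ppi->ppi_offset) = hash;
	}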
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f7629ecefa84..c041f63a6d30 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <asm/sync_bitops.h>
31 32
32#include "hyperv_net.h" 33#include "hyperv_net.h"
33 34
@@ -80,7 +81,7 @@ get_in_err:
80} 81}
81 82
82 83
83static int netvsc_destroy_recv_buf(struct netvsc_device *net_device) 84static int netvsc_destroy_buf(struct netvsc_device *net_device)
84{ 85{
85 struct nvsp_message *revoke_packet; 86 struct nvsp_message *revoke_packet;
86 int ret = 0; 87 int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
146 net_device->recv_section = NULL; 147 net_device->recv_section = NULL;
147 } 148 }
148 149
 150 /* Deal with the send buffer we may have set up.
 151 * If we got a send section size, it means we received a
 152 * SendSendBufferComplete msg (i.e. we sent a
 153 * NvspMessage1TypeSendSendBuffer msg); therefore, we need
 154 * to send a revoke msg here
 155 */
156 if (net_device->send_section_size) {
 157 /* Send the revoke send buffer */
158 revoke_packet = &net_device->revoke_packet;
159 memset(revoke_packet, 0, sizeof(struct nvsp_message));
160
161 revoke_packet->hdr.msg_type =
162 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
 163 revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;
164
165 ret = vmbus_sendpacket(net_device->dev->channel,
166 revoke_packet,
167 sizeof(struct nvsp_message),
168 (unsigned long)revoke_packet,
169 VM_PKT_DATA_INBAND, 0);
170 /* If we failed here, we might as well return and
 171 * have a leak rather than continue and risk a bugcheck
172 */
173 if (ret != 0) {
174 netdev_err(ndev, "unable to send "
175 "revoke send buffer to netvsp\n");
176 return ret;
177 }
178 }
 179 /* Tear down the gpadl on the VSP end */
180 if (net_device->send_buf_gpadl_handle) {
181 ret = vmbus_teardown_gpadl(net_device->dev->channel,
182 net_device->send_buf_gpadl_handle);
183
184 /* If we failed here, we might as well return and have a leak
 185 * rather than continue and risk a bugcheck
186 */
187 if (ret != 0) {
188 netdev_err(ndev,
189 "unable to teardown send buffer's gpadl\n");
190 return ret;
191 }
 192 net_device->send_buf_gpadl_handle = 0;
193 }
194 if (net_device->send_buf) {
 195 /* Free up the send buffer */
196 free_pages((unsigned long)net_device->send_buf,
197 get_order(net_device->send_buf_size));
198 net_device->send_buf = NULL;
199 }
200 kfree(net_device->send_section_map);
201
149 return ret; 202 return ret;
150} 203}
151 204
152static int netvsc_init_recv_buf(struct hv_device *device) 205static int netvsc_init_buf(struct hv_device *device)
153{ 206{
154 int ret = 0; 207 int ret = 0;
155 int t; 208 int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
248 goto cleanup; 301 goto cleanup;
249 } 302 }
250 303
 304 /* Now set up the send buffer.
 305 */
306 net_device->send_buf =
307 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
308 get_order(net_device->send_buf_size));
309 if (!net_device->send_buf) {
310 netdev_err(ndev, "unable to allocate send "
311 "buffer of size %d\n", net_device->send_buf_size);
312 ret = -ENOMEM;
313 goto cleanup;
314 }
315
316 /* Establish the gpadl handle for this buffer on this
317 * channel. Note: This call uses the vmbus connection rather
318 * than the channel to establish the gpadl handle.
319 */
320 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
321 net_device->send_buf_size,
322 &net_device->send_buf_gpadl_handle);
323 if (ret != 0) {
324 netdev_err(ndev,
325 "unable to establish send buffer's gpadl\n");
326 goto cleanup;
327 }
328
329 /* Notify the NetVsp of the gpadl handle */
330 init_packet = &net_device->channel_init_pkt;
331 memset(init_packet, 0, sizeof(struct nvsp_message));
332 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
 333 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
 334 net_device->send_buf_gpadl_handle;
 335 init_packet->msg.v1_msg.send_send_buf.id = 0;
336
337 /* Send the gpadl notification request */
338 ret = vmbus_sendpacket(device->channel, init_packet,
339 sizeof(struct nvsp_message),
340 (unsigned long)init_packet,
341 VM_PKT_DATA_INBAND,
342 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
343 if (ret != 0) {
344 netdev_err(ndev,
345 "unable to send send buffer's gpadl to netvsp\n");
346 goto cleanup;
347 }
348
349 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
350 BUG_ON(t == 0);
351
352 /* Check the response */
353 if (init_packet->msg.v1_msg.
354 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
355 netdev_err(ndev, "Unable to complete send buffer "
356 "initialization with NetVsp - status %d\n",
357 init_packet->msg.v1_msg.
 358 send_send_buf_complete.status);
359 ret = -EINVAL;
360 goto cleanup;
361 }
362
363 /* Parse the response */
364 net_device->send_section_size = init_packet->msg.
365 v1_msg.send_send_buf_complete.section_size;
366
367 /* Section count is simply the size divided by the section size.
368 */
369 net_device->send_section_cnt =
370 net_device->send_buf_size/net_device->send_section_size;
371
372 dev_info(&device->device, "Send section size: %d, Section count:%d\n",
373 net_device->send_section_size, net_device->send_section_cnt);
374
375 /* Setup state for managing the send buffer. */
376 net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
377 BITS_PER_LONG);
378
379 net_device->send_section_map =
380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
 381 if (net_device->send_section_map == NULL) {
 382 ret = -ENOMEM;
 383 goto cleanup;
 384 }
383
251 goto exit; 384 goto exit;
252 385
253cleanup: 386cleanup:
254 netvsc_destroy_recv_buf(net_device); 387 netvsc_destroy_buf(net_device);
255 388
256exit: 389exit:
257 return ret; 390 return ret;
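With the 1MB NETVSC_SEND_BUFFER_SIZE from hyperv_net.h, the sectioning arithmetic above works out as follows. The section size itself comes back from the host in the SendSendBufferComplete response, so the value here is only an assumed example:

	/* Assumed example: host reports a 6144-byte send section size. */
	u32 send_buf_size = 1024 * 1024;	/* NETVSC_SEND_BUFFER_SIZE */
	u32 section_size = 6144;		/* from the host's response */
	u32 section_cnt = send_buf_size / section_size;		   /* 170 sections */
	int map_words = DIV_ROUND_UP(section_cnt, BITS_PER_LONG); /* 3 on 64-bit */
	/* send_section_map is then map_words * sizeof(unsigned long) bytes,
	 * one bit per section, all clear (free) initially. */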
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
369 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 502 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
370 else 503 else
371 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 504 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
505 net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
372 506
373 ret = netvsc_init_recv_buf(device); 507 ret = netvsc_init_buf(device);
374 508
375cleanup: 509cleanup:
376 return ret; 510 return ret;
@@ -378,7 +512,7 @@ cleanup:
378 512
379static void netvsc_disconnect_vsp(struct netvsc_device *net_device) 513static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
380{ 514{
381 netvsc_destroy_recv_buf(net_device); 515 netvsc_destroy_buf(net_device);
382} 516}
383 517
384/* 518/*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
387int netvsc_device_remove(struct hv_device *device) 521int netvsc_device_remove(struct hv_device *device)
388{ 522{
389 struct netvsc_device *net_device; 523 struct netvsc_device *net_device;
390 struct hv_netvsc_packet *netvsc_packet, *pos;
391 unsigned long flags; 524 unsigned long flags;
392 525
393 net_device = hv_get_drvdata(device); 526 net_device = hv_get_drvdata(device);
@@ -416,11 +549,8 @@ int netvsc_device_remove(struct hv_device *device)
416 vmbus_close(device->channel); 549 vmbus_close(device->channel);
417 550
418 /* Release all resources */ 551 /* Release all resources */
419 list_for_each_entry_safe(netvsc_packet, pos, 552 if (net_device->sub_cb_buf)
420 &net_device->recv_pkt_list, list_ent) { 553 vfree(net_device->sub_cb_buf);
421 list_del(&netvsc_packet->list_ent);
422 kfree(netvsc_packet);
423 }
424 554
425 kfree(net_device); 555 kfree(net_device);
426 return 0; 556 return 0;
@@ -444,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
444 return avail_write * 100 / ring_info->ring_datasize; 574 return avail_write * 100 / ring_info->ring_datasize;
445} 575}
446 576
577static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
578 u32 index)
579{
580 sync_change_bit(index, net_device->send_section_map);
581}
582
447static void netvsc_send_completion(struct netvsc_device *net_device, 583static void netvsc_send_completion(struct netvsc_device *net_device,
448 struct hv_device *device, 584 struct hv_device *device,
449 struct vmpacket_descriptor *packet) 585 struct vmpacket_descriptor *packet)
@@ -451,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
451 struct nvsp_message *nvsp_packet; 587 struct nvsp_message *nvsp_packet;
452 struct hv_netvsc_packet *nvsc_packet; 588 struct hv_netvsc_packet *nvsc_packet;
453 struct net_device *ndev; 589 struct net_device *ndev;
590 u32 send_index;
454 591
455 ndev = net_device->ndev; 592 ndev = net_device->ndev;
456 593
@@ -461,7 +598,9 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
461 (nvsp_packet->hdr.msg_type == 598 (nvsp_packet->hdr.msg_type ==
462 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || 599 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
463 (nvsp_packet->hdr.msg_type == 600 (nvsp_packet->hdr.msg_type ==
464 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) { 601 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
602 (nvsp_packet->hdr.msg_type ==
603 NVSP_MSG5_TYPE_SUBCHANNEL)) {
465 /* Copy the response back */ 604 /* Copy the response back */
466 memcpy(&net_device->channel_init_pkt, nvsp_packet, 605 memcpy(&net_device->channel_init_pkt, nvsp_packet,
467 sizeof(struct nvsp_message)); 606 sizeof(struct nvsp_message));
@@ -469,28 +608,39 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
469 } else if (nvsp_packet->hdr.msg_type == 608 } else if (nvsp_packet->hdr.msg_type ==
470 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 609 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
471 int num_outstanding_sends; 610 int num_outstanding_sends;
611 u16 q_idx = 0;
612 struct vmbus_channel *channel = device->channel;
613 int queue_sends;
472 614
473 /* Get the send context */ 615 /* Get the send context */
474 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 616 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
475 packet->trans_id; 617 packet->trans_id;
476 618
477 /* Notify the layer above us */ 619 /* Notify the layer above us */
478 if (nvsc_packet) 620 if (nvsc_packet) {
479 nvsc_packet->completion.send.send_completion( 621 send_index = nvsc_packet->send_buf_index;
480 nvsc_packet->completion.send. 622 if (send_index != NETVSC_INVALID_INDEX)
481 send_completion_ctx); 623 netvsc_free_send_slot(net_device, send_index);
624 q_idx = nvsc_packet->q_idx;
625 channel = nvsc_packet->channel;
626 nvsc_packet->send_completion(nvsc_packet->
627 send_completion_ctx);
628 }
482 629
483 num_outstanding_sends = 630 num_outstanding_sends =
484 atomic_dec_return(&net_device->num_outstanding_sends); 631 atomic_dec_return(&net_device->num_outstanding_sends);
632 queue_sends = atomic_dec_return(&net_device->
633 queue_sends[q_idx]);
485 634
486 if (net_device->destroy && num_outstanding_sends == 0) 635 if (net_device->destroy && num_outstanding_sends == 0)
487 wake_up(&net_device->wait_drain); 636 wake_up(&net_device->wait_drain);
488 637
489 if (netif_queue_stopped(ndev) && !net_device->start_remove && 638 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
490 (hv_ringbuf_avail_percent(&device->channel->outbound) 639 !net_device->start_remove &&
491 > RING_AVAIL_PERCENT_HIWATER || 640 (hv_ringbuf_avail_percent(&channel->outbound) >
492 num_outstanding_sends < 1)) 641 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
493 netif_wake_queue(ndev); 642 netif_tx_wake_queue(netdev_get_tx_queue(
643 ndev, q_idx));
494 } else { 644 } else {
495 netdev_err(ndev, "Unknown send completion packet type- " 645 netdev_err(ndev, "Unknown send completion packet type- "
496 "%d received!!\n", nvsp_packet->hdr.msg_type); 646 "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -498,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
498 648
499} 649}
500 650
651static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
652{
653 unsigned long index;
654 u32 max_words = net_device->map_words;
655 unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
656 u32 section_cnt = net_device->send_section_cnt;
657 int ret_val = NETVSC_INVALID_INDEX;
658 int i;
659 int prev_val;
660
661 for (i = 0; i < max_words; i++) {
662 if (!~(map_addr[i]))
663 continue;
664 index = ffz(map_addr[i]);
665 prev_val = sync_test_and_set_bit(index, &map_addr[i]);
666 if (prev_val)
667 continue;
668 if ((index + (i * BITS_PER_LONG)) >= section_cnt)
669 break;
670 ret_val = (index + (i * BITS_PER_LONG));
671 break;
672 }
673 return ret_val;
674}
675
676u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
677 unsigned int section_index,
678 struct hv_netvsc_packet *packet)
679{
680 char *start = net_device->send_buf;
681 char *dest = (start + (section_index * net_device->send_section_size));
682 int i;
683 u32 msg_size = 0;
684
685 for (i = 0; i < packet->page_buf_cnt; i++) {
686 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
687 u32 offset = packet->page_buf[i].offset;
688 u32 len = packet->page_buf[i].len;
689
690 memcpy(dest, (src + offset), len);
691 msg_size += len;
692 dest += len;
693 }
694 return msg_size;
695}
696
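netvsc_get_next_send_section() above is a lock-free claim over that bitmap: scan each word for a zero bit with ffz(), then try to take it with an atomic test-and-set, moving on if another CPU won the race. The same find-and-claim idiom in miniature, using the generic test_and_set_bit() rather than the sync_* Hyper-V variants, and (unlike the code above) releasing a bit claimed past the valid range:

	#include <linux/bitops.h>

	/* Returns a claimed slot index, or -1 if the map is full.
	 * map has nwords words covering nslots valid slots.
	 */
	static int claim_slot(unsigned long *map, int nwords, u32 nslots)
	{
		int i;

		for (i = 0; i < nwords; i++) {
			unsigned long index;

			if (!~map[i])		/* word fully set: nothing free */
				continue;
			index = ffz(map[i]);	/* lowest clear bit */
			if (test_and_set_bit(index, &map[i]))
				continue;	/* lost the race; try next word */
			if (index + i * BITS_PER_LONG >= nslots) {
				clear_bit(index, &map[i]); /* past valid range */
				return -1;
			}
			return index + i * BITS_PER_LONG;
		}
		return -1;
	}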
501int netvsc_send(struct hv_device *device, 697int netvsc_send(struct hv_device *device,
502 struct hv_netvsc_packet *packet) 698 struct hv_netvsc_packet *packet)
503{ 699{
@@ -505,7 +701,12 @@ int netvsc_send(struct hv_device *device,
505 int ret = 0; 701 int ret = 0;
506 struct nvsp_message sendMessage; 702 struct nvsp_message sendMessage;
507 struct net_device *ndev; 703 struct net_device *ndev;
704 struct vmbus_channel *out_channel = NULL;
508 u64 req_id; 705 u64 req_id;
706 unsigned int section_index = NETVSC_INVALID_INDEX;
707 u32 msg_size = 0;
708 struct sk_buff *skb;
709
509 710
510 net_device = get_outbound_net_device(device); 711 net_device = get_outbound_net_device(device);
511 if (!net_device) 712 if (!net_device)
@@ -521,25 +722,46 @@ int netvsc_send(struct hv_device *device,
521 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1; 722 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
522 } 723 }
523 724
524 /* Not using send buffer section */ 725 /* Attempt to send via sendbuf */
726 if (packet->total_data_buflen < net_device->send_section_size) {
727 section_index = netvsc_get_next_send_section(net_device);
728 if (section_index != NETVSC_INVALID_INDEX) {
729 msg_size = netvsc_copy_to_send_buf(net_device,
730 section_index,
731 packet);
732 skb = (struct sk_buff *)
733 (unsigned long)packet->send_completion_tid;
734 if (skb)
735 dev_kfree_skb_any(skb);
736 packet->page_buf_cnt = 0;
737 }
738 }
739 packet->send_buf_index = section_index;
740
741
525 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index = 742 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
526 0xFFFFFFFF; 743 section_index;
527 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 744 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
528 745
529 if (packet->completion.send.send_completion) 746 if (packet->send_completion)
530 req_id = (ulong)packet; 747 req_id = (ulong)packet;
531 else 748 else
532 req_id = 0; 749 req_id = 0;
533 750
751 out_channel = net_device->chn_table[packet->q_idx];
752 if (out_channel == NULL)
753 out_channel = device->channel;
754 packet->channel = out_channel;
755
534 if (packet->page_buf_cnt) { 756 if (packet->page_buf_cnt) {
535 ret = vmbus_sendpacket_pagebuffer(device->channel, 757 ret = vmbus_sendpacket_pagebuffer(out_channel,
536 packet->page_buf, 758 packet->page_buf,
537 packet->page_buf_cnt, 759 packet->page_buf_cnt,
538 &sendMessage, 760 &sendMessage,
539 sizeof(struct nvsp_message), 761 sizeof(struct nvsp_message),
540 req_id); 762 req_id);
541 } else { 763 } else {
542 ret = vmbus_sendpacket(device->channel, &sendMessage, 764 ret = vmbus_sendpacket(out_channel, &sendMessage,
543 sizeof(struct nvsp_message), 765 sizeof(struct nvsp_message),
544 req_id, 766 req_id,
545 VM_PKT_DATA_INBAND, 767 VM_PKT_DATA_INBAND,
@@ -548,17 +770,24 @@ int netvsc_send(struct hv_device *device,
548 770
549 if (ret == 0) { 771 if (ret == 0) {
550 atomic_inc(&net_device->num_outstanding_sends); 772 atomic_inc(&net_device->num_outstanding_sends);
551 if (hv_ringbuf_avail_percent(&device->channel->outbound) < 773 atomic_inc(&net_device->queue_sends[packet->q_idx]);
774
775 if (hv_ringbuf_avail_percent(&out_channel->outbound) <
552 RING_AVAIL_PERCENT_LOWATER) { 776 RING_AVAIL_PERCENT_LOWATER) {
553 netif_stop_queue(ndev); 777 netif_tx_stop_queue(netdev_get_tx_queue(
778 ndev, packet->q_idx));
779
554 if (atomic_read(&net_device-> 780 if (atomic_read(&net_device->
555 num_outstanding_sends) < 1) 781 queue_sends[packet->q_idx]) < 1)
556 netif_wake_queue(ndev); 782 netif_tx_wake_queue(netdev_get_tx_queue(
783 ndev, packet->q_idx));
557 } 784 }
558 } else if (ret == -EAGAIN) { 785 } else if (ret == -EAGAIN) {
559 netif_stop_queue(ndev); 786 netif_tx_stop_queue(netdev_get_tx_queue(
560 if (atomic_read(&net_device->num_outstanding_sends) < 1) { 787 ndev, packet->q_idx));
561 netif_wake_queue(ndev); 788 if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
789 netif_tx_wake_queue(netdev_get_tx_queue(
790 ndev, packet->q_idx));
562 ret = -ENOSPC; 791 ret = -ENOSPC;
563 } 792 }
564 } else { 793 } else {
@@ -570,6 +799,7 @@ int netvsc_send(struct hv_device *device,
570} 799}
571 800
572static void netvsc_send_recv_completion(struct hv_device *device, 801static void netvsc_send_recv_completion(struct hv_device *device,
802 struct vmbus_channel *channel,
573 struct netvsc_device *net_device, 803 struct netvsc_device *net_device,
574 u64 transaction_id, u32 status) 804 u64 transaction_id, u32 status)
575{ 805{
@@ -587,7 +817,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
587 817
588retry_send_cmplt: 818retry_send_cmplt:
589 /* Send the completion */ 819 /* Send the completion */
590 ret = vmbus_sendpacket(device->channel, &recvcompMessage, 820 ret = vmbus_sendpacket(channel, &recvcompMessage,
591 sizeof(struct nvsp_message), transaction_id, 821 sizeof(struct nvsp_message), transaction_id,
592 VM_PKT_COMP, 0); 822 VM_PKT_COMP, 0);
593 if (ret == 0) { 823 if (ret == 0) {
@@ -613,76 +843,20 @@ retry_send_cmplt:
613 } 843 }
614} 844}
615 845
616/* Send a receive completion packet to RNDIS device (ie NetVsp) */
617static void netvsc_receive_completion(void *context)
618{
619 struct hv_netvsc_packet *packet = context;
620 struct hv_device *device = packet->device;
621 struct netvsc_device *net_device;
622 u64 transaction_id = 0;
623 bool fsend_receive_comp = false;
624 unsigned long flags;
625 struct net_device *ndev;
626 u32 status = NVSP_STAT_NONE;
627
628 /*
629 * Even though it seems logical to do a GetOutboundNetDevice() here to
630 * send out receive completion, we are using GetInboundNetDevice()
631 * since we may have disable outbound traffic already.
632 */
633 net_device = get_inbound_net_device(device);
634 if (!net_device)
635 return;
636 ndev = net_device->ndev;
637
638 /* Overloading use of the lock. */
639 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
640
641 if (packet->status != NVSP_STAT_SUCCESS)
642 packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
643
644 packet->xfer_page_pkt->count--;
645
646 /*
647 * Last one in the line that represent 1 xfer page packet.
648 * Return the xfer page packet itself to the freelist
649 */
650 if (packet->xfer_page_pkt->count == 0) {
651 fsend_receive_comp = true;
652 transaction_id = packet->completion.recv.recv_completion_tid;
653 status = packet->xfer_page_pkt->status;
654 list_add_tail(&packet->xfer_page_pkt->list_ent,
655 &net_device->recv_pkt_list);
656
657 }
658
659 /* Put the packet back */
660 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
661 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
662
663 /* Send a receive completion for the xfer page packet */
664 if (fsend_receive_comp)
665 netvsc_send_recv_completion(device, net_device, transaction_id,
666 status);
667
668}
669
670static void netvsc_receive(struct netvsc_device *net_device, 846static void netvsc_receive(struct netvsc_device *net_device,
847 struct vmbus_channel *channel,
671 struct hv_device *device, 848 struct hv_device *device,
672 struct vmpacket_descriptor *packet) 849 struct vmpacket_descriptor *packet)
673{ 850{
674 struct vmtransfer_page_packet_header *vmxferpage_packet; 851 struct vmtransfer_page_packet_header *vmxferpage_packet;
675 struct nvsp_message *nvsp_packet; 852 struct nvsp_message *nvsp_packet;
676 struct hv_netvsc_packet *netvsc_packet = NULL; 853 struct hv_netvsc_packet nv_pkt;
677 /* struct netvsc_driver *netvscDriver; */ 854 struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
678 struct xferpage_packet *xferpage_packet = NULL; 855 u32 status = NVSP_STAT_SUCCESS;
679 int i; 856 int i;
680 int count = 0; 857 int count = 0;
681 unsigned long flags;
682 struct net_device *ndev; 858 struct net_device *ndev;
683 859
684 LIST_HEAD(listHead);
685
686 ndev = net_device->ndev; 860 ndev = net_device->ndev;
687 861
688 /* 862 /*
@@ -715,77 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
715 return; 889 return;
716 } 890 }
717 891
718 /* 892 count = vmxferpage_packet->range_cnt;
719 * Grab free packets (range count + 1) to represent this xfer 893 netvsc_packet->device = device;
720 * page packet. +1 to represent the xfer page packet itself. 894 netvsc_packet->channel = channel;
721 * We grab it here so that we know exactly how many we can
722 * fulfil
723 */
724 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
725 while (!list_empty(&net_device->recv_pkt_list)) {
726 list_move_tail(net_device->recv_pkt_list.next, &listHead);
727 if (++count == vmxferpage_packet->range_cnt + 1)
728 break;
729 }
730 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
731
732 /*
733 * We need at least 2 netvsc pkts (1 to represent the xfer
734 * page and at least 1 for the range) i.e. we can handled
735 * some of the xfer page packet ranges...
736 */
737 if (count < 2) {
738 netdev_err(ndev, "Got only %d netvsc pkt...needed "
739 "%d pkts. Dropping this xfer page packet completely!\n",
740 count, vmxferpage_packet->range_cnt + 1);
741
742 /* Return it to the freelist */
743 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
744 for (i = count; i != 0; i--) {
745 list_move_tail(listHead.next,
746 &net_device->recv_pkt_list);
747 }
748 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
749 flags);
750
751 netvsc_send_recv_completion(device, net_device,
752 vmxferpage_packet->d.trans_id,
753 NVSP_STAT_FAIL);
754
755 return;
756 }
757
758 /* Remove the 1st packet to represent the xfer page packet itself */
759 xferpage_packet = (struct xferpage_packet *)listHead.next;
760 list_del(&xferpage_packet->list_ent);
761 xferpage_packet->status = NVSP_STAT_SUCCESS;
762
763 /* This is how much we can satisfy */
764 xferpage_packet->count = count - 1;
765
766 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
767 netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
768 "this xfer page...got %d\n",
769 vmxferpage_packet->range_cnt, xferpage_packet->count);
770 }
771 895
772 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ 896 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
773 for (i = 0; i < (count - 1); i++) { 897 for (i = 0; i < count; i++) {
774 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
775 list_del(&netvsc_packet->list_ent);
776
777 /* Initialize the netvsc packet */ 898 /* Initialize the netvsc packet */
778 netvsc_packet->status = NVSP_STAT_SUCCESS; 899 netvsc_packet->status = NVSP_STAT_SUCCESS;
779 netvsc_packet->xfer_page_pkt = xferpage_packet;
780 netvsc_packet->completion.recv.recv_completion =
781 netvsc_receive_completion;
782 netvsc_packet->completion.recv.recv_completion_ctx =
783 netvsc_packet;
784 netvsc_packet->device = device;
785 /* Save this so that we can send it back */
786 netvsc_packet->completion.recv.recv_completion_tid =
787 vmxferpage_packet->d.trans_id;
788
789 netvsc_packet->data = (void *)((unsigned long)net_device-> 900 netvsc_packet->data = (void *)((unsigned long)net_device->
790 recv_buf + vmxferpage_packet->ranges[i].byte_offset); 901 recv_buf + vmxferpage_packet->ranges[i].byte_offset);
791 netvsc_packet->total_data_buflen = 902 netvsc_packet->total_data_buflen =
@@ -794,16 +905,53 @@ static void netvsc_receive(struct netvsc_device *net_device,
794 /* Pass it to the upper layer */ 905 /* Pass it to the upper layer */
795 rndis_filter_receive(device, netvsc_packet); 906 rndis_filter_receive(device, netvsc_packet);
796 907
797 netvsc_receive_completion(netvsc_packet-> 908 if (netvsc_packet->status != NVSP_STAT_SUCCESS)
798 completion.recv.recv_completion_ctx); 909 status = NVSP_STAT_FAIL;
910 }
911
912 netvsc_send_recv_completion(device, channel, net_device,
913 vmxferpage_packet->d.trans_id, status);
914}
915
916
917static void netvsc_send_table(struct hv_device *hdev,
918 struct vmpacket_descriptor *vmpkt)
919{
920 struct netvsc_device *nvscdev;
921 struct net_device *ndev;
922 struct nvsp_message *nvmsg;
923 int i;
924 u32 count, *tab;
925
926 nvscdev = get_outbound_net_device(hdev);
927 if (!nvscdev)
928 return;
929 ndev = nvscdev->ndev;
930
931 nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
932 (vmpkt->offset8 << 3));
933
934 if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
935 return;
936
937 count = nvmsg->msg.v5_msg.send_table.count;
938 if (count != VRSS_SEND_TAB_SIZE) {
939 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
940 return;
799 } 941 }
800 942
943 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
944 nvmsg->msg.v5_msg.send_table.offset);
945
946 for (i = 0; i < count; i++)
947 nvscdev->send_table[i] = tab[i];
801} 948}
802 949
803static void netvsc_channel_cb(void *context) 950void netvsc_channel_cb(void *context)
804{ 951{
805 int ret; 952 int ret;
806 struct hv_device *device = context; 953 struct vmbus_channel *channel = (struct vmbus_channel *)context;
954 struct hv_device *device;
807 struct netvsc_device *net_device; 955 struct netvsc_device *net_device;
808 u32 bytes_recvd; 956 u32 bytes_recvd;
809 u64 request_id; 957 u64 request_id;
@@ -812,14 +960,19 @@ static void netvsc_channel_cb(void *context)
812 int bufferlen = NETVSC_PACKET_SIZE; 960 int bufferlen = NETVSC_PACKET_SIZE;
813 struct net_device *ndev; 961 struct net_device *ndev;
814 962
963 if (channel->primary_channel != NULL)
964 device = channel->primary_channel->device_obj;
965 else
966 device = channel->device_obj;
967
815 net_device = get_inbound_net_device(device); 968 net_device = get_inbound_net_device(device);
816 if (!net_device) 969 if (!net_device)
817 return; 970 return;
818 ndev = net_device->ndev; 971 ndev = net_device->ndev;
819 buffer = net_device->cb_buffer; 972 buffer = get_per_channel_state(channel);
820 973
821 do { 974 do {
822 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen, 975 ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
823 &bytes_recvd, &request_id); 976 &bytes_recvd, &request_id);
824 if (ret == 0) { 977 if (ret == 0) {
825 if (bytes_recvd > 0) { 978 if (bytes_recvd > 0) {
@@ -831,8 +984,12 @@ static void netvsc_channel_cb(void *context)
831 break; 984 break;
832 985
833 case VM_PKT_DATA_USING_XFER_PAGES: 986 case VM_PKT_DATA_USING_XFER_PAGES:
834 netvsc_receive(net_device, 987 netvsc_receive(net_device, channel,
835 device, desc); 988 device, desc);
989 break;
990
991 case VM_PKT_DATA_INBAND:
992 netvsc_send_table(device, desc);
836 break; 993 break;
837 994
838 default: 995 default:
@@ -880,11 +1037,9 @@ static void netvsc_channel_cb(void *context)
880int netvsc_device_add(struct hv_device *device, void *additional_info) 1037int netvsc_device_add(struct hv_device *device, void *additional_info)
881{ 1038{
882 int ret = 0; 1039 int ret = 0;
883 int i;
884 int ring_size = 1040 int ring_size =
885 ((struct netvsc_device_info *)additional_info)->ring_size; 1041 ((struct netvsc_device_info *)additional_info)->ring_size;
886 struct netvsc_device *net_device; 1042 struct netvsc_device *net_device;
887 struct hv_netvsc_packet *packet, *pos;
888 struct net_device *ndev; 1043 struct net_device *ndev;
889 1044
890 net_device = alloc_net_device(device); 1045 net_device = alloc_net_device(device);
@@ -893,6 +1048,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
893 goto cleanup; 1048 goto cleanup;
894 } 1049 }
895 1050
1051 net_device->ring_size = ring_size;
1052
896 /* 1053 /*
897 * Coming into this function, struct net_device * is 1054 * Coming into this function, struct net_device * is
898 * registered as the driver private data. 1055 * registered as the driver private data.
@@ -903,24 +1060,14 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
903 ndev = net_device->ndev; 1060 ndev = net_device->ndev;
904 1061
905 /* Initialize the NetVSC channel extension */ 1062 /* Initialize the NetVSC channel extension */
906 spin_lock_init(&net_device->recv_pkt_list_lock);
907
908 INIT_LIST_HEAD(&net_device->recv_pkt_list);
909
910 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
911 packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
912 if (!packet)
913 break;
914
915 list_add_tail(&packet->list_ent,
916 &net_device->recv_pkt_list);
917 }
918 init_completion(&net_device->channel_init_wait); 1063 init_completion(&net_device->channel_init_wait);
919 1064
1065 set_per_channel_state(device->channel, net_device->cb_buffer);
1066
920 /* Open the channel */ 1067 /* Open the channel */
921 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, 1068 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
922 ring_size * PAGE_SIZE, NULL, 0, 1069 ring_size * PAGE_SIZE, NULL, 0,
923 netvsc_channel_cb, device); 1070 netvsc_channel_cb, device->channel);
924 1071
925 if (ret != 0) { 1072 if (ret != 0) {
926 netdev_err(ndev, "unable to open channel: %d\n", ret); 1073 netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -930,6 +1077,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
930 /* Channel is opened */ 1077 /* Channel is opened */
931 pr_info("hv_netvsc channel opened successfully\n"); 1078 pr_info("hv_netvsc channel opened successfully\n");
932 1079
1080 net_device->chn_table[0] = device->channel;
1081
933 /* Connect with the NetVsp */ 1082 /* Connect with the NetVsp */
934 ret = netvsc_connect_vsp(device); 1083 ret = netvsc_connect_vsp(device);
935 if (ret != 0) { 1084 if (ret != 0) {
@@ -946,16 +1095,8 @@ close:
946 1095
947cleanup: 1096cleanup:
948 1097
949 if (net_device) { 1098 if (net_device)
950 list_for_each_entry_safe(packet, pos,
951 &net_device->recv_pkt_list,
952 list_ent) {
953 list_del(&packet->list_ent);
954 kfree(packet);
955 }
956
957 kfree(net_device); 1099 kfree(net_device);
958 }
959 1100
960 return ret; 1101 return ret;
961} 1102}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7918d5132c1f..4fd71b75e666 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -101,7 +101,7 @@ static int netvsc_open(struct net_device *net)
101 return ret; 101 return ret;
102 } 102 }
103 103
104 netif_start_queue(net); 104 netif_tx_start_all_queues(net);
105 105
106 nvdev = hv_get_drvdata(device_obj); 106 nvdev = hv_get_drvdata(device_obj);
107 rdev = nvdev->extension; 107 rdev = nvdev->extension;
@@ -149,15 +149,100 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
149 return ppi; 149 return ppi;
150} 150}
151 151
152union sub_key {
153 u64 k;
154 struct {
155 u8 pad[3];
156 u8 kb;
157 u32 ka;
158 };
159};
160
161/* Toeplitz hash function
162 * data: network byte order
163 * return: host byte order
164 */
165static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
166{
167 union sub_key subk;
168 int k_next = 4;
169 u8 dt;
170 int i, j;
171 u32 ret = 0;
172
173 subk.k = 0;
174 subk.ka = ntohl(*(u32 *)key);
175
176 for (i = 0; i < dlen; i++) {
177 subk.kb = key[k_next];
178 k_next = (k_next + 1) % klen;
179 dt = data[i];
180 for (j = 0; j < 8; j++) {
181 if (dt & 0x80)
182 ret ^= subk.ka;
183 dt <<= 1;
184 subk.k <<= 1;
185 }
186 }
187
188 return ret;
189}
190
191static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
192{
193 struct iphdr *iphdr;
194 int data_len;
195 bool ret = false;
196
197 if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
198 return false;
199
200 iphdr = ip_hdr(skb);
201
202 if (iphdr->version == 4) {
203 if (iphdr->protocol == IPPROTO_TCP)
204 data_len = 12;
205 else
206 data_len = 8;
207 *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
208 (u8 *)&iphdr->saddr, data_len);
209 ret = true;
210 }
211
212 return ret;
213}
214
215static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
216 void *accel_priv, select_queue_fallback_t fallback)
217{
218 struct net_device_context *net_device_ctx = netdev_priv(ndev);
219 struct hv_device *hdev = net_device_ctx->device_ctx;
220 struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
221 u32 hash;
222 u16 q_idx = 0;
223
224 if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
225 return 0;
226
227 if (netvsc_set_hash(&hash, skb)) {
228 q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
229 ndev->real_num_tx_queues;
230 skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
231 }
232
233 return q_idx;
234}
235
152static void netvsc_xmit_completion(void *context) 236static void netvsc_xmit_completion(void *context)
153{ 237{
154 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context; 238 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
155 struct sk_buff *skb = (struct sk_buff *) 239 struct sk_buff *skb = (struct sk_buff *)
156 (unsigned long)packet->completion.send.send_completion_tid; 240 (unsigned long)packet->send_completion_tid;
241 u32 index = packet->send_buf_index;
157 242
158 kfree(packet); 243 kfree(packet);
159 244
160 if (skb) 245 if (skb && (index == NETVSC_INVALID_INDEX))
161 dev_kfree_skb_any(skb); 246 dev_kfree_skb_any(skb);
162} 247}
163 248
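netvsc_select_queue() above turns the Toeplitz hash into a transmit queue in two modulo steps: the hash picks one of the VRSS_SEND_TAB_SIZE send_table slots (the host fills that table via the NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message handled in netvsc.c), and the slot's value is folded into the real queue count. A worked example under assumed values:

	/* Assume 4 real TX queues and a host-provided send_table of
	 * {0,1,2,3,0,1,2,3,...} (VRSS_SEND_TAB_SIZE == 16 entries).
	 */
	u32 hash = 0x323e8fc2;		/* example Toeplitz output */
	u16 q_idx;

	q_idx = send_table[hash % 16] % 4;
	/* 0x323e8fc2 % 16 == 2, send_table[2] == 2, 2 % 4 == 2 -> queue 2 */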
@@ -301,6 +386,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
301 struct ndis_tcp_lso_info *lso_info; 386 struct ndis_tcp_lso_info *lso_info;
302 int hdr_offset; 387 int hdr_offset;
303 u32 net_trans_info; 388 u32 net_trans_info;
389 u32 hash;
304 390
305 391
 306 /* We will at most need two pages to describe the rndis 392
@@ -319,9 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
319 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 405 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
320 (num_data_pgs * sizeof(struct hv_page_buffer)) + 406 (num_data_pgs * sizeof(struct hv_page_buffer)) +
321 sizeof(struct rndis_message) + 407 sizeof(struct rndis_message) +
322 NDIS_VLAN_PPI_SIZE + 408 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
323 NDIS_CSUM_PPI_SIZE + 409 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
324 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
325 if (!packet) { 410 if (!packet) {
326 /* out of memory, drop packet */ 411 /* out of memory, drop packet */
327 netdev_err(net, "unable to allocate hv_netvsc_packet\n"); 412 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -333,6 +418,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
333 418
334 packet->vlan_tci = skb->vlan_tci; 419 packet->vlan_tci = skb->vlan_tci;
335 420
421 packet->q_idx = skb_get_queue_mapping(skb);
422
336 packet->is_data_pkt = true; 423 packet->is_data_pkt = true;
337 packet->total_data_buflen = skb->len; 424 packet->total_data_buflen = skb->len;
338 425
@@ -341,9 +428,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
341 (num_data_pgs * sizeof(struct hv_page_buffer))); 428 (num_data_pgs * sizeof(struct hv_page_buffer)));
342 429
343 /* Set the completion routine */ 430 /* Set the completion routine */
344 packet->completion.send.send_completion = netvsc_xmit_completion; 431 packet->send_completion = netvsc_xmit_completion;
345 packet->completion.send.send_completion_ctx = packet; 432 packet->send_completion_ctx = packet;
346 packet->completion.send.send_completion_tid = (unsigned long)skb; 433 packet->send_completion_tid = (unsigned long)skb;
347 434
348 isvlan = packet->vlan_tci & VLAN_TAG_PRESENT; 435 isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
349 436
@@ -358,6 +445,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
358 445
359 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); 446 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
360 447
448 hash = skb_get_hash_raw(skb);
449 if (hash != 0 && net->real_num_tx_queues > 1) {
450 rndis_msg_size += NDIS_HASH_PPI_SIZE;
451 ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
452 NBL_HASH_VALUE);
453 *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
454 }
455
361 if (isvlan) { 456 if (isvlan) {
362 struct ndis_pkt_8021q_info *vlan; 457 struct ndis_pkt_8021q_info *vlan;
363 458
@@ -558,6 +653,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
558 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 653 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
559 packet->vlan_tci); 654 packet->vlan_tci);
560 655
656 skb_record_rx_queue(skb, packet->channel->
657 offermsg.offer.sub_channel_index);
658
561 net->stats.rx_packets++; 659 net->stats.rx_packets++;
562 net->stats.rx_bytes += packet->total_data_buflen; 660 net->stats.rx_bytes += packet->total_data_buflen;
563 661
@@ -606,7 +704,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
606 hv_set_drvdata(hdev, ndev); 704 hv_set_drvdata(hdev, ndev);
607 device_info.ring_size = ring_size; 705 device_info.ring_size = ring_size;
608 rndis_filter_device_add(hdev, &device_info); 706 rndis_filter_device_add(hdev, &device_info);
609 netif_wake_queue(ndev); 707 netif_tx_wake_all_queues(ndev);
610 708
611 return 0; 709 return 0;
612} 710}
@@ -652,6 +750,7 @@ static const struct net_device_ops device_ops = {
652 .ndo_change_mtu = netvsc_change_mtu, 750 .ndo_change_mtu = netvsc_change_mtu,
653 .ndo_validate_addr = eth_validate_addr, 751 .ndo_validate_addr = eth_validate_addr,
654 .ndo_set_mac_address = netvsc_set_mac_addr, 752 .ndo_set_mac_address = netvsc_set_mac_addr,
753 .ndo_select_queue = netvsc_select_queue,
655}; 754};
656 755
657/* 756/*
@@ -698,9 +797,11 @@ static int netvsc_probe(struct hv_device *dev,
698 struct net_device *net = NULL; 797 struct net_device *net = NULL;
699 struct net_device_context *net_device_ctx; 798 struct net_device_context *net_device_ctx;
700 struct netvsc_device_info device_info; 799 struct netvsc_device_info device_info;
800 struct netvsc_device *nvdev;
701 int ret; 801 int ret;
702 802
703 net = alloc_etherdev(sizeof(struct net_device_context)); 803 net = alloc_etherdev_mq(sizeof(struct net_device_context),
804 num_online_cpus());
704 if (!net) 805 if (!net)
705 return -ENOMEM; 806 return -ENOMEM;
706 807
@@ -719,7 +820,7 @@ static int netvsc_probe(struct hv_device *dev,
719 net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | 820 net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
720 NETIF_F_IP_CSUM | NETIF_F_TSO; 821 NETIF_F_IP_CSUM | NETIF_F_TSO;
721 822
722 SET_ETHTOOL_OPS(net, &ethtool_ops); 823 net->ethtool_ops = &ethtool_ops;
723 SET_NETDEV_DEV(net, &dev->device); 824 SET_NETDEV_DEV(net, &dev->device);
724 825
725 /* Notify the netvsc driver of the new device */ 826 /* Notify the netvsc driver of the new device */
@@ -733,6 +834,10 @@ static int netvsc_probe(struct hv_device *dev,
733 } 834 }
734 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 835 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
735 836
837 nvdev = hv_get_drvdata(dev);
838 netif_set_real_num_tx_queues(net, nvdev->num_chn);
839 netif_set_real_num_rx_queues(net, nvdev->num_chn);
840
736 ret = register_netdev(net); 841 ret = register_netdev(net);
737 if (ret != 0) { 842 if (ret != 0) {
738 pr_err("Unable to register netdev.\n"); 843 pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 143a98caf618..99c527adae5b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,7 +31,7 @@
31#include "hyperv_net.h" 31#include "hyperv_net.h"
32 32
33 33
34#define RNDIS_EXT_LEN 100 34#define RNDIS_EXT_LEN PAGE_SIZE
35struct rndis_request { 35struct rndis_request {
36 struct list_head list_ent; 36 struct list_head list_ent;
37 struct completion wait_event; 37 struct completion wait_event;
@@ -94,6 +94,8 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
94 rndis_msg->ndis_msg_type = msg_type; 94 rndis_msg->ndis_msg_type = msg_type;
95 rndis_msg->msg_len = msg_len; 95 rndis_msg->msg_len = msg_len;
96 96
97 request->pkt.q_idx = 0;
98
97 /* 99 /*
98 * Set the request id. This field is always after the rndis header for 100 * Set the request id. This field is always after the rndis header for
99 * request/response packet types so we just used the SetRequest as a 101 * request/response packet types so we just used the SetRequest as a
@@ -234,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
234 packet->page_buf[0].len; 236 packet->page_buf[0].len;
235 } 237 }
236 238
237 packet->completion.send.send_completion = NULL; 239 packet->send_completion = NULL;
238 240
239 ret = netvsc_send(dev->net_dev->dev, packet); 241 ret = netvsc_send(dev->net_dev->dev, packet);
240 return ret; 242 return ret;
@@ -399,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
399 pkt->total_data_buflen = rndis_pkt->data_len; 401 pkt->total_data_buflen = rndis_pkt->data_len;
400 pkt->data = (void *)((unsigned long)pkt->data + data_offset); 402 pkt->data = (void *)((unsigned long)pkt->data + data_offset);
401 403
402 pkt->is_data_pkt = true;
403
404 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); 404 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
405 if (vlan) { 405 if (vlan) {
406 pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | 406 pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
@@ -509,6 +509,19 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
509 query->info_buflen = 0; 509 query->info_buflen = 0;
510 query->dev_vc_handle = 0; 510 query->dev_vc_handle = 0;
511 511
512 if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
513 struct ndis_recv_scale_cap *cap;
514
515 request->request_msg.msg_len +=
516 sizeof(struct ndis_recv_scale_cap);
517 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
518 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
519 query->info_buf_offset);
520 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
521 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
522 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
523 }
524
512 ret = rndis_filter_send_request(dev, request); 525 ret = rndis_filter_send_request(dev, request);
513 if (ret != 0) 526 if (ret != 0)
514 goto cleanup; 527 goto cleanup;
@@ -695,6 +708,89 @@ cleanup:
695 return ret; 708 return ret;
696} 709}
697 710
711u8 netvsc_hash_key[HASH_KEYLEN] = {
712 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
713 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
714 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
715 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
716 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
717};
718
719int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
720{
721 struct net_device *ndev = rdev->net_dev->ndev;
722 struct rndis_request *request;
723 struct rndis_set_request *set;
724 struct rndis_set_complete *set_complete;
725 u32 extlen = sizeof(struct ndis_recv_scale_param) +
726 4*ITAB_NUM + HASH_KEYLEN;
727 struct ndis_recv_scale_param *rssp;
728 u32 *itab;
729 u8 *keyp;
730 int i, t, ret;
731
732 request = get_rndis_request(
733 rdev, RNDIS_MSG_SET,
734 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
735 if (!request)
736 return -ENOMEM;
737
738 set = &request->request_msg.msg.set_req;
739 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
740 set->info_buflen = extlen;
741 set->info_buf_offset = sizeof(struct rndis_set_request);
742 set->dev_vc_handle = 0;
743
744 rssp = (struct ndis_recv_scale_param *)(set + 1);
745 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
746 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
747 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
748 rssp->flag = 0;
749 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
750 NDIS_HASH_TCP_IPV4;
751 rssp->indirect_tabsize = 4*ITAB_NUM;
752 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
753 rssp->hashkey_size = HASH_KEYLEN;
754 rssp->kashkey_offset = rssp->indirect_taboffset +
755 rssp->indirect_tabsize;
756
757 /* Set indirection table entries */
758 itab = (u32 *)(rssp + 1);
759 for (i = 0; i < ITAB_NUM; i++)
760 itab[i] = i % num_queue;
761
762	/* Set hash key values */
763 keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
764 for (i = 0; i < HASH_KEYLEN; i++)
765 keyp[i] = netvsc_hash_key[i];
766
767
768 ret = rndis_filter_send_request(rdev, request);
769 if (ret != 0)
770 goto cleanup;
771
772 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
773 if (t == 0) {
774 netdev_err(ndev, "timeout before we got a set response...\n");
775 /* can't put_rndis_request, since we may still receive a
776 * send-completion.
777 */
778 return -ETIMEDOUT;
779 } else {
780 set_complete = &request->response_msg.msg.set_complete;
781 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
782			netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
783 set_complete->status);
784 ret = -EINVAL;
785 }
786 }
787
788cleanup:
789 put_rndis_request(rdev, request);
790 return ret;
791}
792
793
698static int rndis_filter_query_device_link_status(struct rndis_device *dev) 794static int rndis_filter_query_device_link_status(struct rndis_device *dev)
699{ 795{
700 u32 size = sizeof(u32); 796 u32 size = sizeof(u32);
@@ -886,6 +982,28 @@ static int rndis_filter_close_device(struct rndis_device *dev)
886 return ret; 982 return ret;
887} 983}
888 984
985static void netvsc_sc_open(struct vmbus_channel *new_sc)
986{
987 struct netvsc_device *nvscdev;
988 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
989 int ret;
990
991 nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
992
993 if (chn_index >= nvscdev->num_chn)
994 return;
995
996 set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
997 NETVSC_PACKET_SIZE);
998
999 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
1000 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
1001 netvsc_channel_cb, new_sc);
1002
1003 if (ret == 0)
1004 nvscdev->chn_table[chn_index] = new_sc;
1005}
1006
889int rndis_filter_device_add(struct hv_device *dev, 1007int rndis_filter_device_add(struct hv_device *dev,
890 void *additional_info) 1008 void *additional_info)
891{ 1009{
@@ -894,6 +1012,10 @@ int rndis_filter_device_add(struct hv_device *dev,
894 struct rndis_device *rndis_device; 1012 struct rndis_device *rndis_device;
895 struct netvsc_device_info *device_info = additional_info; 1013 struct netvsc_device_info *device_info = additional_info;
896 struct ndis_offload_params offloads; 1014 struct ndis_offload_params offloads;
1015 struct nvsp_message *init_packet;
1016 int t;
1017 struct ndis_recv_scale_cap rsscap;
1018 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
897 1019
898 rndis_device = get_rndis_device(); 1020 rndis_device = get_rndis_device();
899 if (!rndis_device) 1021 if (!rndis_device)
@@ -913,6 +1035,7 @@ int rndis_filter_device_add(struct hv_device *dev,
913 1035
914 /* Initialize the rndis device */ 1036 /* Initialize the rndis device */
915 net_device = hv_get_drvdata(dev); 1037 net_device = hv_get_drvdata(dev);
1038 net_device->num_chn = 1;
916 1039
917 net_device->extension = rndis_device; 1040 net_device->extension = rndis_device;
918 rndis_device->net_dev = net_device; 1041 rndis_device->net_dev = net_device;
@@ -952,7 +1075,6 @@ int rndis_filter_device_add(struct hv_device *dev,
952 if (ret) 1075 if (ret)
953 goto err_dev_remv; 1076 goto err_dev_remv;
954 1077
955
956 rndis_filter_query_device_link_status(rndis_device); 1078 rndis_filter_query_device_link_status(rndis_device);
957 1079
958 device_info->link_state = rndis_device->link_state; 1080 device_info->link_state = rndis_device->link_state;
@@ -961,7 +1083,66 @@ int rndis_filter_device_add(struct hv_device *dev,
961 rndis_device->hw_mac_adr, 1083 rndis_device->hw_mac_adr,
962 device_info->link_state ? "down" : "up"); 1084 device_info->link_state ? "down" : "up");
963 1085
964 return ret; 1086 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1087 return 0;
1088
1089 /* vRSS setup */
1090 memset(&rsscap, 0, rsscap_size);
1091 ret = rndis_filter_query_device(rndis_device,
1092 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1093 &rsscap, &rsscap_size);
1094 if (ret || rsscap.num_recv_que < 2)
1095 goto out;
1096
1097 net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
1098 num_online_cpus() : rsscap.num_recv_que;
1099 if (net_device->num_chn == 1)
1100 goto out;
1101
1102 net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
1103 NETVSC_PACKET_SIZE);
1104 if (!net_device->sub_cb_buf) {
1105 net_device->num_chn = 1;
1106 dev_info(&dev->device, "No memory for subchannels.\n");
1107 goto out;
1108 }
1109
1110 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1111
1112 init_packet = &net_device->channel_init_pkt;
1113 memset(init_packet, 0, sizeof(struct nvsp_message));
1114 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1115 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1116 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1117 net_device->num_chn - 1;
1118 ret = vmbus_sendpacket(dev->channel, init_packet,
1119 sizeof(struct nvsp_message),
1120 (unsigned long)init_packet,
1121 VM_PKT_DATA_INBAND,
1122 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1123 if (ret)
1124 goto out;
1125 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
1126 if (t == 0) {
1127 ret = -ETIMEDOUT;
1128 goto out;
1129 }
1130 if (init_packet->msg.v5_msg.subchn_comp.status !=
1131 NVSP_STAT_SUCCESS) {
1132 ret = -ENODEV;
1133 goto out;
1134 }
1135 net_device->num_chn = 1 +
1136 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1137
1138 vmbus_are_subchannels_present(dev->channel);
1139
1140 ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
1141
1142out:
1143 if (ret)
1144 net_device->num_chn = 1;
1145 return 0; /* return 0 because primary channel can be used alone */
965 1146
966err_dev_remv: 1147err_dev_remv:
967 rndis_filter_device_remove(dev); 1148 rndis_filter_device_remove(dev);
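rndis_filter_set_rss_param() above programs a fixed 40-byte Toeplitz key and fills the ITAB_NUM-entry indirection table round-robin (itab[i] = i % num_queue) so receive hashes spread evenly across the opened channels; the host then indexes that table with the low bits of the Toeplitz hash to pick a queue. A minimal sketch of that lookup (illustrative, not code from the patch; assumes ITAB_NUM is a power of two):

	/* Map a receive-side Toeplitz hash to a channel index. */
	static u16 rss_pick_queue(const u32 *itab, u32 hash, u16 num_queue)
	{
		u32 q = itab[hash & (ITAB_NUM - 1)];	/* table slot */

		return q < num_queue ? q : 0;		/* defensive clamp */
	}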
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e36f194673a4..4517b149ed07 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/irq.h>
26#include <linux/gpio.h> 27#include <linux/gpio.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/mutex.h> 29#include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
692 if (rc < 0) 693 if (rc < 0)
693 goto err_rx; 694 goto err_rx;
694 695
695 rc = at86rf230_start(dev); 696 return at86rf230_start(dev);
696
697 return rc;
698
699err_rx: 697err_rx:
700 at86rf230_start(dev); 698 at86rf230_start(dev);
701err: 699err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
963 return at86rf230_isr(irq, data); 961 return at86rf230_isr(irq, data);
964} 962}
965 963
966static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
967{
968 return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
969}
970
971static int at86rf230_hw_init(struct at86rf230_local *lp) 964static int at86rf230_hw_init(struct at86rf230_local *lp)
972{ 965{
973 struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data; 966 int rc, irq_pol, irq_type;
974 int rc, irq_pol; 967 u8 dvdd;
975 u8 status;
976 u8 csma_seed[2]; 968 u8 csma_seed[2];
977 969
978 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
979 if (rc)
980 return rc;
981
982 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF); 970 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
983 if (rc) 971 if (rc)
984 return rc; 972 return rc;
985 973
974 irq_type = irq_get_trigger_type(lp->spi->irq);
986 /* configure irq polarity, defaults to high active */ 975 /* configure irq polarity, defaults to high active */
987 if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) 976 if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
988 irq_pol = IRQ_ACTIVE_LOW; 977 irq_pol = IRQ_ACTIVE_LOW;
989 else 978 else
990 irq_pol = IRQ_ACTIVE_HIGH; 979 irq_pol = IRQ_ACTIVE_HIGH;
991 980
992 rc = at86rf230_irq_polarity(lp, irq_pol); 981 rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
993 if (rc) 982 if (rc)
994 return rc; 983 return rc;
995 984
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
1017 /* Wait the next SLEEP cycle */ 1006 /* Wait the next SLEEP cycle */
1018 msleep(100); 1007 msleep(100);
1019 1008
1020 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status); 1009 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
1021 if (rc) 1010 if (rc)
1022 return rc; 1011 return rc;
1023 if (!status) { 1012 if (!dvdd) {
1024 dev_err(&lp->spi->dev, "DVDD error\n"); 1013 dev_err(&lp->spi->dev, "DVDD error\n");
1025 return -EINVAL; 1014 return -EINVAL;
1026 } 1015 }
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
1032at86rf230_get_pdata(struct spi_device *spi) 1021at86rf230_get_pdata(struct spi_device *spi)
1033{ 1022{
1034 struct at86rf230_platform_data *pdata; 1023 struct at86rf230_platform_data *pdata;
1035 const char *irq_type;
1036 1024
1037 if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) 1025 if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
1038 return spi->dev.platform_data; 1026 return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
1044 pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); 1032 pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
1045 pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); 1033 pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
1046 1034
1047 pdata->irq_type = IRQF_TRIGGER_RISING;
1048 of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
1049 if (!strcmp(irq_type, "level-high"))
1050 pdata->irq_type = IRQF_TRIGGER_HIGH;
1051 else if (!strcmp(irq_type, "level-low"))
1052 pdata->irq_type = IRQF_TRIGGER_LOW;
1053 else if (!strcmp(irq_type, "edge-rising"))
1054 pdata->irq_type = IRQF_TRIGGER_RISING;
1055 else if (!strcmp(irq_type, "edge-falling"))
1056 pdata->irq_type = IRQF_TRIGGER_FALLING;
1057 else
1058 dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
1059
1060 spi->dev.platform_data = pdata; 1035 spi->dev.platform_data = pdata;
1061done: 1036done:
1062 return pdata; 1037 return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
1071 u8 part = 0, version = 0, status; 1046 u8 part = 0, version = 0, status;
1072 irq_handler_t irq_handler; 1047 irq_handler_t irq_handler;
1073 work_func_t irq_worker; 1048 work_func_t irq_worker;
1074 int rc; 1049 int rc, irq_type;
1075 const char *chip; 1050 const char *chip;
1076 struct ieee802154_ops *ops = NULL; 1051 struct ieee802154_ops *ops = NULL;
1077 1052
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
1087 } 1062 }
1088 1063
1089 if (gpio_is_valid(pdata->rstn)) { 1064 if (gpio_is_valid(pdata->rstn)) {
1090 rc = gpio_request(pdata->rstn, "rstn"); 1065 rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
1066 GPIOF_OUT_INIT_HIGH, "rstn");
1091 if (rc) 1067 if (rc)
1092 return rc; 1068 return rc;
1093 } 1069 }
1094 1070
1095 if (gpio_is_valid(pdata->slp_tr)) { 1071 if (gpio_is_valid(pdata->slp_tr)) {
1096 rc = gpio_request(pdata->slp_tr, "slp_tr"); 1072 rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
1097 if (rc) 1073 GPIOF_OUT_INIT_LOW, "slp_tr");
1098 goto err_slp_tr;
1099 }
1100
1101 if (gpio_is_valid(pdata->rstn)) {
1102 rc = gpio_direction_output(pdata->rstn, 1);
1103 if (rc)
1104 goto err_gpio_dir;
1105 }
1106
1107 if (gpio_is_valid(pdata->slp_tr)) {
1108 rc = gpio_direction_output(pdata->slp_tr, 0);
1109 if (rc) 1074 if (rc)
1110 goto err_gpio_dir; 1075 return rc;
1111 } 1076 }
1112 1077
1113 /* Reset */ 1078 /* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
1121 1086
1122 rc = __at86rf230_detect_device(spi, &man_id, &part, &version); 1087 rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
1123 if (rc < 0) 1088 if (rc < 0)
1124 goto err_gpio_dir; 1089 return rc;
1125 1090
1126 if (man_id != 0x001f) { 1091 if (man_id != 0x001f) {
1127 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", 1092 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
1128 man_id >> 8, man_id & 0xFF); 1093 man_id >> 8, man_id & 0xFF);
1129 rc = -EINVAL; 1094 return -EINVAL;
1130 goto err_gpio_dir;
1131 } 1095 }
1132 1096
1133 switch (part) { 1097 switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
1154 } 1118 }
1155 1119
1156 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version); 1120 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
1157 if (!ops) { 1121 if (!ops)
1158 rc = -ENOTSUPP; 1122 return -ENOTSUPP;
1159 goto err_gpio_dir;
1160 }
1161 1123
1162 dev = ieee802154_alloc_device(sizeof(*lp), ops); 1124 dev = ieee802154_alloc_device(sizeof(*lp), ops);
1163 if (!dev) { 1125 if (!dev)
1164 rc = -ENOMEM; 1126 return -ENOMEM;
1165 goto err_gpio_dir;
1166 }
1167 1127
1168 lp = dev->priv; 1128 lp = dev->priv;
1169 lp->dev = dev; 1129 lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
1176 dev->extra_tx_headroom = 0; 1136 dev->extra_tx_headroom = 0;
1177 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK; 1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
1178 1138
1179 if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 1139 irq_type = irq_get_trigger_type(spi->irq);
1140 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
1180 irq_worker = at86rf230_irqwork; 1141 irq_worker = at86rf230_irqwork;
1181 irq_handler = at86rf230_isr; 1142 irq_handler = at86rf230_isr;
1182 } else { 1143 } else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
1202 if (rc) 1163 if (rc)
1203 goto err_hw_init; 1164 goto err_hw_init;
1204 1165
1205 rc = request_irq(spi->irq, irq_handler, 1166 /* Read irq status register to reset irq line */
1206 IRQF_SHARED | pdata->irq_type, 1167 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
1207 dev_name(&spi->dev), lp);
1208 if (rc) 1168 if (rc)
1209 goto err_hw_init; 1169 goto err_hw_init;
1210 1170
1211 /* Read irq status register to reset irq line */ 1171 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
1212 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status); 1172 dev_name(&spi->dev), lp);
1213 if (rc) 1173 if (rc)
1214 goto err_irq; 1174 goto err_hw_init;
1215 1175
1216 rc = ieee802154_register_device(lp->dev); 1176 rc = ieee802154_register_device(lp->dev);
1217 if (rc) 1177 if (rc)
1218 goto err_irq; 1178 goto err_hw_init;
1219 1179
1220 return rc; 1180 return rc;
1221 1181
1222err_irq:
1223 free_irq(spi->irq, lp);
1224err_hw_init: 1182err_hw_init:
1225 flush_work(&lp->irqwork); 1183 flush_work(&lp->irqwork);
1226 spi_set_drvdata(spi, NULL);
1227 mutex_destroy(&lp->bmux); 1184 mutex_destroy(&lp->bmux);
1228 ieee802154_free_device(lp->dev); 1185 ieee802154_free_device(lp->dev);
1229 1186
1230err_gpio_dir:
1231 if (gpio_is_valid(pdata->slp_tr))
1232 gpio_free(pdata->slp_tr);
1233err_slp_tr:
1234 if (gpio_is_valid(pdata->rstn))
1235 gpio_free(pdata->rstn);
1236 return rc; 1187 return rc;
1237} 1188}
1238 1189
1239static int at86rf230_remove(struct spi_device *spi) 1190static int at86rf230_remove(struct spi_device *spi)
1240{ 1191{
1241 struct at86rf230_local *lp = spi_get_drvdata(spi); 1192 struct at86rf230_local *lp = spi_get_drvdata(spi);
1242 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
1243 1193
1244	/* mask all at86rf230 IRQs */ 1194	/* mask all at86rf230 IRQs */
1245 at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); 1195 at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
1246 ieee802154_unregister_device(lp->dev); 1196 ieee802154_unregister_device(lp->dev);
1247
1248 free_irq(spi->irq, lp);
1249 flush_work(&lp->irqwork); 1197 flush_work(&lp->irqwork);
1250
1251 if (gpio_is_valid(pdata->slp_tr))
1252 gpio_free(pdata->slp_tr);
1253 if (gpio_is_valid(pdata->rstn))
1254 gpio_free(pdata->rstn);
1255
1256 mutex_destroy(&lp->bmux); 1198 mutex_destroy(&lp->bmux);
1257 ieee802154_free_device(lp->dev); 1199 ieee802154_free_device(lp->dev);
1258
1259 dev_dbg(&spi->dev, "unregistered at86rf230\n"); 1200 dev_dbg(&spi->dev, "unregistered at86rf230\n");
1201
1260 return 0; 1202 return 0;
1261} 1203}
1262 1204
1263#if IS_ENABLED(CONFIG_OF) 1205static const struct of_device_id at86rf230_of_match[] = {
1264static struct of_device_id at86rf230_of_match[] = {
1265 { .compatible = "atmel,at86rf230", }, 1206 { .compatible = "atmel,at86rf230", },
1266 { .compatible = "atmel,at86rf231", }, 1207 { .compatible = "atmel,at86rf231", },
1267 { .compatible = "atmel,at86rf233", }, 1208 { .compatible = "atmel,at86rf233", },
1268 { .compatible = "atmel,at86rf212", }, 1209 { .compatible = "atmel,at86rf212", },
1269 { }, 1210 { },
1270}; 1211};
1271#endif 1212MODULE_DEVICE_TABLE(of, at86rf230_of_match);
1213
1214static const struct spi_device_id at86rf230_device_id[] = {
1215 { .name = "at86rf230", },
1216 { .name = "at86rf231", },
1217 { .name = "at86rf233", },
1218 { .name = "at86rf212", },
1219 { },
1220};
1221MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
1272 1222
1273static struct spi_driver at86rf230_driver = { 1223static struct spi_driver at86rf230_driver = {
1224 .id_table = at86rf230_device_id,
1274 .driver = { 1225 .driver = {
1275 .of_match_table = of_match_ptr(at86rf230_of_match), 1226 .of_match_table = of_match_ptr(at86rf230_of_match),
1276 .name = "at86rf230", 1227 .name = "at86rf230",
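The conversion above to devm_gpio_request_one() and devm_request_irq() is what allows the err_slp_tr/err_gpio_dir unwind labels and the gpio_free()/free_irq() calls in remove() to be deleted: devres releases managed resources automatically when probe fails or the device is unbound. A minimal sketch of the pattern (reusing pdata->rstn from the driver):

	/* Managed request: no gpio_free() on later error paths or in
	 * remove(); devres drops the GPIO when the device detaches. */
	rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
				   GPIOF_OUT_INIT_HIGH, "rstn");
	if (rc)
		return rc;	/* earlier devm resources unwind automatically */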
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index b8d22173925d..27d83207d24c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -26,6 +26,7 @@
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/device.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30#include <net/mac802154.h> 31#include <net/mac802154.h>
31#include <net/wpan-phy.h> 32#include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
228 int err = -ENOMEM; 229 int err = -ENOMEM;
229 int i; 230 int i;
230 231
231 priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); 232 priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
233 GFP_KERNEL);
232 if (!priv) 234 if (!priv)
233 goto err_alloc; 235 goto err_alloc;
234 236
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
248err_slave: 250err_slave:
249 list_for_each_entry(dp, &priv->list, list) 251 list_for_each_entry(dp, &priv->list, list)
250 fakelb_del(dp); 252 fakelb_del(dp);
251 kfree(priv);
252err_alloc: 253err_alloc:
253 return err; 254 return err;
254} 255}
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
260 261
261 list_for_each_entry_safe(dp, temp, &priv->list, list) 262 list_for_each_entry_safe(dp, temp, &priv->list, list)
262 fakelb_del(dp); 263 fakelb_del(dp);
263 kfree(priv);
264 264
265 return 0; 265 return 0;
266} 266}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 78a6552ed707..4048062011ba 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,12 +618,12 @@ static int mrf24j40_probe(struct spi_device *spi)
618 618
619 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); 619 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
620 620
621 devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL); 621 devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
622 if (!devrec) 622 if (!devrec)
623 goto err_devrec; 623 goto err_ret;
624 devrec->buf = kzalloc(3, GFP_KERNEL); 624 devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
625 if (!devrec->buf) 625 if (!devrec->buf)
626 goto err_buf; 626 goto err_ret;
627 627
628 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ 628 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
629 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) 629 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
@@ -638,7 +638,7 @@ static int mrf24j40_probe(struct spi_device *spi)
638 638
639 devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops); 639 devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
640 if (!devrec->dev) 640 if (!devrec->dev)
641 goto err_alloc_dev; 641 goto err_ret;
642 642
643 devrec->dev->priv = devrec; 643 devrec->dev->priv = devrec;
644 devrec->dev->parent = &devrec->spi->dev; 644 devrec->dev->parent = &devrec->spi->dev;
@@ -676,12 +676,13 @@ static int mrf24j40_probe(struct spi_device *spi)
676 val &= ~0x3; /* Clear RX mode (normal) */ 676 val &= ~0x3; /* Clear RX mode (normal) */
677 write_short_reg(devrec, REG_RXMCR, val); 677 write_short_reg(devrec, REG_RXMCR, val);
678 678
679 ret = request_threaded_irq(spi->irq, 679 ret = devm_request_threaded_irq(&spi->dev,
680 NULL, 680 spi->irq,
681 mrf24j40_isr, 681 NULL,
682 IRQF_TRIGGER_LOW|IRQF_ONESHOT, 682 mrf24j40_isr,
683 dev_name(&spi->dev), 683 IRQF_TRIGGER_LOW|IRQF_ONESHOT,
684 devrec); 684 dev_name(&spi->dev),
685 devrec);
685 686
686 if (ret) { 687 if (ret) {
687 dev_err(printdev(devrec), "Unable to get IRQ"); 688 dev_err(printdev(devrec), "Unable to get IRQ");
@@ -695,11 +696,7 @@ err_read_reg:
695 ieee802154_unregister_device(devrec->dev); 696 ieee802154_unregister_device(devrec->dev);
696err_register_device: 697err_register_device:
697 ieee802154_free_device(devrec->dev); 698 ieee802154_free_device(devrec->dev);
698err_alloc_dev: 699err_ret:
699 kfree(devrec->buf);
700err_buf:
701 kfree(devrec);
702err_devrec:
703 return ret; 700 return ret;
704} 701}
705 702
@@ -709,15 +706,11 @@ static int mrf24j40_remove(struct spi_device *spi)
709 706
710 dev_dbg(printdev(devrec), "remove\n"); 707 dev_dbg(printdev(devrec), "remove\n");
711 708
712 free_irq(spi->irq, devrec);
713 ieee802154_unregister_device(devrec->dev); 709 ieee802154_unregister_device(devrec->dev);
714 ieee802154_free_device(devrec->dev); 710 ieee802154_free_device(devrec->dev);
715 /* TODO: Will ieee802154_free_device() wait until ->xmit() is 711 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
716 * complete? */ 712 * complete? */
717 713
718 /* Clean up the SPI stuff. */
719 kfree(devrec->buf);
720 kfree(devrec);
721 return 0; 714 return 0;
722} 715}
723 716
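mrf24j40 requests its interrupt with a NULL hard handler and a threaded handler because servicing the chip means SPI transfers, which may sleep and are therefore illegal in hard-IRQ context; IRQF_ONESHOT keeps the level-triggered line masked until the thread returns so it cannot re-fire mid-service. A minimal sketch (my_isr_thread is an illustrative name):

	/* NULL hard handler: the core acks the interrupt and wakes the
	 * thread, where sleeping bus I/O is allowed. IRQF_ONESHOT is
	 * required when the hard handler is NULL. */
	ret = devm_request_threaded_irq(&spi->dev, spi->irq,
					NULL, my_isr_thread,
					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					dev_name(&spi->dev), devrec);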
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 3da44d5d9149..8d101d63abca 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -396,7 +396,8 @@ config MCS_FIR
396 396
397config SH_IRDA 397config SH_IRDA
398 tristate "SuperH IrDA driver" 398 tristate "SuperH IrDA driver"
399 depends on IRDA && ARCH_SHMOBILE 399 depends on IRDA
400 depends on ARCH_SHMOBILE || COMPILE_TEST
400 help 401 help
401	  Say Y here if you want to enable SuperH IrDA devices. 402
402 403
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2900af091c2d..998bb89ede71 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -510,10 +510,8 @@ static void via_hw_init(struct via_ircc_cb *self)
510 */ 510 */
511static int via_ircc_read_dongle_id(int iobase) 511static int via_ircc_read_dongle_id(int iobase)
512{ 512{
513 int dongle_id = 9; /* Default to IBM */
514
515 IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n"); 513 IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
516 return dongle_id; 514 return 9; /* Default to IBM */
517} 515}
518 516
519/* 517/*
@@ -926,7 +924,6 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
926static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) 924static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
927{ 925{
928 int iobase; 926 int iobase;
929 int ret = TRUE;
930 u8 Tx_status; 927 u8 Tx_status;
931 928
932 IRDA_DEBUG(3, "%s()\n", __func__); 929 IRDA_DEBUG(3, "%s()\n", __func__);
@@ -983,7 +980,7 @@ F01_E*/
983	// Tell the network layer that we can accept more frames 980
984 netif_wake_queue(self->netdev); 981 netif_wake_queue(self->netdev);
985//F01 } 982//F01 }
986 return ret; 983 return TRUE;
987} 984}
988 985
989/* 986/*
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e641bb240362..11dbdf36d9c1 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -62,10 +62,6 @@
62#include "w83977af.h" 62#include "w83977af.h"
63#include "w83977af_ir.h" 63#include "w83977af_ir.h"
64 64
65#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
66#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
67#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
68#endif
69#define CONFIG_USE_W977_PNP /* Currently needed */ 65#define CONFIG_USE_W977_PNP /* Currently needed */
70#define PIO_MAX_SPEED 115200 66#define PIO_MAX_SPEED 115200
71 67
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
332 w977_write_reg(0x74, dma+1, efbase[i]); 328 w977_write_reg(0x74, dma+1, efbase[i]);
333#else 329#else
334 w977_write_reg(0x74, dma, efbase[i]); 330 w977_write_reg(0x74, dma, efbase[i]);
335#endif /*CONFIG_ARCH_NETWINDER */ 331#endif /* CONFIG_ARCH_NETWINDER */
336 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */ 332 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
337 333
338 /* Set append hardware CRC, enable IR bank selection */ 334 /* Set append hardware CRC, enable IR bank selection */
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
563static void w83977af_dma_write(struct w83977af_ir *self, int iobase) 559static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
564{ 560{
565 __u8 set; 561 __u8 set;
566#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
567 unsigned long flags;
568 __u8 hcr;
569#endif
570 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len); 562 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
571 563
572 /* Save current set */ 564 /* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
579 /* Choose transmit DMA channel */ 571 /* Choose transmit DMA channel */
580 switch_bank(iobase, SET2); 572 switch_bank(iobase, SET2);
581 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1); 573 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
582#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
583 spin_lock_irqsave(&self->lock, flags);
584
585 disable_dma(self->io.dma);
586 clear_dma_ff(self->io.dma);
587 set_dma_mode(self->io.dma, DMA_MODE_READ);
588 set_dma_addr(self->io.dma, self->tx_buff_dma);
589 set_dma_count(self->io.dma, self->tx_buff.len);
590#else
591 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, 574 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
592 DMA_MODE_WRITE); 575 DMA_MODE_WRITE);
593#endif
594 self->io.direction = IO_XMIT; 576 self->io.direction = IO_XMIT;
595 577
596 /* Enable DMA */ 578 /* Enable DMA */
597 switch_bank(iobase, SET0); 579 switch_bank(iobase, SET0);
598#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
599 hcr = inb(iobase+HCR);
600 outb(hcr | HCR_EN_DMA, iobase+HCR);
601 enable_dma(self->io.dma);
602 spin_unlock_irqrestore(&self->lock, flags);
603#else
604 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR); 580 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
605#endif
606 581
607 /* Restore set register */ 582 /* Restore set register */
608 outb(set, iobase+SSR); 583 outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
711{ 686{
712 int iobase; 687 int iobase;
713 __u8 set; 688 __u8 set;
714#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 689#ifdef CONFIG_ARCH_NETWINDER
715 unsigned long flags; 690 unsigned long flags;
716 __u8 hcr; 691 __u8 hcr;
717#endif 692#endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
736 self->io.direction = IO_RECV; 711 self->io.direction = IO_RECV;
737 self->rx_buff.data = self->rx_buff.head; 712 self->rx_buff.data = self->rx_buff.head;
738 713
739#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 714#ifdef CONFIG_ARCH_NETWINDER
740 spin_lock_irqsave(&self->lock, flags); 715 spin_lock_irqsave(&self->lock, flags);
741 716
742 disable_dma(self->io.dma); 717 disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
759 734
760 /* Enable DMA */ 735 /* Enable DMA */
761 switch_bank(iobase, SET0); 736 switch_bank(iobase, SET0);
762#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 737#ifdef CONFIG_ARCH_NETWINDER
763 hcr = inb(iobase+HCR); 738 hcr = inb(iobase+HCR);
764 outb(hcr | HCR_EN_DMA, iobase+HCR); 739 outb(hcr | HCR_EN_DMA, iobase+HCR);
765 enable_dma(self->io.dma); 740 enable_dma(self->io.dma);
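The NetWinder-only RX path above keeps hand-programming the ISA DMA controller under the driver's own lock, while the deleted TX variant is replaced by irda_setup_dma(); both follow the classic ISA DMA sequence that such helpers wrap. A minimal sketch of that sequence (chan, buf_dma and len are illustrative; claim_dma_lock() serializes access to the shared controller):

	unsigned long flags = claim_dma_lock();

	disable_dma(chan);			/* quiesce the channel */
	clear_dma_ff(chan);			/* reset the byte flip-flop */
	set_dma_mode(chan, DMA_MODE_WRITE);	/* memory -> device */
	set_dma_addr(chan, buf_dma);		/* bus address of the buffer */
	set_dma_count(chan, len);
	enable_dma(chan);
	release_dma_lock(flags);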
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d53e299ae1d9..958df383068a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -30,8 +30,10 @@
30#include <linux/if_link.h> 30#include <linux/if_link.h>
31#include <linux/if_macvlan.h> 31#include <linux/if_macvlan.h>
32#include <linux/hash.h> 32#include <linux/hash.h>
33#include <linux/workqueue.h>
33#include <net/rtnetlink.h> 34#include <net/rtnetlink.h>
34#include <net/xfrm.h> 35#include <net/xfrm.h>
36#include <linux/netpoll.h>
35 37
36#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) 38#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
37 39
@@ -40,10 +42,19 @@ struct macvlan_port {
40 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 42 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
41 struct list_head vlans; 43 struct list_head vlans;
42 struct rcu_head rcu; 44 struct rcu_head rcu;
45 struct sk_buff_head bc_queue;
46 struct work_struct bc_work;
43 bool passthru; 47 bool passthru;
44 int count;
45}; 48};
46 49
50#define MACVLAN_PORT_IS_EMPTY(port) list_empty(&port->vlans)
51
52struct macvlan_skb_cb {
53 const struct macvlan_dev *src;
54};
55
56#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
57
47static void macvlan_port_destroy(struct net_device *dev); 58static void macvlan_port_destroy(struct net_device *dev);
48 59
49static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) 60static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@ -120,7 +131,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
120 struct net_device *dev = vlan->dev; 131 struct net_device *dev = vlan->dev;
121 132
122 if (local) 133 if (local)
123 return dev_forward_skb(dev, skb); 134 return __dev_forward_skb(dev, skb);
124 135
125 skb->dev = dev; 136 skb->dev = dev;
126 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) 137 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +139,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
128 else 139 else
129 skb->pkt_type = PACKET_MULTICAST; 140 skb->pkt_type = PACKET_MULTICAST;
130 141
131 return netif_rx(skb); 142 return 0;
132} 143}
133 144
134static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) 145static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -175,32 +186,32 @@ static void macvlan_broadcast(struct sk_buff *skb,
175 if (likely(nskb)) 186 if (likely(nskb))
176 err = macvlan_broadcast_one( 187 err = macvlan_broadcast_one(
177 nskb, vlan, eth, 188 nskb, vlan, eth,
178 mode == MACVLAN_MODE_BRIDGE); 189 mode == MACVLAN_MODE_BRIDGE) ?:
190 netif_rx_ni(nskb);
179 macvlan_count_rx(vlan, skb->len + ETH_HLEN, 191 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
180 err == NET_RX_SUCCESS, 1); 192 err == NET_RX_SUCCESS, 1);
181 } 193 }
182 } 194 }
183} 195}
184 196
185/* called under rcu_read_lock() from netif_receive_skb */ 197static void macvlan_process_broadcast(struct work_struct *w)
186static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
187{ 198{
188 struct macvlan_port *port; 199 struct macvlan_port *port = container_of(w, struct macvlan_port,
189 struct sk_buff *skb = *pskb; 200 bc_work);
190 const struct ethhdr *eth = eth_hdr(skb); 201 struct sk_buff *skb;
191 const struct macvlan_dev *vlan; 202 struct sk_buff_head list;
192 const struct macvlan_dev *src; 203
193 struct net_device *dev; 204 skb_queue_head_init(&list);
194 unsigned int len = 0; 205
195 int ret = NET_RX_DROP; 206 spin_lock_bh(&port->bc_queue.lock);
207 skb_queue_splice_tail_init(&port->bc_queue, &list);
208 spin_unlock_bh(&port->bc_queue.lock);
209
210 while ((skb = __skb_dequeue(&list))) {
211 const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
212
213 rcu_read_lock();
196 214
197 port = macvlan_port_get_rcu(skb->dev);
198 if (is_multicast_ether_addr(eth->h_dest)) {
199 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
200 if (!skb)
201 return RX_HANDLER_CONSUMED;
202 eth = eth_hdr(skb);
203 src = macvlan_hash_lookup(port, eth->h_source);
204 if (!src) 215 if (!src)
205 /* frame comes from an external address */ 216 /* frame comes from an external address */
206 macvlan_broadcast(skb, port, NULL, 217 macvlan_broadcast(skb, port, NULL,
@@ -213,20 +224,80 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
213 macvlan_broadcast(skb, port, src->dev, 224 macvlan_broadcast(skb, port, src->dev,
214 MACVLAN_MODE_VEPA | 225 MACVLAN_MODE_VEPA |
215 MACVLAN_MODE_BRIDGE); 226 MACVLAN_MODE_BRIDGE);
216 else if (src->mode == MACVLAN_MODE_BRIDGE) 227 else
217 /* 228 /*
218 * flood only to VEPA ports, bridge ports 229 * flood only to VEPA ports, bridge ports
219 * already saw the frame on the way out. 230 * already saw the frame on the way out.
220 */ 231 */
221 macvlan_broadcast(skb, port, src->dev, 232 macvlan_broadcast(skb, port, src->dev,
222 MACVLAN_MODE_VEPA); 233 MACVLAN_MODE_VEPA);
223 else { 234
235 rcu_read_unlock();
236
237 kfree_skb(skb);
238 }
239}
240
241static void macvlan_broadcast_enqueue(struct macvlan_port *port,
242 struct sk_buff *skb)
243{
244 struct sk_buff *nskb;
245 int err = -ENOMEM;
246
247 nskb = skb_clone(skb, GFP_ATOMIC);
248 if (!nskb)
249 goto err;
250
251 spin_lock(&port->bc_queue.lock);
252 if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
253 __skb_queue_tail(&port->bc_queue, nskb);
254 err = 0;
255 }
256 spin_unlock(&port->bc_queue.lock);
257
258 if (err)
259 goto free_nskb;
260
261 schedule_work(&port->bc_work);
262 return;
263
264free_nskb:
265 kfree_skb(nskb);
266err:
267 atomic_long_inc(&skb->dev->rx_dropped);
268}
269
270/* called under rcu_read_lock() from netif_receive_skb */
271static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
272{
273 struct macvlan_port *port;
274 struct sk_buff *skb = *pskb;
275 const struct ethhdr *eth = eth_hdr(skb);
276 const struct macvlan_dev *vlan;
277 const struct macvlan_dev *src;
278 struct net_device *dev;
279 unsigned int len = 0;
280 int ret = NET_RX_DROP;
281
282 port = macvlan_port_get_rcu(skb->dev);
283 if (is_multicast_ether_addr(eth->h_dest)) {
284 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
285 if (!skb)
286 return RX_HANDLER_CONSUMED;
287 eth = eth_hdr(skb);
288 src = macvlan_hash_lookup(port, eth->h_source);
289 if (src && src->mode != MACVLAN_MODE_VEPA &&
290 src->mode != MACVLAN_MODE_BRIDGE) {
224 /* forward to original port. */ 291 /* forward to original port. */
225 vlan = src; 292 vlan = src;
226 ret = macvlan_broadcast_one(skb, vlan, eth, 0); 293 ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
294 netif_rx(skb);
227 goto out; 295 goto out;
228 } 296 }
229 297
298 MACVLAN_SKB_CB(skb)->src = src;
299 macvlan_broadcast_enqueue(port, skb);
300
230 return RX_HANDLER_PASS; 301 return RX_HANDLER_PASS;
231 } 302 }
232 303
@@ -287,12 +358,26 @@ xmit_world:
287 return dev_queue_xmit(skb); 358 return dev_queue_xmit(skb);
288} 359}
289 360
361static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
362{
363#ifdef CONFIG_NET_POLL_CONTROLLER
364 if (vlan->netpoll)
365 netpoll_send_skb(vlan->netpoll, skb);
366#else
367 BUG();
368#endif
369 return NETDEV_TX_OK;
370}
371
290static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 372static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
291 struct net_device *dev) 373 struct net_device *dev)
292{ 374{
293 unsigned int len = skb->len; 375 unsigned int len = skb->len;
294 int ret; 376 int ret;
295 const struct macvlan_dev *vlan = netdev_priv(dev); 377 struct macvlan_dev *vlan = netdev_priv(dev);
378
379 if (unlikely(netpoll_tx_running(dev)))
380 return macvlan_netpoll_send_skb(vlan, skb);
296 381
297 if (vlan->fwd_priv) { 382 if (vlan->fwd_priv) {
298 skb->dev = vlan->lowerdev; 383 skb->dev = vlan->lowerdev;
@@ -424,35 +509,49 @@ hash_del:
424 return 0; 509 return 0;
425} 510}
426 511
427static int macvlan_set_mac_address(struct net_device *dev, void *p) 512static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
428{ 513{
429 struct macvlan_dev *vlan = netdev_priv(dev); 514 struct macvlan_dev *vlan = netdev_priv(dev);
430 struct net_device *lowerdev = vlan->lowerdev; 515 struct net_device *lowerdev = vlan->lowerdev;
431 struct sockaddr *addr = p;
432 int err; 516 int err;
433 517
434 if (!is_valid_ether_addr(addr->sa_data))
435 return -EADDRNOTAVAIL;
436
437 if (!(dev->flags & IFF_UP)) { 518 if (!(dev->flags & IFF_UP)) {
438 /* Just copy in the new address */ 519 /* Just copy in the new address */
439 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 520 ether_addr_copy(dev->dev_addr, addr);
440 } else { 521 } else {
441 /* Rehash and update the device filters */ 522 /* Rehash and update the device filters */
442 if (macvlan_addr_busy(vlan->port, addr->sa_data)) 523 if (macvlan_addr_busy(vlan->port, addr))
443 return -EBUSY; 524 return -EBUSY;
444 525
445 err = dev_uc_add(lowerdev, addr->sa_data); 526 if (!vlan->port->passthru) {
446 if (err) 527 err = dev_uc_add(lowerdev, addr);
447 return err; 528 if (err)
529 return err;
448 530
449 dev_uc_del(lowerdev, dev->dev_addr); 531 dev_uc_del(lowerdev, dev->dev_addr);
532 }
450 533
451 macvlan_hash_change_addr(vlan, addr->sa_data); 534 macvlan_hash_change_addr(vlan, addr);
452 } 535 }
453 return 0; 536 return 0;
454} 537}
455 538
539static int macvlan_set_mac_address(struct net_device *dev, void *p)
540{
541 struct macvlan_dev *vlan = netdev_priv(dev);
542 struct sockaddr *addr = p;
543
544 if (!is_valid_ether_addr(addr->sa_data))
545 return -EADDRNOTAVAIL;
546
547 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
548 dev_set_mac_address(vlan->lowerdev, addr);
549 return 0;
550 }
551
552 return macvlan_sync_address(dev, addr->sa_data);
553}
554
456static void macvlan_change_rx_flags(struct net_device *dev, int change) 555static void macvlan_change_rx_flags(struct net_device *dev, int change)
457{ 556{
458 struct macvlan_dev *vlan = netdev_priv(dev); 557 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,8 +666,7 @@ static void macvlan_uninit(struct net_device *dev)
567 666
568 free_percpu(vlan->pcpu_stats); 667 free_percpu(vlan->pcpu_stats);
569 668
570 port->count -= 1; 669 if (MACVLAN_PORT_IS_EMPTY(port))
571 if (!port->count)
572 macvlan_port_destroy(port->dev); 670 macvlan_port_destroy(port->dev);
573} 671}
574 672
@@ -705,6 +803,50 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
705 return features; 803 return features;
706} 804}
707 805
806#ifdef CONFIG_NET_POLL_CONTROLLER
807static void macvlan_dev_poll_controller(struct net_device *dev)
808{
809 return;
810}
811
812static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
813{
814 struct macvlan_dev *vlan = netdev_priv(dev);
815 struct net_device *real_dev = vlan->lowerdev;
816 struct netpoll *netpoll;
817 int err = 0;
818
819 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
820 err = -ENOMEM;
821 if (!netpoll)
822 goto out;
823
824 err = __netpoll_setup(netpoll, real_dev);
825 if (err) {
826 kfree(netpoll);
827 goto out;
828 }
829
830 vlan->netpoll = netpoll;
831
832out:
833 return err;
834}
835
836static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
837{
838 struct macvlan_dev *vlan = netdev_priv(dev);
839 struct netpoll *netpoll = vlan->netpoll;
840
841 if (!netpoll)
842 return;
843
844 vlan->netpoll = NULL;
845
846 __netpoll_free_async(netpoll);
847}
848#endif /* CONFIG_NET_POLL_CONTROLLER */
849
708static const struct ethtool_ops macvlan_ethtool_ops = { 850static const struct ethtool_ops macvlan_ethtool_ops = {
709 .get_link = ethtool_op_get_link, 851 .get_link = ethtool_op_get_link,
710 .get_settings = macvlan_ethtool_get_settings, 852 .get_settings = macvlan_ethtool_get_settings,
@@ -730,6 +872,11 @@ static const struct net_device_ops macvlan_netdev_ops = {
730 .ndo_fdb_del = macvlan_fdb_del, 872 .ndo_fdb_del = macvlan_fdb_del,
731 .ndo_fdb_dump = ndo_dflt_fdb_dump, 873 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 .ndo_get_lock_subclass = macvlan_get_nest_level, 874 .ndo_get_lock_subclass = macvlan_get_nest_level,
875#ifdef CONFIG_NET_POLL_CONTROLLER
876 .ndo_poll_controller = macvlan_dev_poll_controller,
877 .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
878 .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
879#endif
733}; 880};
734 881
735void macvlan_common_setup(struct net_device *dev) 882void macvlan_common_setup(struct net_device *dev)
@@ -770,6 +917,9 @@ static int macvlan_port_create(struct net_device *dev)
770 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 917 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
771 INIT_HLIST_HEAD(&port->vlan_hash[i]); 918 INIT_HLIST_HEAD(&port->vlan_hash[i]);
772 919
920 skb_queue_head_init(&port->bc_queue);
921 INIT_WORK(&port->bc_work, macvlan_process_broadcast);
922
773 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); 923 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
774 if (err) 924 if (err)
775 kfree(port); 925 kfree(port);
@@ -782,6 +932,7 @@ static void macvlan_port_destroy(struct net_device *dev)
782{ 932{
783 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 933 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
784 934
935 cancel_work_sync(&port->bc_work);
785 dev->priv_flags &= ~IFF_MACVLAN_PORT; 936 dev->priv_flags &= ~IFF_MACVLAN_PORT;
786 netdev_rx_handler_unregister(dev); 937 netdev_rx_handler_unregister(dev);
787 kfree_rcu(port, rcu); 938 kfree_rcu(port, rcu);
@@ -868,13 +1019,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
868 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1019 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
869 1020
870 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 1021 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
871 if (port->count) 1022 if (!MACVLAN_PORT_IS_EMPTY(port))
872 return -EINVAL; 1023 return -EINVAL;
873 port->passthru = true; 1024 port->passthru = true;
874 eth_hw_addr_inherit(dev, lowerdev); 1025 eth_hw_addr_inherit(dev, lowerdev);
875 } 1026 }
876 1027
877 port->count += 1;
878 err = register_netdevice(dev); 1028 err = register_netdevice(dev);
879 if (err < 0) 1029 if (err < 0)
880 goto destroy_port; 1030 goto destroy_port;
@@ -892,8 +1042,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
892unregister_netdev: 1042unregister_netdev:
893 unregister_netdevice(dev); 1043 unregister_netdevice(dev);
894destroy_port: 1044destroy_port:
895 port->count -= 1; 1045 if (MACVLAN_PORT_IS_EMPTY(port))
896 if (!port->count)
897 macvlan_port_destroy(lowerdev); 1046 macvlan_port_destroy(lowerdev);
898 1047
899 return err; 1048 return err;
@@ -1028,6 +1177,25 @@ static int macvlan_device_event(struct notifier_block *unused,
1028 netdev_update_features(vlan->dev); 1177 netdev_update_features(vlan->dev);
1029 } 1178 }
1030 break; 1179 break;
1180 case NETDEV_CHANGEMTU:
1181 list_for_each_entry(vlan, &port->vlans, list) {
1182 if (vlan->dev->mtu <= dev->mtu)
1183 continue;
1184 dev_set_mtu(vlan->dev, dev->mtu);
1185 }
1186 break;
1187 case NETDEV_CHANGEADDR:
1188 if (!port->passthru)
1189 return NOTIFY_DONE;
1190
1191 vlan = list_first_entry_or_null(&port->vlans,
1192 struct macvlan_dev,
1193 list);
1194
1195 if (macvlan_sync_address(vlan->dev, dev->dev_addr))
1196 return NOTIFY_BAD;
1197
1198 break;
1031 case NETDEV_UNREGISTER: 1199 case NETDEV_UNREGISTER:
1032 /* twiddle thumbs on netns device moves */ 1200 /* twiddle thumbs on netns device moves */
1033 if (dev->reg_state != NETREG_UNREGISTERING) 1201 if (dev->reg_state != NETREG_UNREGISTERING)
@@ -1036,11 +1204,17 @@ static int macvlan_device_event(struct notifier_block *unused,
1036 list_for_each_entry_safe(vlan, next, &port->vlans, list) 1204 list_for_each_entry_safe(vlan, next, &port->vlans, list)
1037 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); 1205 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
1038 unregister_netdevice_many(&list_kill); 1206 unregister_netdevice_many(&list_kill);
1039 list_del(&list_kill);
1040 break; 1207 break;
1041 case NETDEV_PRE_TYPE_CHANGE: 1208 case NETDEV_PRE_TYPE_CHANGE:
1042		/* Forbid the underlying device from changing its type. */ 1209
1043 return NOTIFY_BAD; 1210 return NOTIFY_BAD;
1211
1212 case NETDEV_NOTIFY_PEERS:
1213 case NETDEV_BONDING_FAILOVER:
1214 case NETDEV_RESEND_IGMP:
1215 /* Propagate to all vlans */
1216 list_for_each_entry(vlan, &port->vlans, list)
1217 call_netdevice_notifiers(event, vlan->dev);
1044 } 1218 }
1045 return NOTIFY_DONE; 1219 return NOTIFY_DONE;
1046} 1220}
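The macvlan rework above moves multicast/broadcast replication out of the receive softirq: macvlan_handle_frame() now just clones the frame onto a per-port bc_queue and schedules bc_work, and macvlan_process_broadcast() later drains that queue in process context, so flooding every vlan can no longer monopolize softirq time. A minimal sketch of the producer half (struct my_port stands in for macvlan_port; the real code also bounds the queue length by the device's tx_queue_len):

	/* Runs in softirq context: queue a clone, defer the fan-out. */
	static void bc_enqueue(struct my_port *port, struct sk_buff *skb)
	{
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (!nskb)
			return;			/* drop on memory pressure */

		spin_lock(&port->bc_queue.lock);
		__skb_queue_tail(&port->bc_queue, nskb);
		spin_unlock(&port->bc_queue.lock);

		schedule_work(&port->bc_work);	/* drained in process context */
	}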
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 63aa9d9e34c5..5a7e6397440a 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -298,7 +298,6 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
298{ 298{
299 cmd->supported = SUPPORTED_Backplane; 299 cmd->supported = SUPPORTED_Backplane;
300 cmd->advertising = ADVERTISED_Backplane; 300 cmd->advertising = ADVERTISED_Backplane;
301 cmd->speed = SPEED_UNKNOWN;
302 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 301 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
303 cmd->duplex = DUPLEX_FULL; 302 cmd->duplex = DUPLEX_FULL;
304 cmd->port = PORT_OTHER; 303 cmd->port = PORT_OTHER;
@@ -348,7 +347,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
348 memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); 347 memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
349 348
350 ndev->netdev_ops = &ntb_netdev_ops; 349 ndev->netdev_ops = &ntb_netdev_ops;
351 SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops); 350 ndev->ethtool_ops = &ntb_ethtool_ops;
352 351
353 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers); 352 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
354 if (!dev->qp) { 353 if (!dev->qp) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6a17f92153b3..65de0cab8d07 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,6 +24,12 @@ config AMD_PHY
24 ---help--- 24 ---help---
25 Currently supports the am79c874 25 Currently supports the am79c874
26 26
27config AMD_XGBE_PHY
28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
29 depends on OF
30 ---help---
31 Currently supports the AMD 10GbE PHY
32
27config MARVELL_PHY 33config MARVELL_PHY
28 tristate "Drivers for Marvell PHYs" 34 tristate "Drivers for Marvell PHYs"
29 ---help--- 35 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 07d24024863e..7dc3d5b304cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
33obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o 33obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o 34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o 35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
36obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
new file mode 100644
index 000000000000..b57c22442867
--- /dev/null
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -0,0 +1,1357 @@
1/*
2 * AMD 10Gb Ethernet PHY driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 *
25 * License 2: Modified BSD
26 *
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * * Neither the name of Advanced Micro Devices, Inc. nor the
38 * names of its contributors may be used to endorse or promote products
39 * derived from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
53#include <linux/kernel.h>
54#include <linux/device.h>
55#include <linux/platform_device.h>
56#include <linux/string.h>
57#include <linux/errno.h>
58#include <linux/unistd.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/init.h>
62#include <linux/delay.h>
63#include <linux/netdevice.h>
64#include <linux/etherdevice.h>
65#include <linux/skbuff.h>
66#include <linux/mm.h>
67#include <linux/module.h>
68#include <linux/mii.h>
69#include <linux/ethtool.h>
70#include <linux/phy.h>
71#include <linux/mdio.h>
72#include <linux/io.h>
73#include <linux/of.h>
74#include <linux/of_platform.h>
75#include <linux/of_device.h>
76#include <linux/uaccess.h>
77#include <asm/irq.h>
78
79
80MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
81MODULE_LICENSE("Dual BSD/GPL");
82MODULE_VERSION("1.0.0-a");
83MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
84
85#define XGBE_PHY_ID 0x000162d0
86#define XGBE_PHY_MASK 0xfffffff0
87
88#define XGBE_AN_INT_CMPLT 0x01
89#define XGBE_AN_INC_LINK 0x02
90#define XGBE_AN_PG_RCV 0x04
91
92#define XNP_MCF_NULL_MESSAGE 0x001
93#define XNP_ACK_PROCESSED (1 << 12)
94#define XNP_MP_FORMATTED (1 << 13)
95#define XNP_NP_EXCHANGE (1 << 15)
96
97#ifndef MDIO_PMA_10GBR_PMD_CTRL
98#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
99#endif
100#ifndef MDIO_PMA_10GBR_FEC_CTRL
101#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
102#endif
103#ifndef MDIO_AN_XNP
104#define MDIO_AN_XNP 0x0016
105#endif
106
107#ifndef MDIO_AN_INTMASK
108#define MDIO_AN_INTMASK 0x8001
109#endif
110#ifndef MDIO_AN_INT
111#define MDIO_AN_INT 0x8002
112#endif
113
114#ifndef MDIO_CTRL1_SPEED1G
115#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
116#endif
117
118/* SerDes integration register offsets */
119#define SIR0_STATUS 0x0040
120#define SIR1_SPEED 0x0000
121
122/* SerDes integration register entry bit positions and sizes */
123#define SIR0_STATUS_RX_READY_INDEX 0
124#define SIR0_STATUS_RX_READY_WIDTH 1
125#define SIR0_STATUS_TX_READY_INDEX 8
126#define SIR0_STATUS_TX_READY_WIDTH 1
127#define SIR1_SPEED_DATARATE_INDEX 4
128#define SIR1_SPEED_DATARATE_WIDTH 2
129#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
130#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
131#define SIR1_SPEED_PLLSEL_INDEX 3
132#define SIR1_SPEED_PLLSEL_WIDTH 1
133#define SIR1_SPEED_RATECHANGE_INDEX 6
134#define SIR1_SPEED_RATECHANGE_WIDTH 1
135#define SIR1_SPEED_TXAMP_INDEX 8
136#define SIR1_SPEED_TXAMP_WIDTH 4
137#define SIR1_SPEED_WORDMODE_INDEX 0
138#define SIR1_SPEED_WORDMODE_WIDTH 3
139
140#define SPEED_10000_CDR 0x7
141#define SPEED_10000_PLL 0x1
142#define SPEED_10000_RATE 0x0
143#define SPEED_10000_TXAMP 0xa
144#define SPEED_10000_WORD 0x7
145
146#define SPEED_2500_CDR 0x2
147#define SPEED_2500_PLL 0x0
148#define SPEED_2500_RATE 0x2
149#define SPEED_2500_TXAMP 0xf
150#define SPEED_2500_WORD 0x1
151
152#define SPEED_1000_CDR 0x2
153#define SPEED_1000_PLL 0x0
154#define SPEED_1000_RATE 0x3
155#define SPEED_1000_TXAMP 0xf
156#define SPEED_1000_WORD 0x1
157
158
159/* SerDes RxTx register offsets */
160#define RXTX_REG20 0x0050
161#define RXTX_REG114 0x01c8
162
163/* SerDes RxTx register entry bit positions and sizes */
164#define RXTX_REG20_BLWC_ENA_INDEX 2
165#define RXTX_REG20_BLWC_ENA_WIDTH 1
166#define RXTX_REG114_PQ_REG_INDEX 9
167#define RXTX_REG114_PQ_REG_WIDTH 7
168
169#define RXTX_10000_BLWC 0
170#define RXTX_10000_PQ 0x1e
171
172#define RXTX_2500_BLWC 1
173#define RXTX_2500_PQ 0xa
174
175#define RXTX_1000_BLWC 1
176#define RXTX_1000_PQ 0xa
177
178/* Bit setting and getting macros
179 * The get macro will extract the current bit field value from within
180 * the variable
181 *
182 * The set macro will clear the current bit field value within the
183 * variable and then set the bit field of the variable to the
184 * specified value
185 */
186#define GET_BITS(_var, _index, _width) \
187 (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
188
189#define SET_BITS(_var, _index, _width, _val) \
190do { \
191 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
192 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
193} while (0)
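/* Worked example (editorial sketch, not from the original source): with
 * _var == 0x00f0, GET_BITS(_var, 4, 4) yields 0xf, while
 * SET_BITS(_var, 4, 4, 0x3) leaves _var == 0x0030.
 */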
194
195/* Macros for reading or writing SerDes integration registers
196 * The ioread macros will get bit fields or full values using the
197 * register definitions formed using the input names
198 *
199 * The iowrite macros will set bit fields or full values using the
200 * register definitions formed using the input names
201 */
202#define XSIR0_IOREAD(_priv, _reg) \
203 ioread16((_priv)->sir0_regs + _reg)
204
205#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
206 GET_BITS(XSIR0_IOREAD((_priv), _reg), \
207 _reg##_##_field##_INDEX, \
208 _reg##_##_field##_WIDTH)
209
210#define XSIR0_IOWRITE(_priv, _reg, _val) \
211 iowrite16((_val), (_priv)->sir0_regs + _reg)
212
213#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
214do { \
215 u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
216 SET_BITS(reg_val, \
217 _reg##_##_field##_INDEX, \
218 _reg##_##_field##_WIDTH, (_val)); \
219 XSIR0_IOWRITE((_priv), _reg, reg_val); \
220} while (0)
221
222#define XSIR1_IOREAD(_priv, _reg) \
223 ioread16((_priv)->sir1_regs + _reg)
224
225#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
226 GET_BITS(XSIR1_IOREAD((_priv), _reg), \
227 _reg##_##_field##_INDEX, \
228 _reg##_##_field##_WIDTH)
229
230#define XSIR1_IOWRITE(_priv, _reg, _val) \
231 iowrite16((_val), (_priv)->sir1_regs + _reg)
232
233#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
234do { \
235 u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
236 SET_BITS(reg_val, \
237 _reg##_##_field##_INDEX, \
238 _reg##_##_field##_WIDTH, (_val)); \
239 XSIR1_IOWRITE((_priv), _reg, reg_val); \
240} while (0)
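/* Example expansion (editorial sketch): XSIR1_IOWRITE_BITS(priv,
 * SIR1_SPEED, RATECHANGE, 1) token-pastes SIR1_SPEED_RATECHANGE_INDEX (6)
 * and SIR1_SPEED_RATECHANGE_WIDTH (1), so it read-modify-writes bit 6 of
 * the 16-bit register at offset SIR1_SPEED in the sir1 region.
 */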
241
242
243/* Macros for reading or writing SerDes RxTx registers
244 * The ioread macros will get bit fields or full values using the
245 * register definitions formed using the input names
246 *
247 * The iowrite macros will set bit fields or full values using the
248 * register definitions formed using the input names
249 */
250#define XRXTX_IOREAD(_priv, _reg) \
251 ioread16((_priv)->rxtx_regs + _reg)
252
253#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
254 GET_BITS(XRXTX_IOREAD((_priv), _reg), \
255 _reg##_##_field##_INDEX, \
256 _reg##_##_field##_WIDTH)
257
258#define XRXTX_IOWRITE(_priv, _reg, _val) \
259 iowrite16((_val), (_priv)->rxtx_regs + _reg)
260
261#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
262do { \
263 u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
264 SET_BITS(reg_val, \
265 _reg##_##_field##_INDEX, \
266 _reg##_##_field##_WIDTH, (_val)); \
267 XRXTX_IOWRITE((_priv), _reg, reg_val); \
268} while (0)
269
270
271enum amd_xgbe_phy_an {
272 AMD_XGBE_AN_READY = 0,
273 AMD_XGBE_AN_START,
274 AMD_XGBE_AN_EVENT,
275 AMD_XGBE_AN_PAGE_RECEIVED,
276 AMD_XGBE_AN_INCOMPAT_LINK,
277 AMD_XGBE_AN_COMPLETE,
278 AMD_XGBE_AN_NO_LINK,
279 AMD_XGBE_AN_EXIT,
280 AMD_XGBE_AN_ERROR,
281};
282
283enum amd_xgbe_phy_rx {
284 AMD_XGBE_RX_READY = 0,
285 AMD_XGBE_RX_BPA,
286 AMD_XGBE_RX_XNP,
287 AMD_XGBE_RX_COMPLETE,
288};
289
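/* KR = 10GBASE-KR (10 Gb/s backplane); KX = 1000BASE-KX (1 Gb/s backplane) */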
290enum amd_xgbe_phy_mode {
291 AMD_XGBE_MODE_KR,
292 AMD_XGBE_MODE_KX,
293};
294
295struct amd_xgbe_phy_priv {
296 struct platform_device *pdev;
297 struct device *dev;
298
299 struct phy_device *phydev;
300
301 /* SerDes related mmio resources */
302 struct resource *rxtx_res;
303 struct resource *sir0_res;
304 struct resource *sir1_res;
305
306 /* SerDes related mmio registers */
307 void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
308 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
309 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
310
311 /* Maintain link status for re-starting auto-negotiation */
312 unsigned int link;
313 enum amd_xgbe_phy_mode mode;
314
315 /* Auto-negotiation state machine support */
316 struct mutex an_mutex;
317 enum amd_xgbe_phy_an an_result;
318 enum amd_xgbe_phy_an an_state;
319 enum amd_xgbe_phy_rx kr_state;
320 enum amd_xgbe_phy_rx kx_state;
321 struct work_struct an_work;
322 struct workqueue_struct *an_workqueue;
323};
324
325static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
326{
327 int ret;
328
329 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
330 if (ret < 0)
331 return ret;
332
333 ret |= 0x02;
334 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
335
336 return 0;
337}
338
339static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
340{
341 int ret;
342
343 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
344 if (ret < 0)
345 return ret;
346
347 ret &= ~0x02;
348 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
349
350 return 0;
351}
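/* In the 10GBASE-KR PMD control register, bit 1 (0x02) is the clause 72
 * "training enable" bit; bit 0 (0x01), used in amd_xgbe_an_tx_training()
 * below, restarts training.
 */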
352
353static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
354{
355 int ret;
356
357 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
358 if (ret < 0)
359 return ret;
360
361 ret |= MDIO_CTRL1_LPOWER;
362 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
363
364 usleep_range(75, 100);
365
366 ret &= ~MDIO_CTRL1_LPOWER;
367 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
368
369 return 0;
370}
371
372static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
373{
374 struct amd_xgbe_phy_priv *priv = phydev->priv;
375
376 /* Assert Rx and Tx ratechange */
377 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
378}
379
380static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
381{
382 struct amd_xgbe_phy_priv *priv = phydev->priv;
383
384 /* Release Rx and Tx ratechange */
385 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
386
387	/* Wait until both Rx and Tx are ready */
388	while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) ||
389	       !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
390 usleep_range(10, 20);
391}
392
393static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
394{
395 struct amd_xgbe_phy_priv *priv = phydev->priv;
396 int ret;
397
398 /* Enable KR training */
399 ret = amd_xgbe_an_enable_kr_training(phydev);
400 if (ret < 0)
401 return ret;
402
403 /* Set PCS to KR/10G speed */
404 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
405 if (ret < 0)
406 return ret;
407
408 ret &= ~MDIO_PCS_CTRL2_TYPE;
409 ret |= MDIO_PCS_CTRL2_10GBR;
410 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
411
412 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
413 if (ret < 0)
414 return ret;
415
416 ret &= ~MDIO_CTRL1_SPEEDSEL;
417 ret |= MDIO_CTRL1_SPEED10G;
418 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
419
420 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
421 if (ret < 0)
422 return ret;
423
424 /* Set SerDes to 10G speed */
425 amd_xgbe_phy_serdes_start_ratechange(phydev);
426
427 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
428 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
429 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
430 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
431 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
432
433 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
434 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
435
436 amd_xgbe_phy_serdes_complete_ratechange(phydev);
437
438 priv->mode = AMD_XGBE_MODE_KR;
439
440 return 0;
441}
442
443static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
444{
445 struct amd_xgbe_phy_priv *priv = phydev->priv;
446 int ret;
447
448 /* Disable KR training */
449 ret = amd_xgbe_an_disable_kr_training(phydev);
450 if (ret < 0)
451 return ret;
452
453 /* Set PCS to KX/1G speed */
454 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
455 if (ret < 0)
456 return ret;
457
458 ret &= ~MDIO_PCS_CTRL2_TYPE;
459 ret |= MDIO_PCS_CTRL2_10GBX;
460 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
461
462 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
463 if (ret < 0)
464 return ret;
465
466 ret &= ~MDIO_CTRL1_SPEEDSEL;
467 ret |= MDIO_CTRL1_SPEED1G;
468 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
469
470 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
471 if (ret < 0)
472 return ret;
473
474 /* Set SerDes to 2.5G speed */
475 amd_xgbe_phy_serdes_start_ratechange(phydev);
476
477 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
478 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
479 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
480 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
481 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
482
483 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
484 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
485
486 amd_xgbe_phy_serdes_complete_ratechange(phydev);
487
488 priv->mode = AMD_XGBE_MODE_KX;
489
490 return 0;
491}
492
493static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
494{
495 struct amd_xgbe_phy_priv *priv = phydev->priv;
496 int ret;
497
498 /* Disable KR training */
499 ret = amd_xgbe_an_disable_kr_training(phydev);
500 if (ret < 0)
501 return ret;
502
503 /* Set PCS to KX/1G speed */
504 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
505 if (ret < 0)
506 return ret;
507
508 ret &= ~MDIO_PCS_CTRL2_TYPE;
509 ret |= MDIO_PCS_CTRL2_10GBX;
510 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
511
512 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
513 if (ret < 0)
514 return ret;
515
516 ret &= ~MDIO_CTRL1_SPEEDSEL;
517 ret |= MDIO_CTRL1_SPEED1G;
518 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
519
520 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
521 if (ret < 0)
522 return ret;
523
524 /* Set SerDes to 1G speed */
525 amd_xgbe_phy_serdes_start_ratechange(phydev);
526
527 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
528 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
529 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
530 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
531 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
532
533 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
534 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
535
536 amd_xgbe_phy_serdes_complete_ratechange(phydev);
537
538 priv->mode = AMD_XGBE_MODE_KX;
539
540 return 0;
541}
542
543static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
544{
545 struct amd_xgbe_phy_priv *priv = phydev->priv;
546 int ret;
547
548	/* If we are in KR mode, switch to KX, and vice versa */
549 if (priv->mode == AMD_XGBE_MODE_KR)
550 ret = amd_xgbe_phy_gmii_mode(phydev);
551 else
552 ret = amd_xgbe_phy_xgmii_mode(phydev);
553
554 return ret;
555}
556
557static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
558{
559 int ret;
560
561 ret = amd_xgbe_phy_switch_mode(phydev);
562 if (ret < 0)
563 return AMD_XGBE_AN_ERROR;
564
565 return AMD_XGBE_AN_START;
566}
567
568static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
569 enum amd_xgbe_phy_rx *state)
570{
571 struct amd_xgbe_phy_priv *priv = phydev->priv;
572 int ad_reg, lp_reg, ret;
573
574 *state = AMD_XGBE_RX_COMPLETE;
575
576 /* If we're in KX mode then we're done */
577 if (priv->mode == AMD_XGBE_MODE_KX)
578 return AMD_XGBE_AN_EVENT;
579
580 /* Enable/Disable FEC */
581 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
582 if (ad_reg < 0)
583 return AMD_XGBE_AN_ERROR;
584
585 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
586 if (lp_reg < 0)
587 return AMD_XGBE_AN_ERROR;
588
589 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
590 if (ret < 0)
591 return AMD_XGBE_AN_ERROR;
592
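	/* Bits 15:14 (0xc000) of advertisement/LPA register 3 are the
	 * clause 73 FEC requested/ability bits; enable FEC only when both
	 * ends advertise it
	 */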
593 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
594 ret |= 0x01;
595 else
596 ret &= ~0x01;
597
598 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
599
600 /* Start KR training */
601 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
602 if (ret < 0)
603 return AMD_XGBE_AN_ERROR;
604
605 ret |= 0x01;
606 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
607
608 return AMD_XGBE_AN_EVENT;
609}
610
611static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
612 enum amd_xgbe_phy_rx *state)
613{
614 u16 msg;
615
616 *state = AMD_XGBE_RX_XNP;
617
618 msg = XNP_MCF_NULL_MESSAGE;
619 msg |= XNP_MP_FORMATTED;
620
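	/* The extended words (XNP + 2, XNP + 1) are written first; the
	 * final write to MDIO_AN_XNP supplies the formatted null message
	 */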
621 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
622 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
623 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
624
625 return AMD_XGBE_AN_EVENT;
626}
627
628static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
629 enum amd_xgbe_phy_rx *state)
630{
631 struct amd_xgbe_phy_priv *priv = phydev->priv;
632 unsigned int link_support;
633 int ret, ad_reg, lp_reg;
634
635	/* Read the link partner's Base Page Ability register 2 first */
636 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
637 if (ret < 0)
638 return AMD_XGBE_AN_ERROR;
639
640 /* Check for a supported mode, otherwise restart in a different one */
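	/* Bit 7 = 10GBASE-KR, bit 5 = 1000BASE-KX technology ability */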
641 link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
642 if (!(ret & link_support))
643 return amd_xgbe_an_switch_mode(phydev);
644
645 /* Check Extended Next Page support */
646 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
647 if (ad_reg < 0)
648 return AMD_XGBE_AN_ERROR;
649
650 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
651 if (lp_reg < 0)
652 return AMD_XGBE_AN_ERROR;
653
654 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
655 amd_xgbe_an_tx_xnp(phydev, state) :
656 amd_xgbe_an_tx_training(phydev, state);
657}
658
659static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
660 enum amd_xgbe_phy_rx *state)
661{
662 int ad_reg, lp_reg;
663
664 /* Check Extended Next Page support */
665 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
666 if (ad_reg < 0)
667 return AMD_XGBE_AN_ERROR;
668
669 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
670 if (lp_reg < 0)
671 return AMD_XGBE_AN_ERROR;
672
673 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
674 amd_xgbe_an_tx_xnp(phydev, state) :
675 amd_xgbe_an_tx_training(phydev, state);
676}
677
678static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
679{
680 struct amd_xgbe_phy_priv *priv = phydev->priv;
681 int ret;
682
683 /* Be sure we aren't looping trying to negotiate */
684 if (priv->mode == AMD_XGBE_MODE_KR) {
685 if (priv->kr_state != AMD_XGBE_RX_READY)
686 return AMD_XGBE_AN_NO_LINK;
687 priv->kr_state = AMD_XGBE_RX_BPA;
688 } else {
689 if (priv->kx_state != AMD_XGBE_RX_READY)
690 return AMD_XGBE_AN_NO_LINK;
691 priv->kx_state = AMD_XGBE_RX_BPA;
692 }
693
694 /* Set up Advertisement register 3 first */
695 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
696 if (ret < 0)
697 return AMD_XGBE_AN_ERROR;
698
699 if (phydev->supported & SUPPORTED_10000baseR_FEC)
700 ret |= 0xc000;
701 else
702 ret &= ~0xc000;
703
704 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
705
706 /* Set up Advertisement register 2 next */
707 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
708 if (ret < 0)
709 return AMD_XGBE_AN_ERROR;
710
711 if (phydev->supported & SUPPORTED_10000baseKR_Full)
712 ret |= 0x80;
713 else
714 ret &= ~0x80;
715
716 if (phydev->supported & SUPPORTED_1000baseKX_Full)
717 ret |= 0x20;
718 else
719 ret &= ~0x20;
720
721 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
722
723 /* Set up Advertisement register 1 last */
724 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
725 if (ret < 0)
726 return AMD_XGBE_AN_ERROR;
727
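	/* Bit 10 (0x400) and bit 11 (0x800) of the AN base page are the
	 * clause 73 pause and asymmetric pause bits
	 */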
728 if (phydev->supported & SUPPORTED_Pause)
729 ret |= 0x400;
730 else
731 ret &= ~0x400;
732
733 if (phydev->supported & SUPPORTED_Asym_Pause)
734 ret |= 0x800;
735 else
736 ret &= ~0x800;
737
738 /* We don't intend to perform XNP */
739 ret &= ~XNP_NP_EXCHANGE;
740
741 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
742
743 /* Enable and start auto-negotiation */
744 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
745
746 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
747 if (ret < 0)
748 return AMD_XGBE_AN_ERROR;
749
750 ret |= MDIO_AN_CTRL1_ENABLE;
751 ret |= MDIO_AN_CTRL1_RESTART;
752 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
753
754 return AMD_XGBE_AN_EVENT;
755}
756
757static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
758{
759 enum amd_xgbe_phy_an new_state;
760 int ret;
761
762 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
763 if (ret < 0)
764 return AMD_XGBE_AN_ERROR;
765
766 new_state = AMD_XGBE_AN_EVENT;
767 if (ret & XGBE_AN_PG_RCV)
768 new_state = AMD_XGBE_AN_PAGE_RECEIVED;
769 else if (ret & XGBE_AN_INC_LINK)
770 new_state = AMD_XGBE_AN_INCOMPAT_LINK;
771 else if (ret & XGBE_AN_INT_CMPLT)
772 new_state = AMD_XGBE_AN_COMPLETE;
773
774 if (new_state != AMD_XGBE_AN_EVENT)
775 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
776
777 return new_state;
778}
779
780static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
781{
782 struct amd_xgbe_phy_priv *priv = phydev->priv;
783 enum amd_xgbe_phy_rx *state;
784 int ret;
785
786 state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
787 : &priv->kx_state;
788
789 switch (*state) {
790 case AMD_XGBE_RX_BPA:
791 ret = amd_xgbe_an_rx_bpa(phydev, state);
792 break;
793
794 case AMD_XGBE_RX_XNP:
795 ret = amd_xgbe_an_rx_xnp(phydev, state);
796 break;
797
798 default:
799 ret = AMD_XGBE_AN_ERROR;
800 }
801
802 return ret;
803}
804
805static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
806{
807 return amd_xgbe_an_switch_mode(phydev);
808}
809
810static void amd_xgbe_an_state_machine(struct work_struct *work)
811{
812 struct amd_xgbe_phy_priv *priv = container_of(work,
813 struct amd_xgbe_phy_priv,
814 an_work);
815 struct phy_device *phydev = priv->phydev;
816 enum amd_xgbe_phy_an cur_state;
817 int sleep;
818
819 while (1) {
820 mutex_lock(&priv->an_mutex);
821
822 cur_state = priv->an_state;
823
824 switch (priv->an_state) {
825 case AMD_XGBE_AN_START:
826 priv->an_state = amd_xgbe_an_start(phydev);
827 break;
828
829 case AMD_XGBE_AN_EVENT:
830 priv->an_state = amd_xgbe_an_event(phydev);
831 break;
832
833 case AMD_XGBE_AN_PAGE_RECEIVED:
834 priv->an_state = amd_xgbe_an_page_received(phydev);
835 break;
836
837 case AMD_XGBE_AN_INCOMPAT_LINK:
838 priv->an_state = amd_xgbe_an_incompat_link(phydev);
839 break;
840
841 case AMD_XGBE_AN_COMPLETE:
842 case AMD_XGBE_AN_NO_LINK:
843 case AMD_XGBE_AN_EXIT:
844 goto exit_unlock;
845
846 default:
847 priv->an_state = AMD_XGBE_AN_ERROR;
848 }
849
850 if (priv->an_state == AMD_XGBE_AN_ERROR) {
851 netdev_err(phydev->attached_dev,
852 "error during auto-negotiation, state=%u\n",
853 cur_state);
854 goto exit_unlock;
855 }
856
857 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
858
859 mutex_unlock(&priv->an_mutex);
860
861 if (sleep)
862 usleep_range(20, 50);
863 }
864
865exit_unlock:
866 priv->an_result = priv->an_state;
867 priv->an_state = AMD_XGBE_AN_READY;
868
869 mutex_unlock(&priv->an_mutex);
870}
871
872static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
873{
874 int count, ret;
875
876 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
877 if (ret < 0)
878 return ret;
879
880 ret |= MDIO_CTRL1_RESET;
881 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
882
883 count = 50;
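	/* poll for reset completion, for up to ~1 second (50 * 20 ms) */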
884 do {
885 msleep(20);
886 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
887 if (ret < 0)
888 return ret;
889 } while ((ret & MDIO_CTRL1_RESET) && --count);
890
891 if (ret & MDIO_CTRL1_RESET)
892 return -ETIMEDOUT;
893
894 return 0;
895}
896
897static int amd_xgbe_phy_config_init(struct phy_device *phydev)
898{
899 /* Initialize supported features */
900 phydev->supported = SUPPORTED_Autoneg;
901 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
902 phydev->supported |= SUPPORTED_Backplane;
903 phydev->supported |= SUPPORTED_1000baseKX_Full |
904 SUPPORTED_2500baseX_Full;
905 phydev->supported |= SUPPORTED_10000baseKR_Full |
906 SUPPORTED_10000baseR_FEC;
907 phydev->advertising = phydev->supported;
908
909 /* Turn off and clear interrupts */
910 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
911 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
912
913 return 0;
914}
915
916static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
917{
918 int ret;
919
920 /* Disable auto-negotiation */
921 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
922 if (ret < 0)
923 return ret;
924
925 ret &= ~MDIO_AN_CTRL1_ENABLE;
926 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
927
928 /* Validate/Set specified speed */
929 switch (phydev->speed) {
930 case SPEED_10000:
931 ret = amd_xgbe_phy_xgmii_mode(phydev);
932 break;
933
934 case SPEED_2500:
935 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
936 break;
937
938 case SPEED_1000:
939 ret = amd_xgbe_phy_gmii_mode(phydev);
940 break;
941
942 default:
943 ret = -EINVAL;
944 }
945
946 if (ret < 0)
947 return ret;
948
949 /* Validate duplex mode */
950 if (phydev->duplex != DUPLEX_FULL)
951 return -EINVAL;
952
953 phydev->pause = 0;
954 phydev->asym_pause = 0;
955
956 return 0;
957}
958
959static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
960{
961 struct amd_xgbe_phy_priv *priv = phydev->priv;
962 u32 mmd_mask = phydev->c45_ids.devices_in_package;
963 int ret;
964
965 if (phydev->autoneg != AUTONEG_ENABLE)
966 return amd_xgbe_phy_setup_forced(phydev);
967
968 /* Make sure we have the AN MMD present */
969 if (!(mmd_mask & MDIO_DEVS_AN))
970 return -EINVAL;
971
972 /* Get the current speed mode */
973 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
974 if (ret < 0)
975 return ret;
976
977 /* Start/Restart the auto-negotiation state machine */
978 mutex_lock(&priv->an_mutex);
979 priv->an_result = AMD_XGBE_AN_READY;
980 priv->an_state = AMD_XGBE_AN_START;
981 priv->kr_state = AMD_XGBE_RX_READY;
982 priv->kx_state = AMD_XGBE_RX_READY;
983 mutex_unlock(&priv->an_mutex);
984
985 queue_work(priv->an_workqueue, &priv->an_work);
986
987 return 0;
988}
989
990static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
991{
992 struct amd_xgbe_phy_priv *priv = phydev->priv;
993 enum amd_xgbe_phy_an state;
994
995 mutex_lock(&priv->an_mutex);
996 state = priv->an_result;
997 mutex_unlock(&priv->an_mutex);
998
999 return (state == AMD_XGBE_AN_COMPLETE);
1000}
1001
1002static int amd_xgbe_phy_update_link(struct phy_device *phydev)
1003{
1004 struct amd_xgbe_phy_priv *priv = phydev->priv;
1005 enum amd_xgbe_phy_an state;
1006 unsigned int check_again, autoneg;
1007 int ret;
1008
1009	/* If we're doing auto-negotiation, don't report link down */
1010 mutex_lock(&priv->an_mutex);
1011 state = priv->an_state;
1012 mutex_unlock(&priv->an_mutex);
1013
1014 if (state != AMD_XGBE_AN_READY) {
1015 phydev->link = 1;
1016 return 0;
1017 }
1018
1019 /* Since the device can be in the wrong mode when a link is
1020 * (re-)established (cable connected after the interface is
1021 * up, etc.), the link status may report no link. If there
1022 * is no link, try switching modes and checking the status
1023 * again.
1024 */
1025 check_again = 1;
1026again:
1027 /* Link status is latched low, so read once to clear
1028 * and then read again to get current state
1029 */
1030 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1031 if (ret < 0)
1032 return ret;
1033
1034 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1035 if (ret < 0)
1036 return ret;
1037
1038 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
1039
1040 if (!phydev->link) {
1041 ret = amd_xgbe_phy_switch_mode(phydev);
1042 if (check_again) {
1043 check_again = 0;
1044 goto again;
1045 }
1046 }
1047
1048 autoneg = (phydev->link && !priv->link) ? 1 : 0;
1049 priv->link = phydev->link;
1050 if (autoneg) {
1051 /* Link is (back) up, re-start auto-negotiation */
1052 ret = amd_xgbe_phy_config_aneg(phydev);
1053 if (ret < 0)
1054 return ret;
1055 }
1056
1057 return 0;
1058}
1059
1060static int amd_xgbe_phy_read_status(struct phy_device *phydev)
1061{
1062 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1063 int ret, mode, ad_ret, lp_ret;
1064
1065 ret = amd_xgbe_phy_update_link(phydev);
1066 if (ret)
1067 return ret;
1068
1069 mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1070 if (mode < 0)
1071 return mode;
1072 mode &= MDIO_PCS_CTRL2_TYPE;
1073
1074 if (phydev->autoneg == AUTONEG_ENABLE) {
1075 if (!(mmd_mask & MDIO_DEVS_AN))
1076 return -EINVAL;
1077
1078 if (!amd_xgbe_phy_aneg_done(phydev))
1079 return 0;
1080
1081 /* Compare Advertisement and Link Partner register 1 */
1082 ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
1083 if (ad_ret < 0)
1084 return ad_ret;
1085 lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
1086 if (lp_ret < 0)
1087 return lp_ret;
1088
1089 ad_ret &= lp_ret;
1090 phydev->pause = (ad_ret & 0x400) ? 1 : 0;
1091 phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
1092
1093 /* Compare Advertisement and Link Partner register 2 */
1094 ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
1095 MDIO_AN_ADVERTISE + 1);
1096 if (ad_ret < 0)
1097 return ad_ret;
1098 lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
1099 if (lp_ret < 0)
1100 return lp_ret;
1101
1102 ad_ret &= lp_ret;
1103 if (ad_ret & 0x80) {
1104 phydev->speed = SPEED_10000;
1105 if (mode != MDIO_PCS_CTRL2_10GBR) {
1106 ret = amd_xgbe_phy_xgmii_mode(phydev);
1107 if (ret < 0)
1108 return ret;
1109 }
1110 } else {
1111 phydev->speed = SPEED_1000;
1112 if (mode == MDIO_PCS_CTRL2_10GBR) {
1113 ret = amd_xgbe_phy_gmii_mode(phydev);
1114 if (ret < 0)
1115 return ret;
1116 }
1117 }
1118
1119 phydev->duplex = DUPLEX_FULL;
1120 } else {
1121 phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
1122 : SPEED_1000;
1123 phydev->duplex = DUPLEX_FULL;
1124 phydev->pause = 0;
1125 phydev->asym_pause = 0;
1126 }
1127
1128 return 0;
1129}
1130
1131static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1132{
1133 int ret;
1134
1135 mutex_lock(&phydev->lock);
1136
1137 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1138 if (ret < 0)
1139 goto unlock;
1140
1141 ret |= MDIO_CTRL1_LPOWER;
1142 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1143
1144 ret = 0;
1145
1146unlock:
1147 mutex_unlock(&phydev->lock);
1148
1149 return ret;
1150}
1151
1152static int amd_xgbe_phy_resume(struct phy_device *phydev)
1153{
1154 int ret;
1155
1156 mutex_lock(&phydev->lock);
1157
1158 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1159 if (ret < 0)
1160 goto unlock;
1161
1162 ret &= ~MDIO_CTRL1_LPOWER;
1163 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1164
1165 ret = 0;
1166
1167unlock:
1168 mutex_unlock(&phydev->lock);
1169
1170 return ret;
1171}
1172
1173static int amd_xgbe_phy_probe(struct phy_device *phydev)
1174{
1175 struct amd_xgbe_phy_priv *priv;
1176 struct platform_device *pdev;
1177 struct device *dev;
1178 char *wq_name;
1179 int ret;
1180
1181 if (!phydev->dev.of_node)
1182 return -EINVAL;
1183
1184 pdev = of_find_device_by_node(phydev->dev.of_node);
1185 if (!pdev)
1186 return -EINVAL;
1187 dev = &pdev->dev;
1188
1189 wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
1190 if (!wq_name) {
1191 ret = -ENOMEM;
1192 goto err_pdev;
1193 }
1194
1195 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1196 if (!priv) {
1197 ret = -ENOMEM;
1198 goto err_name;
1199 }
1200
1201 priv->pdev = pdev;
1202 priv->dev = dev;
1203 priv->phydev = phydev;
1204
1205 /* Get the device mmio areas */
1206 priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1207 priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
1208 if (IS_ERR(priv->rxtx_regs)) {
1209 dev_err(dev, "rxtx ioremap failed\n");
1210 ret = PTR_ERR(priv->rxtx_regs);
1211 goto err_priv;
1212 }
1213
1214 priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1215 priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
1216 if (IS_ERR(priv->sir0_regs)) {
1217 dev_err(dev, "sir0 ioremap failed\n");
1218 ret = PTR_ERR(priv->sir0_regs);
1219 goto err_rxtx;
1220 }
1221
1222 priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1223 priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
1224 if (IS_ERR(priv->sir1_regs)) {
1225 dev_err(dev, "sir1 ioremap failed\n");
1226 ret = PTR_ERR(priv->sir1_regs);
1227 goto err_sir0;
1228 }
1229
1230 priv->link = 1;
1231
1232 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1233 if (ret < 0)
1234 goto err_sir1;
1235 if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
1236 priv->mode = AMD_XGBE_MODE_KR;
1237 else
1238 priv->mode = AMD_XGBE_MODE_KX;
1239
1240 mutex_init(&priv->an_mutex);
1241 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
1242 priv->an_workqueue = create_singlethread_workqueue(wq_name);
1243 if (!priv->an_workqueue) {
1244 ret = -ENOMEM;
1245 goto err_sir1;
1246 }
1247
1248 phydev->priv = priv;
1249
1250 kfree(wq_name);
1251 of_dev_put(pdev);
1252
1253 return 0;
1254
1255err_sir1:
1256 devm_iounmap(dev, priv->sir1_regs);
1257 devm_release_mem_region(dev, priv->sir1_res->start,
1258 resource_size(priv->sir1_res));
1259
1260err_sir0:
1261 devm_iounmap(dev, priv->sir0_regs);
1262 devm_release_mem_region(dev, priv->sir0_res->start,
1263 resource_size(priv->sir0_res));
1264
1265err_rxtx:
1266 devm_iounmap(dev, priv->rxtx_regs);
1267 devm_release_mem_region(dev, priv->rxtx_res->start,
1268 resource_size(priv->rxtx_res));
1269
1270err_priv:
1271 devm_kfree(dev, priv);
1272
1273err_name:
1274 kfree(wq_name);
1275
1276err_pdev:
1277 of_dev_put(pdev);
1278
1279 return ret;
1280}
1281
1282static void amd_xgbe_phy_remove(struct phy_device *phydev)
1283{
1284 struct amd_xgbe_phy_priv *priv = phydev->priv;
1285 struct device *dev = priv->dev;
1286
1287	/* Stop any in-progress auto-negotiation */
1288 mutex_lock(&priv->an_mutex);
1289 priv->an_state = AMD_XGBE_AN_EXIT;
1290 mutex_unlock(&priv->an_mutex);
1291
1292 flush_workqueue(priv->an_workqueue);
1293 destroy_workqueue(priv->an_workqueue);
1294
1295 /* Release resources */
1296 devm_iounmap(dev, priv->sir1_regs);
1297 devm_release_mem_region(dev, priv->sir1_res->start,
1298 resource_size(priv->sir1_res));
1299
1300 devm_iounmap(dev, priv->sir0_regs);
1301 devm_release_mem_region(dev, priv->sir0_res->start,
1302 resource_size(priv->sir0_res));
1303
1304 devm_iounmap(dev, priv->rxtx_regs);
1305 devm_release_mem_region(dev, priv->rxtx_res->start,
1306 resource_size(priv->rxtx_res));
1307
1308 devm_kfree(dev, priv);
1309}
1310
1311static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1312{
1313 return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1314}
1315
1316static struct phy_driver amd_xgbe_phy_driver[] = {
1317 {
1318 .phy_id = XGBE_PHY_ID,
1319 .phy_id_mask = XGBE_PHY_MASK,
1320 .name = "AMD XGBE PHY",
1321 .features = 0,
1322 .probe = amd_xgbe_phy_probe,
1323 .remove = amd_xgbe_phy_remove,
1324 .soft_reset = amd_xgbe_phy_soft_reset,
1325 .config_init = amd_xgbe_phy_config_init,
1326 .suspend = amd_xgbe_phy_suspend,
1327 .resume = amd_xgbe_phy_resume,
1328 .config_aneg = amd_xgbe_phy_config_aneg,
1329 .aneg_done = amd_xgbe_phy_aneg_done,
1330 .read_status = amd_xgbe_phy_read_status,
1331 .match_phy_device = amd_xgbe_match_phy_device,
1332 .driver = {
1333 .owner = THIS_MODULE,
1334 },
1335 },
1336};
1337
1338static int __init amd_xgbe_phy_init(void)
1339{
1340 return phy_drivers_register(amd_xgbe_phy_driver,
1341 ARRAY_SIZE(amd_xgbe_phy_driver));
1342}
1343
1344static void __exit amd_xgbe_phy_exit(void)
1345{
1346 phy_drivers_unregister(amd_xgbe_phy_driver,
1347 ARRAY_SIZE(amd_xgbe_phy_driver));
1348}
1349
1350module_init(amd_xgbe_phy_init);
1351module_exit(amd_xgbe_phy_exit);
1352
1353static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
1354 { XGBE_PHY_ID, XGBE_PHY_MASK },
1355 { }
1356};
1357MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 643464d5a727..6c622aedbae1 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -144,41 +144,11 @@ static int at803x_resume(struct phy_device *phydev)
144 144
145static int at803x_config_init(struct phy_device *phydev) 145static int at803x_config_init(struct phy_device *phydev)
146{ 146{
147 int val;
148 int ret; 147 int ret;
149 u32 features;
150
151 features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
152 SUPPORTED_FIBRE | SUPPORTED_BNC;
153
154 val = phy_read(phydev, MII_BMSR);
155 if (val < 0)
156 return val;
157
158 if (val & BMSR_ANEGCAPABLE)
159 features |= SUPPORTED_Autoneg;
160 if (val & BMSR_100FULL)
161 features |= SUPPORTED_100baseT_Full;
162 if (val & BMSR_100HALF)
163 features |= SUPPORTED_100baseT_Half;
164 if (val & BMSR_10FULL)
165 features |= SUPPORTED_10baseT_Full;
166 if (val & BMSR_10HALF)
167 features |= SUPPORTED_10baseT_Half;
168
169 if (val & BMSR_ESTATEN) {
170 val = phy_read(phydev, MII_ESTATUS);
171 if (val < 0)
172 return val;
173
174 if (val & ESTATUS_1000_TFULL)
175 features |= SUPPORTED_1000baseT_Full;
176 if (val & ESTATUS_1000_THALF)
177 features |= SUPPORTED_1000baseT_Half;
178 }
179 148
180 phydev->supported = features; 149 ret = genphy_config_init(phydev);
181 phydev->advertising = features; 150 if (ret < 0)
151 return ret;
182 152
183 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { 153 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
184 ret = phy_write(phydev, AT803X_DEBUG_ADDR, 154 ret = phy_write(phydev, AT803X_DEBUG_ADDR,
@@ -283,8 +253,7 @@ static int __init atheros_init(void)
283 253
284static void __exit atheros_exit(void) 254static void __exit atheros_exit(void)
285{ 255{
286 return phy_drivers_unregister(at803x_driver, 256 phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
287 ARRAY_SIZE(at803x_driver));
288} 257}
289 258
290module_init(atheros_init); 259module_init(atheros_init);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ba55adfc7aae..d60d875cb445 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -21,6 +21,7 @@
21#include <linux/phy_fixed.h> 21#include <linux/phy_fixed.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/of.h>
24 25
25#define MII_REGS_NUM 29 26#define MII_REGS_NUM 29
26 27
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
31}; 32};
32 33
33struct fixed_phy { 34struct fixed_phy {
34 int id; 35 int addr;
35 u16 regs[MII_REGS_NUM]; 36 u16 regs[MII_REGS_NUM];
36 struct phy_device *phydev; 37 struct phy_device *phydev;
37 struct fixed_phy_status status; 38 struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
104 if (fp->status.asym_pause) 105 if (fp->status.asym_pause)
105 lpa |= LPA_PAUSE_ASYM; 106 lpa |= LPA_PAUSE_ASYM;
106 107
107 fp->regs[MII_PHYSID1] = fp->id >> 16; 108 fp->regs[MII_PHYSID1] = 0;
108 fp->regs[MII_PHYSID2] = fp->id; 109 fp->regs[MII_PHYSID2] = 0;
109 110
110 fp->regs[MII_BMSR] = bmsr; 111 fp->regs[MII_BMSR] = bmsr;
111 fp->regs[MII_BMCR] = bmcr; 112 fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
115 return 0; 116 return 0;
116} 117}
117 118
118static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num) 119static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
119{ 120{
120 struct fixed_mdio_bus *fmb = bus->priv; 121 struct fixed_mdio_bus *fmb = bus->priv;
121 struct fixed_phy *fp; 122 struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
124 return -1; 125 return -1;
125 126
126 list_for_each_entry(fp, &fmb->phys, node) { 127 list_for_each_entry(fp, &fmb->phys, node) {
127 if (fp->id == phy_id) { 128 if (fp->addr == phy_addr) {
128 /* Issue callback if user registered it. */ 129 /* Issue callback if user registered it. */
129 if (fp->link_update) { 130 if (fp->link_update) {
130 fp->link_update(fp->phydev->attached_dev, 131 fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
138 return 0xFFFF; 139 return 0xFFFF;
139} 140}
140 141
141static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num, 142static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
142 u16 val) 143 u16 val)
143{ 144{
144 return 0; 145 return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
160 return -EINVAL; 161 return -EINVAL;
161 162
162 list_for_each_entry(fp, &fmb->phys, node) { 163 list_for_each_entry(fp, &fmb->phys, node) {
163 if (fp->id == phydev->phy_id) { 164 if (fp->addr == phydev->addr) {
164 fp->link_update = link_update; 165 fp->link_update = link_update;
165 fp->phydev = phydev; 166 fp->phydev = phydev;
166 return 0; 167 return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
171} 172}
172EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); 173EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
173 174
174int fixed_phy_add(unsigned int irq, int phy_id, 175int fixed_phy_add(unsigned int irq, int phy_addr,
175 struct fixed_phy_status *status) 176 struct fixed_phy_status *status)
176{ 177{
177 int ret; 178 int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
184 185
185 memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); 186 memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
186 187
187 fmb->irqs[phy_id] = irq; 188 fmb->irqs[phy_addr] = irq;
188 189
189 fp->id = phy_id; 190 fp->addr = phy_addr;
190 fp->status = *status; 191 fp->status = *status;
191 192
192 ret = fixed_phy_update_regs(fp); 193 ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
203} 204}
204EXPORT_SYMBOL_GPL(fixed_phy_add); 205EXPORT_SYMBOL_GPL(fixed_phy_add);
205 206
207void fixed_phy_del(int phy_addr)
208{
209 struct fixed_mdio_bus *fmb = &platform_fmb;
210 struct fixed_phy *fp, *tmp;
211
212 list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
213 if (fp->addr == phy_addr) {
214 list_del(&fp->node);
215 kfree(fp);
216 return;
217 }
218 }
219}
220EXPORT_SYMBOL_GPL(fixed_phy_del);
221
222static int phy_fixed_addr;
223static DEFINE_SPINLOCK(phy_fixed_addr_lock);
224
225int fixed_phy_register(unsigned int irq,
226 struct fixed_phy_status *status,
227 struct device_node *np)
228{
229 struct fixed_mdio_bus *fmb = &platform_fmb;
230 struct phy_device *phy;
231 int phy_addr;
232 int ret;
233
234 /* Get the next available PHY address, up to PHY_MAX_ADDR */
235 spin_lock(&phy_fixed_addr_lock);
236 if (phy_fixed_addr == PHY_MAX_ADDR) {
237 spin_unlock(&phy_fixed_addr_lock);
238 return -ENOSPC;
239 }
240 phy_addr = phy_fixed_addr++;
241 spin_unlock(&phy_fixed_addr_lock);
242
243 ret = fixed_phy_add(PHY_POLL, phy_addr, status);
244 if (ret < 0)
245 return ret;
246
247 phy = get_phy_device(fmb->mii_bus, phy_addr, false);
248 if (!phy || IS_ERR(phy)) {
249 fixed_phy_del(phy_addr);
250 return -EINVAL;
251 }
252
253 of_node_get(np);
254 phy->dev.of_node = np;
255
256 ret = phy_device_register(phy);
257 if (ret) {
258 phy_device_free(phy);
259 of_node_put(np);
260 fixed_phy_del(phy_addr);
261 return ret;
262 }
263
264 return 0;
265}
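/* Minimal usage sketch (editorial; the calling MAC driver and the
 * fixed-link device_node "np" are assumed context, not part of this
 * file):
 *
 *	struct fixed_phy_status status = {
 *		.link = 1,
 *		.speed = 1000,
 *		.duplex = 1,
 *	};
 *	int err = fixed_phy_register(PHY_POLL, &status, np);
 */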
266
206static int __init fixed_mdio_bus_init(void) 267static int __init fixed_mdio_bus_init(void)
207{ 268{
208 struct fixed_mdio_bus *fmb = &platform_fmb; 269 struct fixed_mdio_bus *fmb = &platform_fmb;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 76f54b32a120..2e58aa54484c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
69} 69}
70EXPORT_SYMBOL(mdiobus_alloc_size); 70EXPORT_SYMBOL(mdiobus_alloc_size);
71 71
72static void _devm_mdiobus_free(struct device *dev, void *res)
73{
74 mdiobus_free(*(struct mii_bus **)res);
75}
76
77static int devm_mdiobus_match(struct device *dev, void *res, void *data)
78{
79 struct mii_bus **r = res;
80
81 if (WARN_ON(!r || !*r))
82 return 0;
83
84 return *r == data;
85}
86
87/**
88 * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
89 * @dev: Device to allocate mii_bus for
90 * @sizeof_priv: Space to allocate for private structure.
91 *
92 * Managed mdiobus_alloc_size. mii_bus allocated with this function is
93 * automatically freed on driver detach.
94 *
95 * If an mii_bus allocated with this function needs to be freed separately,
96 * devm_mdiobus_free() must be used.
97 *
98 * RETURNS:
99 * Pointer to allocated mii_bus on success, NULL on failure.
100 */
101struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
102{
103 struct mii_bus **ptr, *bus;
104
105 ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
106 if (!ptr)
107 return NULL;
108
109	/* allocate the mii_bus and track it through the devres entry */
110 bus = mdiobus_alloc_size(sizeof_priv);
111 if (bus) {
112 *ptr = bus;
113 devres_add(dev, ptr);
114 } else {
115 devres_free(ptr);
116 }
117
118 return bus;
119}
120EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
121
122/**
123 * devm_mdiobus_free - Resource-managed mdiobus_free()
124 * @dev: Device this mii_bus belongs to
125 * @bus: the mii_bus associated with the device
126 *
127 * Free mii_bus allocated with devm_mdiobus_alloc_size().
128 */
129void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
130{
131 int rc;
132
133 rc = devres_release(dev, _devm_mdiobus_free,
134 devm_mdiobus_match, bus);
135 WARN_ON(rc);
136}
137EXPORT_SYMBOL_GPL(devm_mdiobus_free);
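/* Usage sketch (editorial; "pdev" and "struct foo_priv" are assumed
 * caller context, not part of this file):
 *
 *	struct mii_bus *bus;
 *
 *	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct foo_priv));
 *	if (!bus)
 *		return -ENOMEM;
 *
 * The bus is then freed automatically on driver detach, so no explicit
 * mdiobus_free() call is needed.
 */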
138
72/** 139/**
73 * mdiobus_release - mii_bus device release callback 140 * mdiobus_release - mii_bus device release callback
74 * @d: the target struct device that contains the mii_bus 141 * @d: the target struct device that contains the mii_bus
@@ -233,6 +300,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
233 if (IS_ERR(phydev) || phydev == NULL) 300 if (IS_ERR(phydev) || phydev == NULL)
234 return phydev; 301 return phydev;
235 302
303 /*
304 * For DT, see if the auto-probed phy has a corresponding child
305 * in the bus node, and set the of_node pointer in this case.
306 */
307 of_mdiobus_link_phydev(bus, phydev);
308
236 err = phy_device_register(phydev); 309 err = phy_device_register(phydev);
237 if (err) { 310 if (err) {
238 phy_device_free(phydev); 311 phy_device_free(phydev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d849684231c1..bc7c7d2f75f2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
283 return 0; 283 return 0;
284} 284}
285 285
286#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
287#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
288#define OP_DATA 1
289#define KSZ9031_PS_TO_REG 60
290
291/* Extended registers */
292#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
293#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
294#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
295#define MII_KSZ9031RN_CLK_PAD_SKEW 8
296
297static int ksz9031_extended_write(struct phy_device *phydev,
298 u8 mode, u32 dev_addr, u32 regnum, u16 val)
299{
300 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
301 phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
302 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
303 return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
304}
305
306static int ksz9031_extended_read(struct phy_device *phydev,
307 u8 mode, u32 dev_addr, u32 regnum)
308{
309 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
310 phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
311 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
312 return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
313}
314
315static int ksz9031_of_load_skew_values(struct phy_device *phydev,
316 struct device_node *of_node,
317 u16 reg, size_t field_sz,
318 char *field[], u8 numfields)
319{
320 int val[4] = {-1, -2, -3, -4};
321 int matches = 0;
322 u16 mask;
323 u16 maxval;
324 u16 newval;
325 int i;
326
327 for (i = 0; i < numfields; i++)
328 if (!of_property_read_u32(of_node, field[i], val + i))
329 matches++;
330
331 if (!matches)
332 return 0;
333
334 if (matches < numfields)
335 newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
336 else
337 newval = 0;
338
339 maxval = (field_sz == 4) ? 0xf : 0x1f;
340 for (i = 0; i < numfields; i++)
341 if (val[i] != -(i + 1)) {
342 mask = 0xffff;
343 mask ^= maxval << (field_sz * i);
344 newval = (newval & mask) |
345 (((val[i] / KSZ9031_PS_TO_REG) & maxval)
346 << (field_sz * i));
347 }
348
349 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
350}
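/* Worked example (editorial): a device-tree property "rxc-skew-ps = <900>"
 * maps to 900 / KSZ9031_PS_TO_REG = 15 (0xf), written into the low 5-bit
 * field of MII_KSZ9031RN_CLK_PAD_SKEW; fields left unspecified in the
 * device tree keep their current register values.
 */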
351
352static int ksz9031_config_init(struct phy_device *phydev)
353{
354 struct device *dev = &phydev->dev;
355 struct device_node *of_node = dev->of_node;
356 char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
357 char *rx_data_skews[4] = {
358 "rxd0-skew-ps", "rxd1-skew-ps",
359 "rxd2-skew-ps", "rxd3-skew-ps"
360 };
361 char *tx_data_skews[4] = {
362 "txd0-skew-ps", "txd1-skew-ps",
363 "txd2-skew-ps", "txd3-skew-ps"
364 };
365 char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
366
367 if (!of_node && dev->parent->of_node)
368 of_node = dev->parent->of_node;
369
370 if (of_node) {
371 ksz9031_of_load_skew_values(phydev, of_node,
372 MII_KSZ9031RN_CLK_PAD_SKEW, 5,
373 clk_skews, 2);
374
375 ksz9031_of_load_skew_values(phydev, of_node,
376 MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
377 control_skews, 2);
378
379 ksz9031_of_load_skew_values(phydev, of_node,
380 MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
381 rx_data_skews, 4);
382
383 ksz9031_of_load_skew_values(phydev, of_node,
384 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
385 tx_data_skews, 4);
386 }
387 return 0;
388}
389
286#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 390#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
287#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) 391#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
288#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) 392#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
469 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause 573 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
470 | SUPPORTED_Asym_Pause), 574 | SUPPORTED_Asym_Pause),
471 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 575 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
472 .config_init = kszphy_config_init, 576 .config_init = ksz9031_config_init,
473 .config_aneg = genphy_config_aneg, 577 .config_aneg = genphy_config_aneg,
474 .read_status = genphy_read_status, 578 .read_status = genphy_read_status,
475 .ack_interrupt = kszphy_ack_interrupt, 579 .ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4987a1c6dc52..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -33,6 +33,7 @@
33#include <linux/mdio.h> 33#include <linux/mdio.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/uaccess.h> 35#include <linux/uaccess.h>
36#include <linux/of.h>
36 37
37#include <asm/irq.h> 38#include <asm/irq.h>
38 39
@@ -1067,14 +1068,11 @@ int genphy_soft_reset(struct phy_device *phydev)
1067} 1068}
1068EXPORT_SYMBOL(genphy_soft_reset); 1069EXPORT_SYMBOL(genphy_soft_reset);
1069 1070
1070static int genphy_config_init(struct phy_device *phydev) 1071int genphy_config_init(struct phy_device *phydev)
1071{ 1072{
1072 int val; 1073 int val;
1073 u32 features; 1074 u32 features;
1074 1075
1075 /* For now, I'll claim that the generic driver supports
1076 * all possible port types
1077 */
1078 features = (SUPPORTED_TP | SUPPORTED_MII 1076 features = (SUPPORTED_TP | SUPPORTED_MII
1079 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1077 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1080 SUPPORTED_BNC); 1078 SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ static int genphy_config_init(struct phy_device *phydev)
1107 features |= SUPPORTED_1000baseT_Half; 1105 features |= SUPPORTED_1000baseT_Half;
1108 } 1106 }
1109 1107
1110 phydev->supported = features; 1108 phydev->supported &= features;
1111 phydev->advertising = features; 1109 phydev->advertising &= features;
1112 1110
1113 return 0; 1111 return 0;
1114} 1112}
@@ -1118,6 +1116,7 @@ static int gen10g_soft_reset(struct phy_device *phydev)
1118 /* Do nothing for now */ 1116 /* Do nothing for now */
1119 return 0; 1117 return 0;
1120} 1118}
1119EXPORT_SYMBOL(genphy_config_init);
1121 1120
1122static int gen10g_config_init(struct phy_device *phydev) 1121static int gen10g_config_init(struct phy_device *phydev)
1123{ 1122{
@@ -1168,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
1168 return 0; 1167 return 0;
1169} 1168}
1170 1169
1170static void of_set_phy_supported(struct phy_device *phydev)
1171{
1172 struct device_node *node = phydev->dev.of_node;
1173 u32 max_speed;
1174
1175 if (!IS_ENABLED(CONFIG_OF_MDIO))
1176 return;
1177
1178 if (!node)
1179 return;
1180
1181 if (!of_property_read_u32(node, "max-speed", &max_speed)) {
1182 /* The default values for phydev->supported are provided by the PHY
1183 * driver "features" member; we want to reset to sane defaults first
1184 * before supporting higher speeds.
1185 */
1186 phydev->supported &= PHY_DEFAULT_FEATURES;
1187
1188 switch (max_speed) {
1189 default:
1190 return;
1191
1192 case SPEED_1000:
1193 phydev->supported |= PHY_1000BT_FEATURES;
1194 case SPEED_100:
1195 phydev->supported |= PHY_100BT_FEATURES;
1196 case SPEED_10:
1197 phydev->supported |= PHY_10BT_FEATURES;
1198 }
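		/* The case fall-through above is intentional: a max-speed
		 * of 1000 also enables the 100 and 10 Mb/s feature bits.
		 */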
1199 }
1200}
1201
1171/** 1202/**
1172 * phy_probe - probe and init a PHY device 1203 * phy_probe - probe and init a PHY device
1173 * @dev: device to probe and init 1204 * @dev: device to probe and init
@@ -1202,7 +1233,8 @@ static int phy_probe(struct device *dev)
1202 * or both of these values 1233 * or both of these values
1203 */ 1234 */
1204 phydev->supported = phydrv->features; 1235 phydev->supported = phydrv->features;
1205 phydev->advertising = phydrv->features; 1236 of_set_phy_supported(phydev);
1237 phydev->advertising = phydev->supported;
1206 1238
1207 /* Set the state to READY by default */ 1239 /* Set the state to READY by default */
1208 phydev->state = PHY_READY; 1240 phydev->state = PHY_READY;
@@ -1295,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
1295 .name = "Generic PHY", 1327 .name = "Generic PHY",
1296 .soft_reset = genphy_soft_reset, 1328 .soft_reset = genphy_soft_reset,
1297 .config_init = genphy_config_init, 1329 .config_init = genphy_config_init,
1298 .features = 0, 1330 .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
1331 SUPPORTED_AUI | SUPPORTED_FIBRE |
1332 SUPPORTED_BNC,
1299 .config_aneg = genphy_config_aneg, 1333 .config_aneg = genphy_config_aneg,
1300 .aneg_done = genphy_aneg_done, 1334 .aneg_done = genphy_aneg_done,
1301 .read_status = genphy_read_status, 1335 .read_status = genphy_read_status,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fa1d69a38ccf..45483fdfbe06 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,65 +64,51 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
64 return err; 64 return err;
65} 65}
66 66
67/* RTL8201CP */ 67static struct phy_driver realtek_drvs[] = {
68static struct phy_driver rtl8201cp_driver = { 68 {
69 .phy_id = 0x00008201, 69 .phy_id = 0x00008201,
70 .name = "RTL8201CP Ethernet", 70 .name = "RTL8201CP Ethernet",
71 .phy_id_mask = 0x0000ffff, 71 .phy_id_mask = 0x0000ffff,
72 .features = PHY_BASIC_FEATURES, 72 .features = PHY_BASIC_FEATURES,
73 .flags = PHY_HAS_INTERRUPT, 73 .flags = PHY_HAS_INTERRUPT,
74 .config_aneg = &genphy_config_aneg, 74 .config_aneg = &genphy_config_aneg,
75 .read_status = &genphy_read_status, 75 .read_status = &genphy_read_status,
76 .driver = { .owner = THIS_MODULE,}, 76 .driver = { .owner = THIS_MODULE,},
77}; 77 }, {
78 78 .phy_id = 0x001cc912,
79/* RTL8211B */ 79 .name = "RTL8211B Gigabit Ethernet",
80static struct phy_driver rtl8211b_driver = { 80 .phy_id_mask = 0x001fffff,
81 .phy_id = 0x001cc912, 81 .features = PHY_GBIT_FEATURES,
82 .name = "RTL8211B Gigabit Ethernet", 82 .flags = PHY_HAS_INTERRUPT,
83 .phy_id_mask = 0x001fffff, 83 .config_aneg = &genphy_config_aneg,
84 .features = PHY_GBIT_FEATURES, 84 .read_status = &genphy_read_status,
85 .flags = PHY_HAS_INTERRUPT, 85 .ack_interrupt = &rtl821x_ack_interrupt,
86 .config_aneg = &genphy_config_aneg, 86 .config_intr = &rtl8211b_config_intr,
87 .read_status = &genphy_read_status, 87 .driver = { .owner = THIS_MODULE,},
88 .ack_interrupt = &rtl821x_ack_interrupt, 88 }, {
89 .config_intr = &rtl8211b_config_intr, 89 .phy_id = 0x001cc915,
90 .driver = { .owner = THIS_MODULE,}, 90 .name = "RTL8211E Gigabit Ethernet",
91}; 91 .phy_id_mask = 0x001fffff,
92 92 .features = PHY_GBIT_FEATURES,
93/* RTL8211E */ 93 .flags = PHY_HAS_INTERRUPT,
94static struct phy_driver rtl8211e_driver = { 94 .config_aneg = &genphy_config_aneg,
95 .phy_id = 0x001cc915, 95 .read_status = &genphy_read_status,
96 .name = "RTL8211E Gigabit Ethernet", 96 .ack_interrupt = &rtl821x_ack_interrupt,
97 .phy_id_mask = 0x001fffff, 97 .config_intr = &rtl8211e_config_intr,
98 .features = PHY_GBIT_FEATURES, 98 .suspend = genphy_suspend,
99 .flags = PHY_HAS_INTERRUPT, 99 .resume = genphy_resume,
100 .config_aneg = &genphy_config_aneg, 100 .driver = { .owner = THIS_MODULE,},
101 .read_status = &genphy_read_status, 101 },
102 .ack_interrupt = &rtl821x_ack_interrupt,
103 .config_intr = &rtl8211e_config_intr,
104 .suspend = genphy_suspend,
105 .resume = genphy_resume,
106 .driver = { .owner = THIS_MODULE,},
107}; 102};
108 103
109static int __init realtek_init(void) 104static int __init realtek_init(void)
110{ 105{
111 int ret; 106 return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
112
113 ret = phy_driver_register(&rtl8201cp_driver);
114 if (ret < 0)
115 return -ENODEV;
116 ret = phy_driver_register(&rtl8211b_driver);
117 if (ret < 0)
118 return -ENODEV;
119 return phy_driver_register(&rtl8211e_driver);
120} 107}
121 108
122static void __exit realtek_exit(void) 109static void __exit realtek_exit(void)
123{ 110{
124 phy_driver_unregister(&rtl8211b_driver); 111 phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
125 phy_driver_unregister(&rtl8211e_driver);
126} 112}
127 113
128module_init(realtek_init); 114module_init(realtek_init);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 11f34813e23f..180c49479c42 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
249 249
250static void __exit smsc_exit(void) 250static void __exit smsc_exit(void)
251{ 251{
252 return phy_drivers_unregister(smsc_phy_driver, 252 phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
253 ARRAY_SIZE(smsc_phy_driver));
254} 253}
255 254
256MODULE_DESCRIPTION("SMSC PHY driver"); 255MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 14372c65a7e8..5dc0935da99c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
319 319
320static void __exit vsc82xx_exit(void) 320static void __exit vsc82xx_exit(void)
321{ 321{
322 return phy_drivers_unregister(vsc82xx_driver, 322 phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
323 ARRAY_SIZE(vsc82xx_driver));
324} 323}
325 324
326module_init(vsc82xx_init); 325module_init(vsc82xx_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e3923ebb693f..91d6c1272fcf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
757 757
758 err = get_filter(argp, &code); 758 err = get_filter(argp, &code);
759 if (err >= 0) { 759 if (err >= 0) {
760 struct sock_fprog fprog = { 760 struct sock_fprog_kern fprog = {
761 .len = err, 761 .len = err,
762 .filter = code, 762 .filter = code,
763 }; 763 };
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
778 778
779 err = get_filter(argp, &code); 779 err = get_filter(argp, &code);
780 if (err >= 0) { 780 if (err >= 0) {
781 struct sock_fprog fprog = { 781 struct sock_fprog_kern fprog = {
782 .len = err, 782 .len = err,
783 .filter = code, 783 .filter = code,
784 }; 784 };
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 01805319e1e0..1aff970be33e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
281 nf_reset(skb); 281 nf_reset(skb);
282 282
283 skb->ip_summed = CHECKSUM_NONE; 283 skb->ip_summed = CHECKSUM_NONE;
284 ip_select_ident(skb, &rt->dst, NULL); 284 ip_select_ident(skb, NULL);
285 ip_send_check(iph); 285 ip_send_check(iph);
286 286
287 ip_local_out(skb); 287 ip_local_out(skb);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a8497183ff8b..dac7a0d9bb46 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
494 ndev->mtu = RIO_MAX_MSG_SIZE - 14; 494 ndev->mtu = RIO_MAX_MSG_SIZE - 14;
495 ndev->features = NETIF_F_LLTX; 495 ndev->features = NETIF_F_LLTX;
496 SET_NETDEV_DEV(ndev, &mport->dev); 496 SET_NETDEV_DEV(ndev, &mport->dev);
497 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); 497 ndev->ethtool_ops = &rionet_ethtool_ops;
498 498
499 spin_lock_init(&rnet->lock); 499 spin_lock_init(&rnet->lock);
500 spin_lock_init(&rnet->tx_lock); 500 spin_lock_init(&rnet->tx_lock);
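
These SET_ETHTOOL_OPS conversions (here and in catc.c below) are mechanical; the macro being removed tree-wide was, per the old include/linux/netdevice.h, nothing more than a wrapper around the assignment that replaces it:

	#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))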
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ce4989be86d9..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
968static void __team_compute_features(struct team *team) 968static void __team_compute_features(struct team *team)
969{ 969{
970 struct team_port *port; 970 struct team_port *port;
971 u32 vlan_features = TEAM_VLAN_FEATURES; 971 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
972 unsigned short max_hard_header_len = ETH_HLEN; 972 unsigned short max_hard_header_len = ETH_HLEN;
973 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 973 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
974 974
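
Seeding vlan_features with TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL matters because of how per-port feature aggregation works: "one for all" bits start cleared and are ORed in, while "all for all" bits (checksum offloads and the like) start set and are ANDed down, so the seed must enable only the latter group or a feature no port supports could survive the merge. A sketch of the per-port loop this seed feeds into (the loop body is assumed here; it sits outside the hunk):

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
						port->dev->vlan_features,
						TEAM_VLAN_FEATURES);
	}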
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index dbde3412ee5e..a58dfebb5512 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -49,7 +49,7 @@ struct lb_port_mapping {
49struct lb_priv_ex { 49struct lb_priv_ex {
50 struct team *team; 50 struct team *team;
51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; 51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
52 struct sock_fprog *orig_fprog; 52 struct sock_fprog_kern *orig_fprog;
53 struct { 53 struct {
54 unsigned int refresh_interval; /* in tenths of second */ 54 unsigned int refresh_interval; /* in tenths of second */
55 struct delayed_work refresh_dw; 55 struct delayed_work refresh_dw;
@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
241 return 0; 241 return 0;
242} 242}
243 243
244static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, 244static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
245 const void *data) 245 const void *data)
246{ 246{
247 struct sock_fprog *fprog; 247 struct sock_fprog_kern *fprog;
248 struct sock_filter *filter = (struct sock_filter *) data; 248 struct sock_filter *filter = (struct sock_filter *) data;
249 249
250 if (data_len % sizeof(struct sock_filter)) 250 if (data_len % sizeof(struct sock_filter))
251 return -EINVAL; 251 return -EINVAL;
252 fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); 252 fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
253 if (!fprog) 253 if (!fprog)
254 return -ENOMEM; 254 return -ENOMEM;
255 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); 255 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
262 return 0; 262 return 0;
263} 263}
264 264
265static void __fprog_destroy(struct sock_fprog *fprog) 265static void __fprog_destroy(struct sock_fprog_kern *fprog)
266{ 266{
267 kfree(fprog->filter); 267 kfree(fprog->filter);
268 kfree(fprog); 268 kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
273 struct lb_priv *lb_priv = get_lb_priv(team); 273 struct lb_priv *lb_priv = get_lb_priv(team);
274 struct sk_filter *fp = NULL; 274 struct sk_filter *fp = NULL;
275 struct sk_filter *orig_fp; 275 struct sk_filter *orig_fp;
276 struct sock_fprog *fprog = NULL; 276 struct sock_fprog_kern *fprog = NULL;
277 int err; 277 int err;
278 278
279 if (ctx->data.bin_val.len) { 279 if (ctx->data.bin_val.len) {
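
The kmalloc(sizeof(*fprog), ...) change is the standard kernel idiom; sizing the allocation from the pointer keeps it correct even when the pointee's type changes, exactly as it just did here. Contrast (illustrative, not real code):

	struct sock_fprog_kern *fprog;

	fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);	/* quietly stale after the type change */
	fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);		/* always tracks the pointee */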
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ee328ba101e7..98bad1fb1bfb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
498 for (i = 0; i < n; i++) { 498 for (i = 0; i < n; i++) {
499 tfile = rtnl_dereference(tun->tfiles[i]); 499 tfile = rtnl_dereference(tun->tfiles[i]);
500 BUG_ON(!tfile); 500 BUG_ON(!tfile);
501 wake_up_all(&tfile->wq.wait); 501 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
502 RCU_INIT_POINTER(tfile->tun, NULL); 502 RCU_INIT_POINTER(tfile->tun, NULL);
503 --tun->numqueues; 503 --tun->numqueues;
504 } 504 }
505 list_for_each_entry(tfile, &tun->disabled, next) { 505 list_for_each_entry(tfile, &tun->disabled, next) {
506 wake_up_all(&tfile->wq.wait); 506 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
507 RCU_INIT_POINTER(tfile->tun, NULL); 507 RCU_INIT_POINTER(tfile->tun, NULL);
508 } 508 }
509 BUG_ON(tun->numqueues != 0); 509 BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
807 /* Notify and wake up reader process */ 807 /* Notify and wake up reader process */
808 if (tfile->flags & TUN_FASYNC) 808 if (tfile->flags & TUN_FASYNC)
809 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 809 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
810 wake_up_interruptible_poll(&tfile->wq.wait, POLLIN | 810 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
811 POLLRDNORM | POLLRDBAND);
812 811
813 rcu_read_unlock(); 812 rcu_read_unlock();
814 return NETDEV_TX_OK; 813 return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
965 964
966 tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 965 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
967 966
968 poll_wait(file, &tfile->wq.wait, wait); 967 poll_wait(file, sk_sleep(sk), wait);
969 968
970 if (!skb_queue_empty(&sk->sk_receive_queue)) 969 if (!skb_queue_empty(&sk->sk_receive_queue))
971 mask |= POLLIN | POLLRDNORM; 970 mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
1330static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 1329static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1331 const struct iovec *iv, ssize_t len, int noblock) 1330 const struct iovec *iv, ssize_t len, int noblock)
1332{ 1331{
1333 DECLARE_WAITQUEUE(wait, current);
1334 struct sk_buff *skb; 1332 struct sk_buff *skb;
1335 ssize_t ret = 0; 1333 ssize_t ret = 0;
1334 int peeked, err, off = 0;
1336 1335
1337 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1336 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1338 1337
1339 if (unlikely(!noblock)) 1338 if (!len)
1340 add_wait_queue(&tfile->wq.wait, &wait); 1339 return ret;
1341 while (len) {
1342 if (unlikely(!noblock))
1343 current->state = TASK_INTERRUPTIBLE;
1344 1340
1345 /* Read frames from the queue */ 1341 if (tun->dev->reg_state != NETREG_REGISTERED)
1346 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { 1342 return -EIO;
1347 if (noblock) {
1348 ret = -EAGAIN;
1349 break;
1350 }
1351 if (signal_pending(current)) {
1352 ret = -ERESTARTSYS;
1353 break;
1354 }
1355 if (tun->dev->reg_state != NETREG_REGISTERED) {
1356 ret = -EIO;
1357 break;
1358 }
1359
1360 /* Nothing to read, let's sleep */
1361 schedule();
1362 continue;
1363 }
1364 1343
1344 /* Read frames from queue */
1345 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1346 &peeked, &off, &err);
1347 if (skb) {
1365 ret = tun_put_user(tun, tfile, skb, iv, len); 1348 ret = tun_put_user(tun, tfile, skb, iv, len);
1366 kfree_skb(skb); 1349 kfree_skb(skb);
1367 break; 1350 } else
1368 } 1351 ret = err;
1369
1370 if (unlikely(!noblock)) {
1371 current->state = TASK_RUNNING;
1372 remove_wait_queue(&tfile->wq.wait, &wait);
1373 }
1374 1352
1375 return ret; 1353 return ret;
1376} 1354}
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2199 tfile->flags = 0; 2177 tfile->flags = 0;
2200 tfile->ifindex = 0; 2178 tfile->ifindex = 0;
2201 2179
2202 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2203 init_waitqueue_head(&tfile->wq.wait); 2180 init_waitqueue_head(&tfile->wq.wait);
2181 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
2204 2182
2205 tfile->socket.file = file; 2183 tfile->socket.file = file;
2206 tfile->socket.ops = &tun_socket_ops; 2184 tfile->socket.ops = &tun_socket_ops;
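
The tun rewrite retires the hand-rolled waitqueue: tun_net_xmit() now wakes readers through sk_data_ready(), which signals sk_sleep(sk), the same queue tun_chr_poll() and __skb_recv_datagram() wait on. The datagram core then handles blocking, signals and MSG_DONTWAIT internally, so the receive path shrinks to a sketch like:

	/* condensed from the new tun_do_read(); error handling trimmed */
	static struct sk_buff *recv_one(struct sock *sk, int noblock, int *err)
	{
		int peeked, off = 0;

		return __skb_recv_datagram(sk, noblock ? MSG_DONTWAIT : 0,
					   &peeked, &off, err);
	}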
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 630caf48f63a..8cfc3bb0c6a6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
793 793
794 netdev->netdev_ops = &catc_netdev_ops; 794 netdev->netdev_ops = &catc_netdev_ops;
795 netdev->watchdog_timeo = TX_TIMEOUT; 795 netdev->watchdog_timeo = TX_TIMEOUT;
796 SET_ETHTOOL_OPS(netdev, &ops); 796 netdev->ethtool_ops = &ops;
797 797
798 catc->usbdev = usbdev; 798 catc->usbdev = usbdev;
799 catc->netdev = netdev; 799 catc->netdev = netdev;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 2e025ddcef21..5ee7a1dbc023 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -24,13 +24,21 @@
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <net/addrconf.h> 25#include <net/addrconf.h>
26 26
27/* alternative VLAN for IP session 0 if not untagged */
28#define MBIM_IPS0_VID 4094
29
27/* driver specific data - must match cdc_ncm usage */ 30/* driver specific data - must match cdc_ncm usage */
28struct cdc_mbim_state { 31struct cdc_mbim_state {
29 struct cdc_ncm_ctx *ctx; 32 struct cdc_ncm_ctx *ctx;
30 atomic_t pmcount; 33 atomic_t pmcount;
31 struct usb_driver *subdriver; 34 struct usb_driver *subdriver;
32 struct usb_interface *control; 35 unsigned long _unused;
33 struct usb_interface *data; 36 unsigned long flags;
37};
38
39/* flags for the cdc_mbim_state.flags field */
40enum cdc_mbim_flags {
41 FLAG_IPS0_VLAN = 1 << 0, /* IP session 0 is tagged */
34}; 42};
35 43
36/* using a counter to merge subdriver requests with our own into a combined state */ 44/* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
62 return cdc_mbim_manage_power(dev, status); 70 return cdc_mbim_manage_power(dev, status);
63} 71}
64 72
73static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
74{
75 struct usbnet *dev = netdev_priv(netdev);
76 struct cdc_mbim_state *info = (void *)&dev->data;
77
78 /* creation of this VLAN is a request to tag IP session 0 */
79 if (vid == MBIM_IPS0_VID)
80 info->flags |= FLAG_IPS0_VLAN;
81 else
 82 if (vid >= 512) /* we don't map these to MBIM sessions */
83 return -EINVAL;
84 return 0;
85}
86
87static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
88{
89 struct usbnet *dev = netdev_priv(netdev);
90 struct cdc_mbim_state *info = (void *)&dev->data;
91
92 /* this is a request for an untagged IP session 0 */
93 if (vid == MBIM_IPS0_VID)
94 info->flags &= ~FLAG_IPS0_VLAN;
95 return 0;
96}
97
98static const struct net_device_ops cdc_mbim_netdev_ops = {
99 .ndo_open = usbnet_open,
100 .ndo_stop = usbnet_stop,
101 .ndo_start_xmit = usbnet_start_xmit,
102 .ndo_tx_timeout = usbnet_tx_timeout,
103 .ndo_change_mtu = usbnet_change_mtu,
104 .ndo_set_mac_address = eth_mac_addr,
105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
107 .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
108};
109
110/* Change the control interface altsetting and update the .driver_info
111 * pointer if the matching entry after changing class codes points to
112 * a different struct
113 */
114static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
115{
116 struct usb_driver *driver = to_usb_driver(intf->dev.driver);
117 const struct usb_device_id *id;
118 struct driver_info *info;
119 int ret;
120
121 ret = usb_set_interface(dev->udev,
122 intf->cur_altsetting->desc.bInterfaceNumber,
123 alt);
124 if (ret)
125 return ret;
126
127 id = usb_match_id(intf, driver->id_table);
128 if (!id)
129 return -ENODEV;
130
131 info = (struct driver_info *)id->driver_info;
132 if (info != dev->driver_info) {
133 dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
134 info->description);
135 dev->driver_info = info;
136 }
137 return 0;
138}
65 139
66static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) 140static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
67{ 141{
68 struct cdc_ncm_ctx *ctx; 142 struct cdc_ncm_ctx *ctx;
69 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 143 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
70 int ret = -ENODEV; 144 int ret = -ENODEV;
71 u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); 145 u8 data_altsetting = 1;
72 struct cdc_mbim_state *info = (void *)&dev->data; 146 struct cdc_mbim_state *info = (void *)&dev->data;
73 147
 74 /* Probably NCM, defer for cdc_ncm_bind */ 148 /* should we change control altsetting on an NCM/MBIM function? */
149 if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
150 data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
151 ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
152 if (ret)
153 goto err;
154 ret = -ENODEV;
155 }
156
157 /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
75 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
76 goto err; 159 goto err;
77 160
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
101 dev->net->flags |= IFF_NOARP; 184 dev->net->flags |= IFF_NOARP;
102 185
103 /* no need to put the VLAN tci in the packet headers */ 186 /* no need to put the VLAN tci in the packet headers */
104 dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX; 187 dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
188
189 /* monitor VLAN additions and removals */
190 dev->net->netdev_ops = &cdc_mbim_netdev_ops;
105err: 191err:
106 return ret; 192 return ret;
107} 193}
@@ -164,12 +250,24 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
164 skb_pull(skb, ETH_HLEN); 250 skb_pull(skb, ETH_HLEN);
165 } 251 }
166 252
253 /* Is IP session <0> tagged too? */
254 if (info->flags & FLAG_IPS0_VLAN) {
255 /* drop all untagged packets */
256 if (!tci)
257 goto error;
258 /* map MBIM_IPS0_VID to IPS<0> */
259 if (tci == MBIM_IPS0_VID)
260 tci = 0;
261 }
262
167 /* mapping VLANs to MBIM sessions: 263 /* mapping VLANs to MBIM sessions:
168 * no tag => IPS session <0> 264 * no tag => IPS session <0> if !FLAG_IPS0_VLAN
169 * 1 - 255 => IPS session <vlanid> 265 * 1 - 255 => IPS session <vlanid>
170 * 256 - 511 => DSS session <vlanid - 256> 266 * 256 - 511 => DSS session <vlanid - 256>
171 * 512 - 4095 => unsupported, drop 267 * 512 - 4093 => unsupported, drop
268 * 4094 => IPS session <0> if FLAG_IPS0_VLAN
172 */ 269 */
270
173 switch (tci & 0x0f00) { 271 switch (tci & 0x0f00) {
174 case 0x0000: /* VLAN ID 0 - 255 */ 272 case 0x0000: /* VLAN ID 0 - 255 */
175 if (!is_ip) 273 if (!is_ip)
@@ -178,6 +276,8 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
178 c[3] = tci; 276 c[3] = tci;
179 break; 277 break;
180 case 0x0100: /* VLAN ID 256 - 511 */ 278 case 0x0100: /* VLAN ID 256 - 511 */
279 if (is_ip)
280 goto error;
181 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN); 281 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
182 c = (u8 *)&sign; 282 c = (u8 *)&sign;
183 c[3] = tci; 283 c[3] = tci;
@@ -223,8 +323,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
223 /* need to send the NA on the VLAN dev, if any */ 323 /* need to send the NA on the VLAN dev, if any */
224 rcu_read_lock(); 324 rcu_read_lock();
225 if (tci) { 325 if (tci) {
226 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), 326 netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
227 tci); 327 tci);
228 if (!netdev) { 328 if (!netdev) {
229 rcu_read_unlock(); 329 rcu_read_unlock();
230 return; 330 return;
@@ -268,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
268 __be16 proto = htons(ETH_P_802_3); 368 __be16 proto = htons(ETH_P_802_3);
269 struct sk_buff *skb = NULL; 369 struct sk_buff *skb = NULL;
270 370
271 if (tci < 256) { /* IPS session? */ 371 if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
272 if (len < sizeof(struct iphdr)) 372 if (len < sizeof(struct iphdr))
273 goto err; 373 goto err;
274 374
@@ -320,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
320 struct usb_cdc_ncm_dpe16 *dpe16; 420 struct usb_cdc_ncm_dpe16 *dpe16;
321 int ndpoffset; 421 int ndpoffset;
322 int loopcount = 50; /* arbitrary max preventing infinite loop */ 422 int loopcount = 50; /* arbitrary max preventing infinite loop */
423 u32 payload = 0;
323 u8 *c; 424 u8 *c;
324 u16 tci; 425 u16 tci;
325 426
@@ -338,6 +439,9 @@ next_ndp:
338 case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): 439 case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
339 c = (u8 *)&ndp16->dwSignature; 440 c = (u8 *)&ndp16->dwSignature;
340 tci = c[3]; 441 tci = c[3];
442 /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
443 if (!tci && info->flags & FLAG_IPS0_VLAN)
444 tci = MBIM_IPS0_VID;
341 break; 445 break;
342 case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): 446 case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
343 c = (u8 *)&ndp16->dwSignature; 447 c = (u8 *)&ndp16->dwSignature;
@@ -379,6 +483,7 @@ next_ndp:
379 if (!skb) 483 if (!skb)
380 goto error; 484 goto error;
381 usbnet_skb_return(dev, skb); 485 usbnet_skb_return(dev, skb);
486 payload += len; /* count payload bytes in this NTB */
382 } 487 }
383 } 488 }
384err_ndp: 489err_ndp:
@@ -387,6 +492,10 @@ err_ndp:
387 if (ndpoffset && loopcount--) 492 if (ndpoffset && loopcount--)
388 goto next_ndp; 493 goto next_ndp;
389 494
495 /* update stats */
496 ctx->rx_overhead += skb_in->len - payload;
497 ctx->rx_ntbs++;
498
390 return 1; 499 return 1;
391error: 500error:
392 return 0; 501 return 0;
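
Taking the cdc_mbim changes together, 802.1q VLAN IDs now map onto MBIM sessions, with VID 4094 acting as an opt-in alias for IP session 0. An illustrative helper (not part of the driver) that mirrors the mapping spelled out in the tx_fixup comment above:

	/* resolve an 802.1q VID to an MBIM session;
	 * ips0_tagged reflects FLAG_IPS0_VLAN */
	static int vid_to_mbim_session(u16 vid, bool ips0_tagged,
				       u16 *session, bool *is_dss)
	{
		if (vid == 4094 && ips0_tagged)	/* MBIM_IPS0_VID alias */
			vid = 0;
		if (vid < 256) {		/* IPS session <vid> */
			*session = vid;
			*is_dss = false;
			return 0;
		}
		if (vid < 512) {		/* DSS session <vid - 256> */
			*session = vid - 256;
			*is_dss = true;
			return 0;
		}
		return -EINVAL;			/* 512..4093 unsupported */
	}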
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9a2bd11943eb..80a844e0ae03 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,19 +65,384 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
65static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 65static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
66static struct usb_driver cdc_ncm_driver; 66static struct usb_driver cdc_ncm_driver;
67 67
68static int cdc_ncm_setup(struct usbnet *dev) 68struct cdc_ncm_stats {
69 char stat_string[ETH_GSTRING_LEN];
70 int sizeof_stat;
71 int stat_offset;
72};
73
74#define CDC_NCM_STAT(str, m) { \
75 .stat_string = str, \
76 .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
77 .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
78#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
79
80static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
81 CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
82 CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
83 CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
84 CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
85 CDC_NCM_SIMPLE_STAT(tx_overhead),
86 CDC_NCM_SIMPLE_STAT(tx_ntbs),
87 CDC_NCM_SIMPLE_STAT(rx_overhead),
88 CDC_NCM_SIMPLE_STAT(rx_ntbs),
89};
90
91static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
92{
93 switch (sset) {
94 case ETH_SS_STATS:
95 return ARRAY_SIZE(cdc_ncm_gstrings_stats);
96 default:
97 return -EOPNOTSUPP;
98 }
99}
100
101static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
102 struct ethtool_stats __always_unused *stats,
103 u64 *data)
104{
105 struct usbnet *dev = netdev_priv(netdev);
106 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
107 int i;
108 char *p = NULL;
109
110 for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
111 p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
112 data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
113 }
114}
115
116static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
117{
118 u8 *p = data;
119 int i;
120
121 switch (stringset) {
122 case ETH_SS_STATS:
123 for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
124 memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
125 p += ETH_GSTRING_LEN;
126 }
127 }
128}
129
130static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
131
132static const struct ethtool_ops cdc_ncm_ethtool_ops = {
133 .get_settings = usbnet_get_settings,
134 .set_settings = usbnet_set_settings,
135 .get_link = usbnet_get_link,
136 .nway_reset = usbnet_nway_reset,
137 .get_drvinfo = usbnet_get_drvinfo,
138 .get_msglevel = usbnet_get_msglevel,
139 .set_msglevel = usbnet_set_msglevel,
140 .get_ts_info = ethtool_op_get_ts_info,
141 .get_sset_count = cdc_ncm_get_sset_count,
142 .get_strings = cdc_ncm_get_strings,
143 .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
144};
145
146static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
147{
148 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
149 u32 val, max, min;
150
151 /* clamp new_rx to sane values */
152 min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
153 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
154
155 /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
156 if (max < min) {
157 dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
158 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
159 max = min;
160 }
161
162 val = clamp_t(u32, new_rx, min, max);
163 if (val != new_rx)
164 dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range\n", min, max);
165
166 return val;
167}
168
169static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
170{
171 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
172 u32 val, max, min;
173
174 /* clamp new_tx to sane values */
175 min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
176 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
177
178 /* some devices set dwNtbOutMaxSize too low for the above default */
179 min = min(min, max);
180
181 val = clamp_t(u32, new_tx, min, max);
182 if (val != new_tx)
183 dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range\n", min, max);
184
185 return val;
186}
187
188static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute *attr, char *buf)
189{
190 struct usbnet *dev = netdev_priv(to_net_dev(d));
191 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
192
193 return sprintf(buf, "%u\n", ctx->min_tx_pkt);
194}
195
196static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *attr, char *buf)
197{
198 struct usbnet *dev = netdev_priv(to_net_dev(d));
199 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
200
201 return sprintf(buf, "%u\n", ctx->rx_max);
202}
203
204static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
205{
206 struct usbnet *dev = netdev_priv(to_net_dev(d));
207 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
208
209 return sprintf(buf, "%u\n", ctx->tx_max);
210}
211
212static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attribute *attr, char *buf)
213{
214 struct usbnet *dev = netdev_priv(to_net_dev(d));
215 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
216
217 return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
218}
219
220static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
221{
222 struct usbnet *dev = netdev_priv(to_net_dev(d));
223 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
224 unsigned long val;
225
226 /* no need to restrict values - anything from 0 to infinity is OK */
227 if (kstrtoul(buf, 0, &val))
228 return -EINVAL;
229
230 ctx->min_tx_pkt = val;
231 return len;
232}
233
234static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
235{
236 struct usbnet *dev = netdev_priv(to_net_dev(d));
237 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
238 unsigned long val;
239
240 if (kstrtoul(buf, 0, &val) || cdc_ncm_check_rx_max(dev, val) != val)
241 return -EINVAL;
242
243 cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max);
244 return len;
245}
246
247static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
69{ 248{
249 struct usbnet *dev = netdev_priv(to_net_dev(d));
70 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 250 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
251 unsigned long val;
252
253 if (kstrtoul(buf, 0, &val) || cdc_ncm_check_tx_max(dev, val) != val)
254 return -EINVAL;
255
256 cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val);
257 return len;
258}
259
260static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
261{
262 struct usbnet *dev = netdev_priv(to_net_dev(d));
263 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
264 ssize_t ret;
265 unsigned long val;
266
267 ret = kstrtoul(buf, 0, &val);
268 if (ret)
269 return ret;
270 if (val && (val < CDC_NCM_TIMER_INTERVAL_MIN || val > CDC_NCM_TIMER_INTERVAL_MAX))
271 return -EINVAL;
272
273 spin_lock_bh(&ctx->mtx);
274 ctx->timer_interval = val * NSEC_PER_USEC;
275 if (!ctx->timer_interval)
276 ctx->tx_timer_pending = 0;
277 spin_unlock_bh(&ctx->mtx);
278 return len;
279}
280
281static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
282static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
283static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
284static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
285
286#define NCM_PARM_ATTR(name, format, tocpu) \
287static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \
288{ \
289 struct usbnet *dev = netdev_priv(to_net_dev(d)); \
290 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
291 return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
292} \
293static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL)
294
295NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
296NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
297NCM_PARM_ATTR(wNdpInDivisor, "%u", le16_to_cpu);
298NCM_PARM_ATTR(wNdpInPayloadRemainder, "%u", le16_to_cpu);
299NCM_PARM_ATTR(wNdpInAlignment, "%u", le16_to_cpu);
300NCM_PARM_ATTR(dwNtbOutMaxSize, "%u", le32_to_cpu);
301NCM_PARM_ATTR(wNdpOutDivisor, "%u", le16_to_cpu);
302NCM_PARM_ATTR(wNdpOutPayloadRemainder, "%u", le16_to_cpu);
303NCM_PARM_ATTR(wNdpOutAlignment, "%u", le16_to_cpu);
304NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu);
305
306static struct attribute *cdc_ncm_sysfs_attrs[] = {
307 &dev_attr_min_tx_pkt.attr,
308 &dev_attr_rx_max.attr,
309 &dev_attr_tx_max.attr,
310 &dev_attr_tx_timer_usecs.attr,
311 &dev_attr_bmNtbFormatsSupported.attr,
312 &dev_attr_dwNtbInMaxSize.attr,
313 &dev_attr_wNdpInDivisor.attr,
314 &dev_attr_wNdpInPayloadRemainder.attr,
315 &dev_attr_wNdpInAlignment.attr,
316 &dev_attr_dwNtbOutMaxSize.attr,
317 &dev_attr_wNdpOutDivisor.attr,
318 &dev_attr_wNdpOutPayloadRemainder.attr,
319 &dev_attr_wNdpOutAlignment.attr,
320 &dev_attr_wNtbOutMaxDatagrams.attr,
321 NULL,
322};
323
324static struct attribute_group cdc_ncm_sysfs_attr_group = {
325 .name = "cdc_ncm",
326 .attrs = cdc_ncm_sysfs_attrs,
327};
328
329/* handle rx_max and tx_max changes */
330static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
331{
332 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
333 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
71 u32 val; 334 u32 val;
72 u8 flags;
73 u8 iface_no;
74 int err;
75 int eth_hlen;
76 u16 mbim_mtu;
77 u16 ntb_fmt_supported;
78 __le16 max_datagram_size;
79 335
80 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 336 val = cdc_ncm_check_rx_max(dev, new_rx);
337
338 /* inform device about NTB input size changes */
339 if (val != ctx->rx_max) {
340 __le32 dwNtbInMaxSize = cpu_to_le32(val);
341
342 dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
343
344 /* tell device to use new size */
345 if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
346 USB_TYPE_CLASS | USB_DIR_OUT
347 | USB_RECIP_INTERFACE,
348 0, iface_no, &dwNtbInMaxSize, 4) < 0)
349 dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
350 else
351 ctx->rx_max = val;
352 }
353
 354 /* usbnet uses these values for sizing rx queues */
355 if (dev->rx_urb_size != ctx->rx_max) {
356 dev->rx_urb_size = ctx->rx_max;
357 if (netif_running(dev->net))
358 usbnet_unlink_rx_urbs(dev);
359 }
360
361 val = cdc_ncm_check_tx_max(dev, new_tx);
362 if (val != ctx->tx_max)
363 dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
364
365 /* Adding a pad byte here if necessary simplifies the handling
366 * in cdc_ncm_fill_tx_frame, making tx_max always represent
367 * the real skb max size.
368 *
369 * We cannot use dev->maxpacket here because this is called from
370 * .bind which is called before usbnet sets up dev->maxpacket
371 */
372 if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
373 val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
374 val++;
375
376 /* we might need to flush any pending tx buffers if running */
377 if (netif_running(dev->net) && val > ctx->tx_max) {
378 netif_tx_lock_bh(dev->net);
379 usbnet_start_xmit(NULL, dev->net);
380 /* make sure tx_curr_skb is reallocated if it was empty */
381 if (ctx->tx_curr_skb) {
382 dev_kfree_skb_any(ctx->tx_curr_skb);
383 ctx->tx_curr_skb = NULL;
384 }
385 ctx->tx_max = val;
386 netif_tx_unlock_bh(dev->net);
387 } else {
388 ctx->tx_max = val;
389 }
390
391 dev->hard_mtu = ctx->tx_max;
392
 393 /* max qlen depends on hard_mtu and rx_urb_size */
394 usbnet_update_max_qlen(dev);
395
396 /* never pad more than 3 full USB packets per transfer */
397 ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
398 CDC_NCM_MIN_TX_PKT, ctx->tx_max);
399}
400
401/* helpers for NCM and MBIM differences */
402static u8 cdc_ncm_flags(struct usbnet *dev)
403{
404 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
405
406 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
407 return ctx->mbim_desc->bmNetworkCapabilities;
408 if (ctx->func_desc)
409 return ctx->func_desc->bmNetworkCapabilities;
410 return 0;
411}
412
413static int cdc_ncm_eth_hlen(struct usbnet *dev)
414{
415 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
416 return 0;
417 return ETH_HLEN;
418}
419
420static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
421{
422 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
423 return CDC_MBIM_MIN_DATAGRAM_SIZE;
424 return CDC_NCM_MIN_DATAGRAM_SIZE;
425}
426
427static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
428{
429 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
430
431 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
432 return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
433 if (ctx->ether_desc)
434 return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
435 return CDC_NCM_MAX_DATAGRAM_SIZE;
436}
437
438/* initial one-time device setup. MUST be called with the data interface
439 * in altsetting 0
440 */
441static int cdc_ncm_init(struct usbnet *dev)
442{
443 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
444 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
445 int err;
81 446
82 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, 447 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
83 USB_TYPE_CLASS | USB_DIR_IN 448 USB_TYPE_CLASS | USB_DIR_IN
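
The NCM_PARM_ATTR macro above stamps out one read-only sysfs file per NTB parameter; expanding NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu) by hand gives, modulo whitespace:

	static ssize_t cdc_ncm_show_dwNtbInMaxSize(struct device *d,
						   struct device_attribute *attr,
						   char *buf)
	{
		struct usbnet *dev = netdev_priv(to_net_dev(d));
		struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];

		return sprintf(buf, "%u\n", le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
	}
	static DEVICE_ATTR(dwNtbInMaxSize, S_IRUGO, cdc_ncm_show_dwNtbInMaxSize, NULL);

Because the attribute group is named "cdc_ncm", each file surfaces as /sys/class/net/<iface>/cdc_ncm/<name>.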
@@ -89,7 +454,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
89 return err; /* GET_NTB_PARAMETERS is required */ 454 return err; /* GET_NTB_PARAMETERS is required */
90 } 455 }
91 456
92 /* read correct set of parameters according to device mode */ 457 /* set CRC Mode */
458 if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
459 dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
460 err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
461 USB_TYPE_CLASS | USB_DIR_OUT
462 | USB_RECIP_INTERFACE,
463 USB_CDC_NCM_CRC_NOT_APPENDED,
464 iface_no, NULL, 0);
465 if (err < 0)
466 dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
467 }
468
469 /* set NTB format, if both formats are supported.
470 *
471 * "The host shall only send this command while the NCM Data
472 * Interface is in alternate setting 0."
473 */
474 if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
475 USB_CDC_NCM_NTB32_SUPPORTED) {
476 dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
477 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
478 USB_TYPE_CLASS | USB_DIR_OUT
479 | USB_RECIP_INTERFACE,
480 USB_CDC_NCM_NTB16_FORMAT,
481 iface_no, NULL, 0);
482 if (err < 0)
483 dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
484 }
485
486 /* set initial device values */
93 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); 487 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
94 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); 488 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
95 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 489 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
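
Note the quiet fix in the SET_NTB_FORMAT block: the old cdc_ncm_setup() (removed further down) tested bmNtbFormatsSupported against USB_CDC_NCM_NTH32_SIGN, a dword signature constant, whereas the new code tests the dedicated capability bit. Assuming the usual values from include/uapi/linux/usb/cdc.h:

	#define USB_CDC_NCM_NTB16_SUPPORTED	(1 << 0)
	#define USB_CDC_NCM_NTB32_SUPPORTED	(1 << 1)

bmNtbFormatsSupported is a 16-bit bitmap, so the capability-bit test is the one the NCM spec actually calls for.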
@@ -97,72 +491,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
97 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 491 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
98 /* devices prior to NCM Errata shall set this field to zero */ 492 /* devices prior to NCM Errata shall set this field to zero */
99 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); 493 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
100 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
101
102 /* there are some minor differences in NCM and MBIM defaults */
103 if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
104 if (!ctx->mbim_desc)
105 return -EINVAL;
106 eth_hlen = 0;
107 flags = ctx->mbim_desc->bmNetworkCapabilities;
108 ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
109 if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
110 ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
111 } else {
112 if (!ctx->func_desc)
113 return -EINVAL;
114 eth_hlen = ETH_HLEN;
115 flags = ctx->func_desc->bmNetworkCapabilities;
116 ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
117 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
118 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
119 }
120
121 /* common absolute max for NCM and MBIM */
122 if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
123 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
124 494
125 dev_dbg(&dev->intf->dev, 495 dev_dbg(&dev->intf->dev,
126 "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", 496 "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
127 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 497 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
128 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); 498 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
129 499
130 /* max count of tx datagrams */ 500 /* max count of tx datagrams */
131 if ((ctx->tx_max_datagrams == 0) || 501 if ((ctx->tx_max_datagrams == 0) ||
132 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) 502 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
133 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 503 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
134 504
135 /* verify maximum size of received NTB in bytes */ 505 /* set up maximum NDP size */
136 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { 506 ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
137 dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
138 USB_CDC_NCM_NTB_MIN_IN_SIZE);
139 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
140 }
141 507
142 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { 508 /* initial coalescing timer interval */
143 dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n", 509 ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
144 CDC_NCM_NTB_MAX_SIZE_RX);
145 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
146 }
147 510
148 /* inform device about NTB input size changes */ 511 return 0;
149 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { 512}
150 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
151 513
152 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, 514/* set a new max datagram size */
153 USB_TYPE_CLASS | USB_DIR_OUT 515static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
154 | USB_RECIP_INTERFACE, 516{
155 0, iface_no, &dwNtbInMaxSize, 4); 517 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
156 if (err < 0) 518 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
157 dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n"); 519 __le16 max_datagram_size;
520 u16 mbim_mtu;
521 int err;
522
523 /* set default based on descriptors */
524 ctx->max_datagram_size = clamp_t(u32, new_size,
525 cdc_ncm_min_dgram_size(dev),
526 CDC_NCM_MAX_DATAGRAM_SIZE);
527
528 /* inform the device about the selected Max Datagram Size? */
529 if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
530 goto out;
531
532 /* read current mtu value from device */
533 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
534 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
535 0, iface_no, &max_datagram_size, 2);
536 if (err < 0) {
537 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
538 goto out;
158 } 539 }
159 540
160 /* verify maximum size of transmitted NTB in bytes */ 541 if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
161 if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) { 542 goto out;
162 dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", 543
163 CDC_NCM_NTB_MAX_SIZE_TX); 544 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
164 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; 545 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
546 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
547 0, iface_no, &max_datagram_size, 2);
548 if (err < 0)
549 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
550
551out:
552 /* set MTU to max supported by the device if necessary */
553 dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
554
 555 /* do not exceed operator preferred MTU */
556 if (ctx->mbim_extended_desc) {
557 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
558 if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
559 dev->net->mtu = mbim_mtu;
165 } 560 }
561}
562
563static void cdc_ncm_fix_modulus(struct usbnet *dev)
564{
565 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
566 u32 val;
166 567
167 /* 568 /*
168 * verify that the structure alignment is: 569 * verify that the structure alignment is:
@@ -199,68 +600,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
199 } 600 }
200 601
201 /* adjust TX-remainder according to NCM specification. */ 602 /* adjust TX-remainder according to NCM specification. */
202 ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) & 603 ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
203 (ctx->tx_modulus - 1)); 604 (ctx->tx_modulus - 1));
605}
204 606
205 /* additional configuration */ 607static int cdc_ncm_setup(struct usbnet *dev)
206 608{
207 /* set CRC Mode */ 609 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
208 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { 610 u32 def_rx, def_tx;
209 err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
210 USB_TYPE_CLASS | USB_DIR_OUT
211 | USB_RECIP_INTERFACE,
212 USB_CDC_NCM_CRC_NOT_APPENDED,
213 iface_no, NULL, 0);
214 if (err < 0)
215 dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
216 }
217
218 /* set NTB format, if both formats are supported */
219 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
220 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
221 USB_TYPE_CLASS | USB_DIR_OUT
222 | USB_RECIP_INTERFACE,
223 USB_CDC_NCM_NTB16_FORMAT,
224 iface_no, NULL, 0);
225 if (err < 0)
226 dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
227 }
228
229 /* inform the device about the selected Max Datagram Size */
230 if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
231 goto out;
232
233 /* read current mtu value from device */
234 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
235 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
236 0, iface_no, &max_datagram_size, 2);
237 if (err < 0) {
238 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
239 goto out;
240 }
241
242 if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
243 goto out;
244 611
 245 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); 612 /* be conservative when selecting initial buffer size to
246 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, 613 * increase the number of hosts this will work for
247 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 614 */
248 0, iface_no, &max_datagram_size, 2); 615 def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
249 if (err < 0) 616 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
250 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); 617 def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
618 le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
251 619
252out: 620 /* clamp rx_max and tx_max and inform device */
253 /* set MTU to max supported by the device if necessary */ 621 cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
254 if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
255 dev->net->mtu = ctx->max_datagram_size - eth_hlen;
256 622
 257 /* do not exceed operator preferred MTU */ 623 /* sanitize the modulus and remainder values */
258 if (ctx->mbim_extended_desc) { 624 cdc_ncm_fix_modulus(dev);
259 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
260 if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
261 dev->net->mtu = mbim_mtu;
262 }
263 625
626 /* set max datagram size */
627 cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
264 return 0; 628 return 0;
265} 629}
266 630
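
A worked example of the remainder adjustment above: with tx_modulus = 4, a spec remainder of 0 and the 14-byte Ethernet header used in NCM mode,

	/* tx_modulus = 4, wNdpOutPayloadRemainder = 0, eth_hlen = 14 */
	tx_remainder = (0 - 14) & (4 - 1);	/* == 2 */

so datagrams are placed at offsets congruent to 2 (mod 4) and the IP header following the 14-byte Ethernet header lands on a 4-byte boundary. For MBIM, cdc_ncm_eth_hlen() is 0 and the device's remainder is used as-is.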
@@ -424,10 +788,21 @@ advance:
424 } 788 }
425 789
426 /* check if we got everything */ 790 /* check if we got everything */
427 if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) { 791 if (!ctx->data) {
428 dev_dbg(&intf->dev, "CDC descriptors missing\n"); 792 dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
429 goto error; 793 goto error;
430 } 794 }
795 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
796 if (!ctx->mbim_desc) {
797 dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
798 goto error;
799 }
800 } else {
801 if (!ctx->ether_desc || !ctx->func_desc) {
802 dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
803 goto error;
804 }
805 }
431 806
432 /* claim data interface, if different from control */ 807 /* claim data interface, if different from control */
433 if (ctx->data != ctx->control) { 808 if (ctx->data != ctx->control) {
@@ -447,8 +822,8 @@ advance:
447 goto error2; 822 goto error2;
448 } 823 }
449 824
450 /* initialize data interface */ 825 /* initialize basic device settings */
451 if (cdc_ncm_setup(dev)) 826 if (cdc_ncm_init(dev))
452 goto error2; 827 goto error2;
453 828
454 /* configure data interface */ 829 /* configure data interface */
@@ -477,18 +852,14 @@ advance:
477 dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr); 852 dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
478 } 853 }
479 854
 480 /* usbnet uses these values for sizing tx/rx queues */ 855 /* finish setting up the device-specific data */
481 dev->hard_mtu = ctx->tx_max; 856 cdc_ncm_setup(dev);
482 dev->rx_urb_size = ctx->rx_max;
483 857
484 /* cdc_ncm_setup will override dwNtbOutMaxSize if it is 858 /* override ethtool_ops */
485 * outside the sane range. Adding a pad byte here if necessary 859 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
486 * simplifies the handling in cdc_ncm_fill_tx_frame, making 860
487 * tx_max always represent the real skb max size. 861 /* add our sysfs attrs */
488 */ 862 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
489 if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
490 ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
491 ctx->tx_max++;
492 863
493 return 0; 864 return 0;
494 865
@@ -541,10 +912,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
541} 912}
542EXPORT_SYMBOL_GPL(cdc_ncm_unbind); 913EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
543 914
544/* Select the MBIM altsetting iff it is preferred and available, 915/* Return the number of the MBIM control interface altsetting iff it
 545 * returning the number of the corresponding data interface altsetting 916 * is preferred and available.
546 */ 917 */
547u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) 918u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
548{ 919{
549 struct usb_host_interface *alt; 920 struct usb_host_interface *alt;
550 921
@@ -563,15 +934,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
563 * the rules given in section 6 (USB Device Model) of this 934 * the rules given in section 6 (USB Device Model) of this
564 * specification." 935 * specification."
565 */ 936 */
566 if (prefer_mbim && intf->num_altsetting == 2) { 937 if (intf->num_altsetting < 2)
938 return intf->cur_altsetting->desc.bAlternateSetting;
939
940 if (prefer_mbim) {
567 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); 941 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
568 if (alt && cdc_ncm_comm_intf_is_mbim(alt) && 942 if (alt && cdc_ncm_comm_intf_is_mbim(alt))
569 !usb_set_interface(dev->udev, 943 return CDC_NCM_COMM_ALTSETTING_MBIM;
570 intf->cur_altsetting->desc.bInterfaceNumber,
571 CDC_NCM_COMM_ALTSETTING_MBIM))
572 return CDC_NCM_DATA_ALTSETTING_MBIM;
573 } 944 }
574 return CDC_NCM_DATA_ALTSETTING_NCM; 945 return CDC_NCM_COMM_ALTSETTING_NCM;
575} 946}
576EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); 947EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
577 948
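
The altsetting constants are easy to mix up now that the function returns control-interface altsettings instead of data-interface ones; for reference, assuming the definitions in include/linux/usb/cdc_ncm.h:

	#define CDC_NCM_COMM_ALTSETTING_NCM	0
	#define CDC_NCM_COMM_ALTSETTING_MBIM	1
	#define CDC_NCM_DATA_ALTSETTING_NCM	1
	#define CDC_NCM_DATA_ALTSETTING_MBIM	2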
@@ -580,12 +951,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
580 int ret; 951 int ret;
581 952
582 /* MBIM backwards compatible function? */ 953 /* MBIM backwards compatible function? */
583 cdc_ncm_select_altsetting(dev, intf); 954 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
584 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
585 return -ENODEV; 955 return -ENODEV;
586 956
587 /* NCM data altsetting is always 1 */ 957 /* The NCM data altsetting is fixed */
588 ret = cdc_ncm_bind_common(dev, intf, 1); 958 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
589 959
590 /* 960 /*
591 * We should get an event when network connection is "connected" or 961 * We should get an event when network connection is "connected" or
@@ -628,7 +998,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
628 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 998 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
629 999
630 /* verify that there is room for the NDP and the datagram (reserve) */ 1000 /* verify that there is room for the NDP and the datagram (reserve) */
631 if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE) 1001 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
632 return NULL; 1002 return NULL;
633 1003
634 /* link to it */ 1004 /* link to it */
@@ -638,7 +1008,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
638 nth16->wNdpIndex = cpu_to_le16(skb->len); 1008 nth16->wNdpIndex = cpu_to_le16(skb->len);
639 1009
640 /* push a new empty NDP */ 1010 /* push a new empty NDP */
641 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE); 1011 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
642 ndp16->dwSignature = sign; 1012 ndp16->dwSignature = sign;
643 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); 1013 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
644 return ndp16; 1014 return ndp16;
@@ -683,6 +1053,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
683 1053
684 /* count total number of frames in this NTB */ 1054 /* count total number of frames in this NTB */
685 ctx->tx_curr_frame_num = 0; 1055 ctx->tx_curr_frame_num = 0;
1056
1057 /* recent payload counter for this skb_out */
1058 ctx->tx_curr_frame_payload = 0;
686 } 1059 }
687 1060
688 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { 1061 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +1093,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
720 ctx->tx_rem_sign = sign; 1093 ctx->tx_rem_sign = sign;
721 skb = NULL; 1094 skb = NULL;
722 ready2send = 1; 1095 ready2send = 1;
1096 ctx->tx_reason_ntb_full++; /* count reason for transmitting */
723 } 1097 }
724 break; 1098 break;
725 } 1099 }
@@ -733,12 +1107,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
733 ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len); 1107 ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
734 ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16)); 1108 ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
735 memcpy(skb_put(skb_out, skb->len), skb->data, skb->len); 1109 memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
1110 ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
736 dev_kfree_skb_any(skb); 1111 dev_kfree_skb_any(skb);
737 skb = NULL; 1112 skb = NULL;
738 1113
739 /* send now if this NDP is full */ 1114 /* send now if this NDP is full */
740 if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) { 1115 if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
741 ready2send = 1; 1116 ready2send = 1;
1117 ctx->tx_reason_ndp_full++; /* count reason for transmitting */
742 break; 1118 break;
743 } 1119 }
744 } 1120 }
@@ -758,7 +1134,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
758 ctx->tx_curr_skb = skb_out; 1134 ctx->tx_curr_skb = skb_out;
759 goto exit_no_skb; 1135 goto exit_no_skb;
760 1136
761 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { 1137 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
762 /* wait for more frames */ 1138 /* wait for more frames */
763 /* push variables */ 1139 /* push variables */
764 ctx->tx_curr_skb = skb_out; 1140 ctx->tx_curr_skb = skb_out;
@@ -768,11 +1144,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
768 goto exit_no_skb; 1144 goto exit_no_skb;
769 1145
770 } else { 1146 } else {
1147 if (n == ctx->tx_max_datagrams)
1148 ctx->tx_reason_max_datagram++; /* count reason for transmitting */
771 /* frame goes out */ 1149 /* frame goes out */
772 /* variables will be reset at next call */ 1150 /* variables will be reset at next call */
773 } 1151 }
774 1152
 775 /* If collected data size is less than or equal to CDC_NCM_MIN_TX_PKT 1153 /* If collected data size is less than or equal to ctx->min_tx_pkt
776 * bytes, we send buffers as it is. If we get more data, it 1154 * bytes, we send buffers as it is. If we get more data, it
777 * would be more efficient for USB HS mobile device with DMA 1155 * would be more efficient for USB HS mobile device with DMA
778 * engine to receive a full size NTB, than canceling DMA 1156 * engine to receive a full size NTB, than canceling DMA
@@ -782,7 +1160,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
782 * a ZLP after full sized NTBs. 1160 * a ZLP after full sized NTBs.
783 */ 1161 */
784 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && 1162 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
785 skb_out->len > CDC_NCM_MIN_TX_PKT) 1163 skb_out->len > ctx->min_tx_pkt)
786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 1164 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
787 ctx->tx_max - skb_out->len); 1165 ctx->tx_max - skb_out->len);
788 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) 1166 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
@@ -795,11 +1173,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
795 /* return skb */ 1173 /* return skb */
796 ctx->tx_curr_skb = NULL; 1174 ctx->tx_curr_skb = NULL;
797 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 1175 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
1176
1177 /* keep private stats: framing overhead and number of NTBs */
1178 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
1179 ctx->tx_ntbs++;
1180
1181 /* usbnet has already counted all the framing overhead.
1182 * Adjust the stats so that the tx_bytes counter show real
1183 * payload data instead.
1184 */
1185 dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
1186
798 return skb_out; 1187 return skb_out;
799 1188
800exit_no_skb: 1189exit_no_skb:
801 /* Start timer, if there is a remaining skb */ 1190 /* Start timer, if there is a remaining non-empty skb */
802 if (ctx->tx_curr_skb != NULL) 1191 if (ctx->tx_curr_skb != NULL && n > 0)
803 cdc_ncm_tx_timeout_start(ctx); 1192 cdc_ncm_tx_timeout_start(ctx);
804 return NULL; 1193 return NULL;
805} 1194}
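
The stats adjustment is easiest to see with numbers. Assuming a 16384-byte NTB (skb_out->len) carrying 9000 bytes of datagram payload (tx_curr_frame_payload):

	/* usbnet will add all 16384 bytes to tx_bytes, so: */
	ctx->tx_overhead         += 16384 - 9000;	/* 7384 framing bytes   */
	dev->net->stats.tx_bytes -= 16384 - 9000;	/* back the framing out */

leaving tx_bytes counting payload only, while the framing shows up in the new tx_overhead ethtool counter.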
@@ -810,7 +1199,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
810 /* start timer, if not already started */ 1199 /* start timer, if not already started */
811 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) 1200 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
812 hrtimer_start(&ctx->tx_timer, 1201 hrtimer_start(&ctx->tx_timer,
813 ktime_set(0, CDC_NCM_TIMER_INTERVAL), 1202 ktime_set(0, ctx->timer_interval),
814 HRTIMER_MODE_REL); 1203 HRTIMER_MODE_REL);
815} 1204}
816 1205
@@ -835,6 +1224,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
835 cdc_ncm_tx_timeout_start(ctx); 1224 cdc_ncm_tx_timeout_start(ctx);
836 spin_unlock_bh(&ctx->mtx); 1225 spin_unlock_bh(&ctx->mtx);
837 } else if (dev->net != NULL) { 1226 } else if (dev->net != NULL) {
1227 ctx->tx_reason_timeout++; /* count reason for transmitting */
838 spin_unlock_bh(&ctx->mtx); 1228 spin_unlock_bh(&ctx->mtx);
839 netif_tx_lock_bh(dev->net); 1229 netif_tx_lock_bh(dev->net);
840 usbnet_start_xmit(NULL, dev->net); 1230 usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1360,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
970 struct usb_cdc_ncm_dpe16 *dpe16; 1360 struct usb_cdc_ncm_dpe16 *dpe16;
971 int ndpoffset; 1361 int ndpoffset;
972 int loopcount = 50; /* arbitrary max preventing infinite loop */ 1362 int loopcount = 50; /* arbitrary max preventing infinite loop */
1363 u32 payload = 0;
973 1364
974 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in); 1365 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
975 if (ndpoffset < 0) 1366 if (ndpoffset < 0)
@@ -1015,13 +1406,13 @@ next_ndp:
1015 break; 1406 break;
1016 1407
1017 } else { 1408 } else {
1018 skb = skb_clone(skb_in, GFP_ATOMIC); 1409 /* create a fresh copy to reduce truesize */
1410 skb = netdev_alloc_skb_ip_align(dev->net, len);
1019 if (!skb) 1411 if (!skb)
1020 goto error; 1412 goto error;
1021 skb->len = len; 1413 memcpy(skb_put(skb, len), skb_in->data + offset, len);
1022 skb->data = ((u8 *)skb_in->data) + offset;
1023 skb_set_tail_pointer(skb, len);
1024 usbnet_skb_return(dev, skb); 1414 usbnet_skb_return(dev, skb);
1415 payload += len; /* count payload bytes in this NTB */
1025 } 1416 }
1026 } 1417 }
1027err_ndp: 1418err_ndp:
@@ -1030,6 +1421,10 @@ err_ndp:
1030 if (ndpoffset && loopcount--) 1421 if (ndpoffset && loopcount--)
1031 goto next_ndp; 1422 goto next_ndp;
1032 1423
1424 /* update stats */
1425 ctx->rx_overhead += skb_in->len - payload;
1426 ctx->rx_ntbs++;
1427
1033 return 1; 1428 return 1;
1034error: 1429error:
1035 return 0; 1430 return 0;
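
Review note: skb_clone() would share skb_in's large URB buffer, so every datagram in the NTB inherits the whole buffer's truesize and inflates socket receive-queue accounting; copying each datagram into a right-sized skb fixes that at the cost of a memcpy. The pattern in isolation (illustrative names):

/* copy one datagram out of a multi-packet URB buffer */
static struct sk_buff *ncm_copy_datagram(struct net_device *net,
					 const struct sk_buff *skb_in,
					 unsigned int offset,
					 unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(net, len);

	if (skb)	/* truesize now reflects len, not the URB size */
		memcpy(skb_put(skb, len), skb_in->data + offset, len);
	return skb;
}
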
@@ -1049,14 +1444,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
1049 */ 1444 */
1050 if ((tx_speed > 1000000) && (rx_speed > 1000000)) { 1445 if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
1051 netif_info(dev, link, dev->net, 1446 netif_info(dev, link, dev->net,
1052 "%u mbit/s downlink %u mbit/s uplink\n", 1447 "%u mbit/s downlink %u mbit/s uplink\n",
1053 (unsigned int)(rx_speed / 1000000U), 1448 (unsigned int)(rx_speed / 1000000U),
1054 (unsigned int)(tx_speed / 1000000U)); 1449 (unsigned int)(tx_speed / 1000000U));
1055 } else { 1450 } else {
1056 netif_info(dev, link, dev->net, 1451 netif_info(dev, link, dev->net,
1057 "%u kbit/s downlink %u kbit/s uplink\n", 1452 "%u kbit/s downlink %u kbit/s uplink\n",
1058 (unsigned int)(rx_speed / 1000U), 1453 (unsigned int)(rx_speed / 1000U),
1059 (unsigned int)(tx_speed / 1000U)); 1454 (unsigned int)(tx_speed / 1000U));
1060 } 1455 }
1061} 1456}
1062 1457
@@ -1086,11 +1481,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1086 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be 1481 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1087 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. 1482 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1088 */ 1483 */
1089 ctx->connected = le16_to_cpu(event->wValue);
1090 netif_info(dev, link, dev->net, 1484 netif_info(dev, link, dev->net,
1091 "network connection: %sconnected\n", 1485 "network connection: %sconnected\n",
1092 ctx->connected ? "" : "dis"); 1486 !!event->wValue ? "" : "dis");
1093 usbnet_link_change(dev, ctx->connected, 0); 1487 usbnet_link_change(dev, !!event->wValue, 0);
1094 break; 1488 break;
1095 1489
1096 case USB_CDC_NOTIFY_SPEED_CHANGE: 1490 case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1504,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1110 } 1504 }
1111} 1505}
1112 1506
1113static int cdc_ncm_check_connect(struct usbnet *dev)
1114{
1115 struct cdc_ncm_ctx *ctx;
1116
1117 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1118 if (ctx == NULL)
1119 return 1; /* disconnected */
1120
1121 return !ctx->connected;
1122}
1123
1124static const struct driver_info cdc_ncm_info = { 1507static const struct driver_info cdc_ncm_info = {
1125 .description = "CDC NCM", 1508 .description = "CDC NCM",
1126 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1509 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1127 .bind = cdc_ncm_bind, 1510 .bind = cdc_ncm_bind,
1128 .unbind = cdc_ncm_unbind, 1511 .unbind = cdc_ncm_unbind,
1129 .check_connect = cdc_ncm_check_connect,
1130 .manage_power = usbnet_manage_power, 1512 .manage_power = usbnet_manage_power,
1131 .status = cdc_ncm_status, 1513 .status = cdc_ncm_status,
1132 .rx_fixup = cdc_ncm_rx_fixup, 1514 .rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1522,6 @@ static const struct driver_info wwan_info = {
1140 | FLAG_WWAN, 1522 | FLAG_WWAN,
1141 .bind = cdc_ncm_bind, 1523 .bind = cdc_ncm_bind,
1142 .unbind = cdc_ncm_unbind, 1524 .unbind = cdc_ncm_unbind,
1143 .check_connect = cdc_ncm_check_connect,
1144 .manage_power = usbnet_manage_power, 1525 .manage_power = usbnet_manage_power,
1145 .status = cdc_ncm_status, 1526 .status = cdc_ncm_status,
1146 .rx_fixup = cdc_ncm_rx_fixup, 1527 .rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1535,6 @@ static const struct driver_info wwan_noarp_info = {
1154 | FLAG_WWAN | FLAG_NOARP, 1535 | FLAG_WWAN | FLAG_NOARP,
1155 .bind = cdc_ncm_bind, 1536 .bind = cdc_ncm_bind,
1156 .unbind = cdc_ncm_unbind, 1537 .unbind = cdc_ncm_unbind,
1157 .check_connect = cdc_ncm_check_connect,
1158 .manage_power = usbnet_manage_power, 1538 .manage_power = usbnet_manage_power,
1159 .status = cdc_ncm_status, 1539 .status = cdc_ncm_status,
1160 .rx_fixup = cdc_ncm_rx_fixup, 1540 .rx_fixup = cdc_ncm_rx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 660bd5ea9fc0..a3a05869309d 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
2425 net->type = ARPHRD_NONE; 2425 net->type = ARPHRD_NONE;
2426 net->mtu = DEFAULT_MTU - 14; 2426 net->mtu = DEFAULT_MTU - 14;
2427 net->tx_queue_len = 10; 2427 net->tx_queue_len = 10;
2428 SET_ETHTOOL_OPS(net, &ops); 2428 net->ethtool_ops = &ops;
2429 2429
2430 /* and initialize the semaphore */ 2430 /* and initialize the semaphore */
2431 spin_lock_init(&hso_net->net_lock); 2431 spin_lock_init(&hso_net->net_lock);
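
Review note: SET_ETHTOOL_OPS() was only ever a trivial wrapper, so this conversion (repeated for each driver below) is mechanical; the macro expanded to roughly:

#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
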
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 312178d7b698..f9822bc75425 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -172,24 +172,11 @@ err:
172 return ret; 172 return ret;
173} 173}
174 174
175static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
176{
177 struct cdc_ncm_ctx *ctx;
178
179 ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
180
181 if (ctx == NULL)
182 return 1; /* disconnected */
183
184 return !ctx->connected;
185}
186
187static const struct driver_info huawei_cdc_ncm_info = { 175static const struct driver_info huawei_cdc_ncm_info = {
188 .description = "Huawei CDC NCM device", 176 .description = "Huawei CDC NCM device",
189 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 177 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
190 .bind = huawei_cdc_ncm_bind, 178 .bind = huawei_cdc_ncm_bind,
191 .unbind = huawei_cdc_ncm_unbind, 179 .unbind = huawei_cdc_ncm_unbind,
192 .check_connect = huawei_cdc_ncm_check_connect,
193 .manage_power = huawei_cdc_ncm_manage_power, 180 .manage_power = huawei_cdc_ncm_manage_power,
194 .rx_fixup = cdc_ncm_rx_fixup, 181 .rx_fixup = cdc_ncm_rx_fixup,
195 .tx_fixup = cdc_ncm_tx_fixup, 182 .tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 973275fef250..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -534,7 +534,7 @@ static int ipheth_probe(struct usb_interface *intf,
534 usb_set_intfdata(intf, dev); 534 usb_set_intfdata(intf, dev);
535 535
536 SET_NETDEV_DEV(netdev, &intf->dev); 536 SET_NETDEV_DEV(netdev, &intf->dev);
537 SET_ETHTOOL_OPS(netdev, &ops); 537 netdev->ethtool_ops = &ops;
538 538
539 retval = register_netdev(netdev); 539 retval = register_netdev(netdev);
540 if (retval) { 540 if (retval) {
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index a359d3bb7c5b..dcb6d33141e0 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1171,7 +1171,7 @@ err_fw:
1171 netdev->netdev_ops = &kaweth_netdev_ops; 1171 netdev->netdev_ops = &kaweth_netdev_ops;
1172 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; 1172 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
1173 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); 1173 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
1174 SET_ETHTOOL_OPS(netdev, &ops); 1174 netdev->ethtool_ops = &ops;
1175 1175
1176 /* kaweth is zeroed as part of alloc_netdev */ 1176 /* kaweth is zeroed as part of alloc_netdev */
1177 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); 1177 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 03e8a15d7deb..f84080215915 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
1159 1159
1160 net->watchdog_timeo = PEGASUS_TX_TIMEOUT; 1160 net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
1161 net->netdev_ops = &pegasus_netdev_ops; 1161 net->netdev_ops = &pegasus_netdev_ops;
1162 SET_ETHTOOL_OPS(net, &ops); 1162 net->ethtool_ops = &ops;
1163 pegasus->mii.dev = net; 1163 pegasus->mii.dev = net;
1164 pegasus->mii.mdio_read = mdio_read; 1164 pegasus->mii.mdio_read = mdio_read;
1165 pegasus->mii.mdio_write = mdio_write; 1165 pegasus->mii.mdio_write = mdio_write;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index dc4bf06948c7..cf62d7e8329f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
766 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 766 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
767 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
768 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
769 {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
770 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
771 {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
767 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ 772 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
768 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 773 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
769 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 774 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3fbfb0869030..25431965a625 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -630,12 +630,10 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
630 int ret; 630 int ret;
631 void *tmp; 631 void *tmp;
632 632
633 tmp = kmalloc(size, GFP_KERNEL); 633 tmp = kmemdup(data, size, GFP_KERNEL);
634 if (!tmp) 634 if (!tmp)
635 return -ENOMEM; 635 return -ENOMEM;
636 636
637 memcpy(tmp, data, size);
638
639 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), 637 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
640 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, 638 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
641 value, index, tmp, size, 500); 639 value, index, tmp, size, 500);
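
Review note: kmemdup() is the idiomatic replacement for a kmalloc()/memcpy() pair and is behaviorally equivalent to the following sketch, so no functional change:

static void *kmemdup_equiv(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}
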
@@ -3452,7 +3450,7 @@ static int rtl8152_probe(struct usb_interface *intf,
3452 NETIF_F_TSO | NETIF_F_FRAGLIST | 3450 NETIF_F_TSO | NETIF_F_FRAGLIST |
3453 NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 3451 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
3454 3452
3455 SET_ETHTOOL_OPS(netdev, &ops); 3453 netdev->ethtool_ops = &ops;
3456 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 3454 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
3457 3455
3458 tp->mii.dev = netdev; 3456 tp->mii.dev = netdev;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index da2c4583bd2d..6e87e5710048 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
878 dev->netdev = netdev; 878 dev->netdev = netdev;
879 netdev->netdev_ops = &rtl8150_netdev_ops; 879 netdev->netdev_ops = &rtl8150_netdev_ops;
880 netdev->watchdog_timeo = RTL8150_TX_TIMEOUT; 880 netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
881 SET_ETHTOOL_OPS(netdev, &ops); 881 netdev->ethtool_ops = &ops;
882 dev->intr_interval = 100; /* 100ms */ 882 dev->intr_interval = 100; /* 100ms */
883 883
884 if (!alloc_all_urbs(dev)) { 884 if (!alloc_all_urbs(dev)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a852b5f215f..7d9f84a91f37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1646 dev->netdev_ops = &virtnet_netdev; 1646 dev->netdev_ops = &virtnet_netdev;
1647 dev->features = NETIF_F_HIGHDMA; 1647 dev->features = NETIF_F_HIGHDMA;
1648 1648
1649 SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); 1649 dev->ethtool_ops = &virtnet_ethtool_ops;
1650 SET_NETDEV_DEV(dev, &vdev->dev); 1650 SET_NETDEV_DEV(dev, &vdev->dev);
1651 1651
1652 /* Do we support "hardware" checksums? */ 1652 /* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
1724 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1724 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1725 vi->has_cvq = true; 1725 vi->has_cvq = true;
1726 1726
1727 if (vi->any_header_sg) {
1728 if (vi->mergeable_rx_bufs)
1729 dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1730 else
1731 dev->needed_headroom = sizeof(struct virtio_net_hdr);
1732 }
1733
1727 /* Use single tx/rx queue pair as default */ 1734 /* Use single tx/rx queue pair as default */
1728 vi->curr_queue_pairs = 1; 1735 vi->curr_queue_pairs = 1;
1729 vi->max_queue_pairs = max_queue_pairs; 1736 vi->max_queue_pairs = max_queue_pairs;
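
Review note: setting dev->needed_headroom lets the core reserve room for the virtio-net header in front of locally generated packets, so the xmit path can prepend it in place when the device accepts arbitrary header layouts (any_header_sg). A sketch of how such reserved headroom is consumed safely (hypothetical helper; forwarded skbs may still lack the room, hence the skb_cow_head() check):

static int prepend_vnet_hdr(struct sk_buff *skb, unsigned int hdr_len)
{
	/* no-op for skbs built with needed_headroom reserved */
	if (skb_cow_head(skb, hdr_len))
		return -ENOMEM;

	memset(skb_push(skb, hdr_len), 0, hdr_len);
	return 0;
}
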
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 600ab56c0008..40c1c7b0d9e0 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -431,8 +431,8 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
431 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 431 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
432 ecmd->duplex = DUPLEX_FULL; 432 ecmd->duplex = DUPLEX_FULL;
433 } else { 433 } else {
434 ethtool_cmd_speed_set(ecmd, -1); 434 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
435 ecmd->duplex = -1; 435 ecmd->duplex = DUPLEX_UNKNOWN;
436 } 436 }
437 return 0; 437 return 0;
438} 438}
@@ -579,7 +579,7 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
579} 579}
580 580
581static int 581static int
582vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) 582vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
583{ 583{
584 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 584 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
585 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 585 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,7 +592,7 @@ vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
592} 592}
593 593
594static int 594static int
595vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) 595vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
596{ 596{
597 unsigned int i; 597 unsigned int i;
598 unsigned long flags; 598 unsigned long flags;
@@ -628,12 +628,12 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
628 .get_rxnfc = vmxnet3_get_rxnfc, 628 .get_rxnfc = vmxnet3_get_rxnfc,
629#ifdef VMXNET3_RSS 629#ifdef VMXNET3_RSS
630 .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, 630 .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
631 .get_rxfh_indir = vmxnet3_get_rss_indir, 631 .get_rxfh = vmxnet3_get_rss,
632 .set_rxfh_indir = vmxnet3_set_rss_indir, 632 .set_rxfh = vmxnet3_set_rss,
633#endif 633#endif
634}; 634};
635 635
636void vmxnet3_set_ethtool_ops(struct net_device *netdev) 636void vmxnet3_set_ethtool_ops(struct net_device *netdev)
637{ 637{
638 SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); 638 netdev->ethtool_ops = &vmxnet3_ethtool_ops;
639} 639}
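
Review note: the ethtool core replaced the indirection-table-only hooks with get_rxfh/set_rxfh, which also carry the RSS hash key; a driver whose hardware key is fixed, as assumed here, may simply ignore the key argument. The signatures in this kernel generation (the hash-function argument came later):

/* key may be left untouched when the hw key is not configurable */
static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key);
static int example_set_rxfh(struct net_device *dev, const u32 *indir,
			    const u8 *key);
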
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4dbb2ed85b97..1610d51dbb5c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,6 +127,7 @@ struct vxlan_dev {
127 struct list_head next; /* vxlan's per namespace list */ 127 struct list_head next; /* vxlan's per namespace list */
128 struct vxlan_sock *vn_sock; /* listening socket */ 128 struct vxlan_sock *vn_sock; /* listening socket */
129 struct net_device *dev; 129 struct net_device *dev;
130 struct net *net; /* netns for packet i/o */
130 struct vxlan_rdst default_dst; /* default destination */ 131 struct vxlan_rdst default_dst; /* default destination */
131 union vxlan_addr saddr; /* source address */ 132 union vxlan_addr saddr; /* source address */
132 __be16 dst_port; 133 __be16 dst_port;
@@ -134,7 +135,7 @@ struct vxlan_dev {
134 __u16 port_max; 135 __u16 port_max;
135 __u8 tos; /* TOS override */ 136 __u8 tos; /* TOS override */
136 __u8 ttl; 137 __u8 ttl;
137 u32 flags; /* VXLAN_F_* below */ 138 u32 flags; /* VXLAN_F_* in vxlan.h */
138 139
139 struct work_struct sock_work; 140 struct work_struct sock_work;
140 struct work_struct igmp_join; 141 struct work_struct igmp_join;
@@ -149,13 +150,6 @@ struct vxlan_dev {
149 struct hlist_head fdb_head[FDB_HASH_SIZE]; 150 struct hlist_head fdb_head[FDB_HASH_SIZE];
150}; 151};
151 152
152#define VXLAN_F_LEARN 0x01
153#define VXLAN_F_PROXY 0x02
154#define VXLAN_F_RSC 0x04
155#define VXLAN_F_L2MISS 0x08
156#define VXLAN_F_L3MISS 0x10
157#define VXLAN_F_IPV6 0x20 /* internal flag */
158
159/* salt for hash table */ 153/* salt for hash table */
160static u32 vxlan_salt __read_mostly; 154static u32 vxlan_salt __read_mostly;
161static struct workqueue_struct *vxlan_wq; 155static struct workqueue_struct *vxlan_wq;
@@ -571,6 +565,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
571 goto out; 565 goto out;
572 } 566 }
573 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 567 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
568 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
574 569
575 off_eth = skb_gro_offset(skb); 570 off_eth = skb_gro_offset(skb);
576 hlen = off_eth + sizeof(*eh); 571 hlen = off_eth + sizeof(*eh);
@@ -605,6 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
605 } 600 }
606 601
607 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */ 602 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
603 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
608 pp = ptype->callbacks.gro_receive(head, skb); 604 pp = ptype->callbacks.gro_receive(head, skb);
609 605
610out_unlock: 606out_unlock:
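
Review note: on a CHECKSUM_COMPLETE skb, skb->csum covers every byte GRO has not yet consumed, so each skb_gro_pull() of an encapsulation header must be paired with skb_gro_postpull_rcsum(), which csum_sub()s the pulled bytes; otherwise inner checksum validation breaks. The pairing in the abstract:

static void gro_consume_hdr(struct sk_buff *skb, const void *hdr,
			    unsigned int len)
{
	skb_gro_pull(skb, len);			/* step past the header */
	skb_gro_postpull_rcsum(skb, hdr, len);	/* keep skb->csum in sync */
}
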
@@ -1203,6 +1199,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
1203 1199
1204 remote_ip = &vxlan->default_dst.remote_ip; 1200 remote_ip = &vxlan->default_dst.remote_ip;
1205 skb_reset_mac_header(skb); 1201 skb_reset_mac_header(skb);
1202 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1206 skb->protocol = eth_type_trans(skb, vxlan->dev); 1203 skb->protocol = eth_type_trans(skb, vxlan->dev);
1207 1204
1208 /* Ignore packet loops (and multicast echo) */ 1205 /* Ignore packet loops (and multicast echo) */
@@ -1599,18 +1596,11 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
1599} 1596}
1600EXPORT_SYMBOL_GPL(vxlan_src_port); 1597EXPORT_SYMBOL_GPL(vxlan_src_port);
1601 1598
1602static int handle_offloads(struct sk_buff *skb) 1599static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
1600 bool udp_csum)
1603{ 1601{
1604 if (skb_is_gso(skb)) { 1602 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1605 int err = skb_unclone(skb, GFP_ATOMIC); 1603 return iptunnel_handle_offloads(skb, udp_csum, type);
1606 if (unlikely(err))
1607 return err;
1608
1609 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1610 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
1611 skb->ip_summed = CHECKSUM_NONE;
1612
1613 return 0;
1614} 1604}
1615 1605
1616#if IS_ENABLED(CONFIG_IPV6) 1606#if IS_ENABLED(CONFIG_IPV6)
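
Review note: the open-coded handle_offloads() is folded into the shared iptunnel_handle_offloads() helper, which additionally knows about the SKB_GSO_UDP_TUNNEL_CSUM type needed when an outer UDP checksum is requested. Roughly (simplified sketch; the real helper also manages inner headers and encapsulation state):

static struct sk_buff *offloads_sketch(struct sk_buff *skb, bool csum,
				       int gso_type)
{
	if (skb_is_gso(skb)) {
		if (skb_unclone(skb, GFP_ATOMIC))
			return ERR_PTR(-ENOMEM);
		skb_shinfo(skb)->gso_type |= gso_type;
	} else if (!csum && skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
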
@@ -1618,7 +1608,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1618 struct dst_entry *dst, struct sk_buff *skb, 1608 struct dst_entry *dst, struct sk_buff *skb,
1619 struct net_device *dev, struct in6_addr *saddr, 1609 struct net_device *dev, struct in6_addr *saddr,
1620 struct in6_addr *daddr, __u8 prio, __u8 ttl, 1610 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1621 __be16 src_port, __be16 dst_port, __be32 vni) 1611 __be16 src_port, __be16 dst_port, __be32 vni,
1612 bool xnet)
1622{ 1613{
1623 struct ipv6hdr *ip6h; 1614 struct ipv6hdr *ip6h;
1624 struct vxlanhdr *vxh; 1615 struct vxlanhdr *vxh;
@@ -1626,12 +1617,11 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1626 int min_headroom; 1617 int min_headroom;
1627 int err; 1618 int err;
1628 1619
1629 if (!skb->encapsulation) { 1620 skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk));
1630 skb_reset_inner_headers(skb); 1621 if (IS_ERR(skb))
1631 skb->encapsulation = 1; 1622 return -EINVAL;
1632 }
1633 1623
1634 skb_scrub_packet(skb, false); 1624 skb_scrub_packet(skb, xnet);
1635 1625
1636 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1626 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1637 + VXLAN_HLEN + sizeof(struct ipv6hdr) 1627 + VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1663,27 +1653,14 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1663 uh->source = src_port; 1653 uh->source = src_port;
1664 1654
1665 uh->len = htons(skb->len); 1655 uh->len = htons(skb->len);
1666 uh->check = 0;
1667 1656
1668 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1657 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1669 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 1658 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1670 IPSKB_REROUTED); 1659 IPSKB_REROUTED);
1671 skb_dst_set(skb, dst); 1660 skb_dst_set(skb, dst);
1672 1661
1673 if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) { 1662 udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb,
1674 __wsum csum = skb_checksum(skb, 0, skb->len, 0); 1663 saddr, daddr, skb->len);
1675 skb->ip_summed = CHECKSUM_UNNECESSARY;
1676 uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
1677 IPPROTO_UDP, csum);
1678 if (uh->check == 0)
1679 uh->check = CSUM_MANGLED_0;
1680 } else {
1681 skb->ip_summed = CHECKSUM_PARTIAL;
1682 skb->csum_start = skb_transport_header(skb) - skb->head;
1683 skb->csum_offset = offsetof(struct udphdr, check);
1684 uh->check = ~csum_ipv6_magic(saddr, daddr,
1685 skb->len, IPPROTO_UDP, 0);
1686 }
1687 1664
1688 __skb_push(skb, sizeof(*ip6h)); 1665 __skb_push(skb, sizeof(*ip6h));
1689 skb_reset_network_header(skb); 1666 skb_reset_network_header(skb);
@@ -1699,10 +1676,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1699 ip6h->daddr = *daddr; 1676 ip6h->daddr = *daddr;
1700 ip6h->saddr = *saddr; 1677 ip6h->saddr = *saddr;
1701 1678
1702 err = handle_offloads(skb);
1703 if (err)
1704 return err;
1705
1706 ip6tunnel_xmit(skb, dev); 1679 ip6tunnel_xmit(skb, dev);
1707 return 0; 1680 return 0;
1708} 1681}
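
Review note: udp6_set_csum() centralizes the three cases the driver used to open-code — software checksum, hardware offload via CHECKSUM_PARTIAL, and (new here) the RFC 6935 zero checksum for tunnels. Caller-side shape, with assumed variable names:

static void fill_udp6_csum(struct sock *sk, struct sk_buff *skb,
			   const struct in6_addr *saddr,
			   const struct in6_addr *daddr)
{
	udp_hdr(skb)->len = htons(skb->len);
	/* nocheck=true emits a zero checksum; else sw csum or offload */
	udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
}
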
@@ -1711,17 +1684,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1711int vxlan_xmit_skb(struct vxlan_sock *vs, 1684int vxlan_xmit_skb(struct vxlan_sock *vs,
1712 struct rtable *rt, struct sk_buff *skb, 1685 struct rtable *rt, struct sk_buff *skb,
1713 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 1686 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1714 __be16 src_port, __be16 dst_port, __be32 vni) 1687 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
1715{ 1688{
1716 struct vxlanhdr *vxh; 1689 struct vxlanhdr *vxh;
1717 struct udphdr *uh; 1690 struct udphdr *uh;
1718 int min_headroom; 1691 int min_headroom;
1719 int err; 1692 int err;
1720 1693
1721 if (!skb->encapsulation) { 1694 skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx);
1722 skb_reset_inner_headers(skb); 1695 if (IS_ERR(skb))
1723 skb->encapsulation = 1; 1696 return -EINVAL;
1724 }
1725 1697
1726 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 1698 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1727 + VXLAN_HLEN + sizeof(struct iphdr) 1699 + VXLAN_HLEN + sizeof(struct iphdr)
@@ -1753,14 +1725,12 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1753 uh->source = src_port; 1725 uh->source = src_port;
1754 1726
1755 uh->len = htons(skb->len); 1727 uh->len = htons(skb->len);
1756 uh->check = 0;
1757 1728
1758 err = handle_offloads(skb); 1729 udp_set_csum(vs->sock->sk->sk_no_check_tx, skb,
1759 if (err) 1730 src, dst, skb->len);
1760 return err;
1761 1731
1762 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP, 1732 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
1763 tos, ttl, df, false); 1733 tos, ttl, df, xnet);
1764} 1734}
1765EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1735EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1766 1736
@@ -1853,7 +1823,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1853 fl4.daddr = dst->sin.sin_addr.s_addr; 1823 fl4.daddr = dst->sin.sin_addr.s_addr;
1854 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; 1824 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1855 1825
1856 rt = ip_route_output_key(dev_net(dev), &fl4); 1826 rt = ip_route_output_key(vxlan->net, &fl4);
1857 if (IS_ERR(rt)) { 1827 if (IS_ERR(rt)) {
1858 netdev_dbg(dev, "no route to %pI4\n", 1828 netdev_dbg(dev, "no route to %pI4\n",
1859 &dst->sin.sin_addr.s_addr); 1829 &dst->sin.sin_addr.s_addr);
@@ -1874,7 +1844,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1874 struct vxlan_dev *dst_vxlan; 1844 struct vxlan_dev *dst_vxlan;
1875 1845
1876 ip_rt_put(rt); 1846 ip_rt_put(rt);
1877 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); 1847 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1878 if (!dst_vxlan) 1848 if (!dst_vxlan)
1879 goto tx_error; 1849 goto tx_error;
1880 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1850 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1887,7 +1857,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1887 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, 1857 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
1888 fl4.saddr, dst->sin.sin_addr.s_addr, 1858 fl4.saddr, dst->sin.sin_addr.s_addr,
1889 tos, ttl, df, src_port, dst_port, 1859 tos, ttl, df, src_port, dst_port,
1890 htonl(vni << 8)); 1860 htonl(vni << 8),
1861 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1891 1862
1892 if (err < 0) 1863 if (err < 0)
1893 goto rt_tx_error; 1864 goto rt_tx_error;
@@ -1927,7 +1898,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1927 struct vxlan_dev *dst_vxlan; 1898 struct vxlan_dev *dst_vxlan;
1928 1899
1929 dst_release(ndst); 1900 dst_release(ndst);
1930 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); 1901 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1931 if (!dst_vxlan) 1902 if (!dst_vxlan)
1932 goto tx_error; 1903 goto tx_error;
1933 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1904 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1938,7 +1909,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1938 1909
1939 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, 1910 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
1940 dev, &fl6.saddr, &fl6.daddr, 0, ttl, 1911 dev, &fl6.saddr, &fl6.daddr, 0, ttl,
1941 src_port, dst_port, htonl(vni << 8)); 1912 src_port, dst_port, htonl(vni << 8),
1913 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1942#endif 1914#endif
1943 } 1915 }
1944 1916
@@ -2082,7 +2054,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2082static int vxlan_init(struct net_device *dev) 2054static int vxlan_init(struct net_device *dev)
2083{ 2055{
2084 struct vxlan_dev *vxlan = netdev_priv(dev); 2056 struct vxlan_dev *vxlan = netdev_priv(dev);
2085 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 2057 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2086 struct vxlan_sock *vs; 2058 struct vxlan_sock *vs;
2087 2059
2088 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2060 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2090,7 +2062,7 @@ static int vxlan_init(struct net_device *dev)
2090 return -ENOMEM; 2062 return -ENOMEM;
2091 2063
2092 spin_lock(&vn->sock_lock); 2064 spin_lock(&vn->sock_lock);
2093 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); 2065 vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
2094 if (vs) { 2066 if (vs) {
2095 /* If we have a socket with same port already, reuse it */ 2067 /* If we have a socket with same port already, reuse it */
2096 atomic_inc(&vs->refcnt); 2068 atomic_inc(&vs->refcnt);
@@ -2172,8 +2144,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
2172/* Cleanup timer and forwarding table on shutdown */ 2144/* Cleanup timer and forwarding table on shutdown */
2173static int vxlan_stop(struct net_device *dev) 2145static int vxlan_stop(struct net_device *dev)
2174{ 2146{
2175 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2176 struct vxlan_dev *vxlan = netdev_priv(dev); 2147 struct vxlan_dev *vxlan = netdev_priv(dev);
2148 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2177 struct vxlan_sock *vs = vxlan->vn_sock; 2149 struct vxlan_sock *vs = vxlan->vn_sock;
2178 2150
2179 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && 2151 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2202,7 +2174,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2202 struct net_device *lowerdev; 2174 struct net_device *lowerdev;
2203 int max_mtu; 2175 int max_mtu;
2204 2176
2205 lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex); 2177 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2206 if (lowerdev == NULL) 2178 if (lowerdev == NULL)
2207 return eth_change_mtu(dev, new_mtu); 2179 return eth_change_mtu(dev, new_mtu);
2208 2180
@@ -2285,7 +2257,6 @@ static void vxlan_setup(struct net_device *dev)
2285 2257
2286 dev->tx_queue_len = 0; 2258 dev->tx_queue_len = 0;
2287 dev->features |= NETIF_F_LLTX; 2259 dev->features |= NETIF_F_LLTX;
2288 dev->features |= NETIF_F_NETNS_LOCAL;
2289 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2260 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2290 dev->features |= NETIF_F_RXCSUM; 2261 dev->features |= NETIF_F_RXCSUM;
2291 dev->features |= NETIF_F_GSO_SOFTWARE; 2262 dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -2401,7 +2372,7 @@ static void vxlan_del_work(struct work_struct *work)
2401 * could be used for both IPv4 and IPv6 communications, but 2372 * could be used for both IPv4 and IPv6 communications, but
2402 * users may set bindv6only=1. 2373 * users may set bindv6only=1.
2403 */ 2374 */
2404static struct socket *create_v6_sock(struct net *net, __be16 port) 2375static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2405{ 2376{
2406 struct sock *sk; 2377 struct sock *sk;
2407 struct socket *sock; 2378 struct socket *sock;
@@ -2438,18 +2409,25 @@ static struct socket *create_v6_sock(struct net *net, __be16 port)
2438 2409
2439 /* Disable multicast loopback */ 2410 /* Disable multicast loopback */
2440 inet_sk(sk)->mc_loop = 0; 2411 inet_sk(sk)->mc_loop = 0;
2412
2413 if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
2414 udp_set_no_check6_tx(sk, true);
2415
2416 if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
2417 udp_set_no_check6_rx(sk, true);
2418
2441 return sock; 2419 return sock;
2442} 2420}
2443 2421
2444#else 2422#else
2445 2423
2446static struct socket *create_v6_sock(struct net *net, __be16 port) 2424static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2447{ 2425{
2448 return ERR_PTR(-EPFNOSUPPORT); 2426 return ERR_PTR(-EPFNOSUPPORT);
2449} 2427}
2450#endif 2428#endif
2451 2429
2452static struct socket *create_v4_sock(struct net *net, __be16 port) 2430static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
2453{ 2431{
2454 struct sock *sk; 2432 struct sock *sk;
2455 struct socket *sock; 2433 struct socket *sock;
@@ -2482,18 +2460,24 @@ static struct socket *create_v4_sock(struct net *net, __be16 port)
2482 2460
2483 /* Disable multicast loopback */ 2461 /* Disable multicast loopback */
2484 inet_sk(sk)->mc_loop = 0; 2462 inet_sk(sk)->mc_loop = 0;
2463
2464 if (!(flags & VXLAN_F_UDP_CSUM))
2465 sock->sk->sk_no_check_tx = 1;
2466
2485 return sock; 2467 return sock;
2486} 2468}
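
Review note: summarizing how the three new netlink flags land on the sockets. IPv4 has always permitted a zero UDP checksum, so only transmit needs a knob there, while IPv6 needs explicit tx and rx opt-ins:

/* VXLAN_F_UDP_CSUM           v4 tx: compute checksum (default: zero)
 * VXLAN_F_UDP_ZERO_CSUM6_TX  v6 tx: udp_set_no_check6_tx(sk, true)
 * VXLAN_F_UDP_ZERO_CSUM6_RX  v6 rx: udp_set_no_check6_rx(sk, true)
 */
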
2487 2469
2488/* Create new listen socket if needed */ 2470/* Create new listen socket if needed */
2489static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, 2471static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2490 vxlan_rcv_t *rcv, void *data, bool ipv6) 2472 vxlan_rcv_t *rcv, void *data,
2473 u32 flags)
2491{ 2474{
2492 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2475 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2493 struct vxlan_sock *vs; 2476 struct vxlan_sock *vs;
2494 struct socket *sock; 2477 struct socket *sock;
2495 struct sock *sk; 2478 struct sock *sk;
2496 unsigned int h; 2479 unsigned int h;
2480 bool ipv6 = !!(flags & VXLAN_F_IPV6);
2497 2481
2498 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2482 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2499 if (!vs) 2483 if (!vs)
@@ -2505,9 +2489,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2505 INIT_WORK(&vs->del_work, vxlan_del_work); 2489 INIT_WORK(&vs->del_work, vxlan_del_work);
2506 2490
2507 if (ipv6) 2491 if (ipv6)
2508 sock = create_v6_sock(net, port); 2492 sock = create_v6_sock(net, port, flags);
2509 else 2493 else
2510 sock = create_v4_sock(net, port); 2494 sock = create_v4_sock(net, port, flags);
2511 if (IS_ERR(sock)) { 2495 if (IS_ERR(sock)) {
2512 kfree(vs); 2496 kfree(vs);
2513 return ERR_CAST(sock); 2497 return ERR_CAST(sock);
@@ -2545,12 +2529,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2545 2529
2546struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, 2530struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2547 vxlan_rcv_t *rcv, void *data, 2531 vxlan_rcv_t *rcv, void *data,
2548 bool no_share, bool ipv6) 2532 bool no_share, u32 flags)
2549{ 2533{
2550 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2534 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2551 struct vxlan_sock *vs; 2535 struct vxlan_sock *vs;
2552 2536
2553 vs = vxlan_socket_create(net, port, rcv, data, ipv6); 2537 vs = vxlan_socket_create(net, port, rcv, data, flags);
2554 if (!IS_ERR(vs)) 2538 if (!IS_ERR(vs))
2555 return vs; 2539 return vs;
2556 2540
@@ -2578,12 +2562,12 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
2578static void vxlan_sock_work(struct work_struct *work) 2562static void vxlan_sock_work(struct work_struct *work)
2579{ 2563{
2580 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); 2564 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2581 struct net *net = dev_net(vxlan->dev); 2565 struct net *net = vxlan->net;
2582 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2566 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2583 __be16 port = vxlan->dst_port; 2567 __be16 port = vxlan->dst_port;
2584 struct vxlan_sock *nvs; 2568 struct vxlan_sock *nvs;
2585 2569
2586 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6); 2570 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
2587 spin_lock(&vn->sock_lock); 2571 spin_lock(&vn->sock_lock);
2588 if (!IS_ERR(nvs)) 2572 if (!IS_ERR(nvs))
2589 vxlan_vs_add_dev(nvs, vxlan); 2573 vxlan_vs_add_dev(nvs, vxlan);
@@ -2605,6 +2589,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2605 if (!data[IFLA_VXLAN_ID]) 2589 if (!data[IFLA_VXLAN_ID])
2606 return -EINVAL; 2590 return -EINVAL;
2607 2591
2592 vxlan->net = dev_net(dev);
2593
2608 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2594 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2609 dst->remote_vni = vni; 2595 dst->remote_vni = vni;
2610 2596
@@ -2705,12 +2691,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2705 if (data[IFLA_VXLAN_PORT]) 2691 if (data[IFLA_VXLAN_PORT])
2706 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 2692 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
2707 2693
2694 if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
2695 vxlan->flags |= VXLAN_F_UDP_CSUM;
2696
2697 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
2698 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
2699 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
2700
2701 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
2702 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2703 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2704
2708 if (vxlan_find_vni(net, vni, vxlan->dst_port)) { 2705 if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
2709 pr_info("duplicate VNI %u\n", vni); 2706 pr_info("duplicate VNI %u\n", vni);
2710 return -EEXIST; 2707 return -EEXIST;
2711 } 2708 }
2712 2709
2713 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2710 dev->ethtool_ops = &vxlan_ethtool_ops;
2714 2711
2715 /* create an fdb entry for a valid default destination */ 2712 /* create an fdb entry for a valid default destination */
2716 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2713 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2739,8 +2736,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2739 2736
2740static void vxlan_dellink(struct net_device *dev, struct list_head *head) 2737static void vxlan_dellink(struct net_device *dev, struct list_head *head)
2741{ 2738{
2742 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2743 struct vxlan_dev *vxlan = netdev_priv(dev); 2739 struct vxlan_dev *vxlan = netdev_priv(dev);
2740 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2744 2741
2745 spin_lock(&vn->sock_lock); 2742 spin_lock(&vn->sock_lock);
2746 if (!hlist_unhashed(&vxlan->hlist)) 2743 if (!hlist_unhashed(&vxlan->hlist))
@@ -2768,7 +2765,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
2768 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 2765 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
2769 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 2766 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
2770 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 2767 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
2771 nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */ 2768 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
2769 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
2770 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
2771 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
2772 0; 2772 0;
2773} 2773}
2774 2774
@@ -2828,7 +2828,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
2828 !!(vxlan->flags & VXLAN_F_L3MISS)) || 2828 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
2829 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || 2829 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
2830 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || 2830 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
2831 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port)) 2831 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
2832 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
2833 !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
2834 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
2835 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
2836 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
2837 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
2832 goto nla_put_failure; 2838 goto nla_put_failure;
2833 2839
2834 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 2840 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
@@ -2905,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
2905 return 0; 2911 return 0;
2906} 2912}
2907 2913
2914static void __net_exit vxlan_exit_net(struct net *net)
2915{
2916 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2917 struct vxlan_dev *vxlan, *next;
2918 struct net_device *dev, *aux;
2919 LIST_HEAD(list);
2920
2921 rtnl_lock();
2922 for_each_netdev_safe(net, dev, aux)
2923 if (dev->rtnl_link_ops == &vxlan_link_ops)
2924 unregister_netdevice_queue(dev, &list);
2925
2926 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
2927 /* If vxlan->dev is in the same netns, it has already been added
2928 * to the list by the previous loop.
2929 */
2930 if (!net_eq(dev_net(vxlan->dev), net))
2931	 			unregister_netdevice_queue(vxlan->dev, &list);
2932 }
2933
2934 unregister_netdevice_many(&list);
2935 rtnl_unlock();
2936}
2937
2908static struct pernet_operations vxlan_net_ops = { 2938static struct pernet_operations vxlan_net_ops = {
2909 .init = vxlan_init_net, 2939 .init = vxlan_init_net,
2940 .exit = vxlan_exit_net,
2910 .id = &vxlan_net_id, 2941 .id = &vxlan_net_id,
2911 .size = sizeof(struct vxlan_net), 2942 .size = sizeof(struct vxlan_net),
2912}; 2943};
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bcfff0d62de4..93ace042d0aa 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -26,6 +26,7 @@
26#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/delay.h>
29#include <linux/if.h> 30#include <linux/if.h>
30#include <linux/hdlc.h> 31#include <linux/hdlc.h>
31#include <asm/io.h> 32#include <asm/io.h>
@@ -678,7 +679,6 @@ static inline void
678fst_cpureset(struct fst_card_info *card) 679fst_cpureset(struct fst_card_info *card)
679{ 680{
680 unsigned char interrupt_line_register; 681 unsigned char interrupt_line_register;
681 unsigned long j = jiffies + 1;
682 unsigned int regval; 682 unsigned int regval;
683 683
684 if (card->family == FST_FAMILY_TXU) { 684 if (card->family == FST_FAMILY_TXU) {
@@ -696,16 +696,12 @@ fst_cpureset(struct fst_card_info *card)
696 /* 696 /*
697 * We are delaying here to allow the 9054 to reset itself 697 * We are delaying here to allow the 9054 to reset itself
698 */ 698 */
699 j = jiffies + 1; 699 usleep_range(10, 20);
700 while (jiffies < j)
701 /* Do nothing */ ;
702 outw(0x240f, card->pci_conf + CNTRL_9054 + 2); 700 outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
703 /* 701 /*
704 * We are delaying here to allow the 9054 to reload its eeprom 702 * We are delaying here to allow the 9054 to reload its eeprom
705 */ 703 */
706 j = jiffies + 1; 704 usleep_range(10, 20);
707 while (jiffies < j)
708 /* Do nothing */ ;
709 outw(0x040f, card->pci_conf + CNTRL_9054 + 2); 705 outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
710 706
711 if (pci_write_config_byte 707 if (pci_write_config_byte
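
Review note: the removed loop busy-waits on jiffies, burning a full tick of CPU and spinning forever if it ever runs with interrupts disabled; usleep_range() actually sleeps. The kernel's usual delay selection, for reference:

/* atomic context:       udelay(us);             busy-wait, short only
 * process ctx, < ~20ms: usleep_range(min, max); hrtimer-backed sleep
 * process ctx, longer:  msleep(ms);             jiffy-granular sleep
 */
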
@@ -886,20 +882,18 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
886 * Receive a frame through the DMA 882 * Receive a frame through the DMA
887 */ 883 */
888static inline void 884static inline void
889fst_rx_dma(struct fst_card_info *card, dma_addr_t skb, 885fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
890 dma_addr_t mem, int len)
891{ 886{
892 /* 887 /*
893 * This routine will setup the DMA and start it 888 * This routine will setup the DMA and start it
894 */ 889 */
895 890
896 dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n", 891 dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
897 (unsigned long) skb, (unsigned long) mem, len);
898 if (card->dmarx_in_progress) { 892 if (card->dmarx_in_progress) {
899 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n"); 893 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
900 } 894 }
901 895
902 outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */ 896 outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
903 outl(mem, card->pci_conf + DMALADR0); /* from here */ 897 outl(mem, card->pci_conf + DMALADR0); /* from here */
904 outl(len, card->pci_conf + DMASIZ0); /* for this length */ 898 outl(len, card->pci_conf + DMASIZ0); /* for this length */
905 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */ 899 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -915,20 +909,19 @@ fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
915 * Send a frame through the DMA 909 * Send a frame through the DMA
916 */ 910 */
917static inline void 911static inline void
918fst_tx_dma(struct fst_card_info *card, unsigned char *skb, 912fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
919 unsigned char *mem, int len)
920{ 913{
921 /* 914 /*
922 * This routine will setup the DMA and start it. 915 * This routine will setup the DMA and start it.
923 */ 916 */
924 917
925 dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len); 918 dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
926 if (card->dmatx_in_progress) { 919 if (card->dmatx_in_progress) {
927 dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n"); 920 dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
928 } 921 }
929 922
930 outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */ 923 outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
931 outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */ 924 outl(mem, card->pci_conf + DMALADR1); /* to here */
932 outl(len, card->pci_conf + DMASIZ1); /* for this length */ 925 outl(len, card->pci_conf + DMASIZ1); /* for this length */
933 outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */ 926 outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
934 927
@@ -1405,9 +1398,7 @@ do_bottom_half_tx(struct fst_card_info *card)
1405 card->dma_len_tx = skb->len; 1398 card->dma_len_tx = skb->len;
1406 card->dma_txpos = port->txpos; 1399 card->dma_txpos = port->txpos;
1407 fst_tx_dma(card, 1400 fst_tx_dma(card,
1408 (char *) card-> 1401 card->tx_dma_handle_card,
1409 tx_dma_handle_card,
1410 (char *)
1411 BUF_OFFSET(txBuffer[pi] 1402 BUF_OFFSET(txBuffer[pi]
1412 [port->txpos][0]), 1403 [port->txpos][0]),
1413 skb->len); 1404 skb->len);
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index de3bbf43fc5a..cdd45fb8a1f6 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1322,10 +1322,6 @@ NOTE: This is rather a useless action right now, as the
1322 1322
1323static int sdla_change_mtu(struct net_device *dev, int new_mtu) 1323static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1324{ 1324{
1325 struct frad_local *flp;
1326
1327 flp = netdev_priv(dev);
1328
1329 if (netif_running(dev)) 1325 if (netif_running(dev))
1330 return -EBUSY; 1326 return -EBUSY;
1331 1327
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4a01e5c7fe09..4c417903e9be 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1061,7 +1061,7 @@ int i2400m_firmware_check(struct i2400m *i2400m)
1061 goto error_bad_major; 1061 goto error_bad_major;
1062 } 1062 }
1063 result = 0; 1063 result = 0;
1064 if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR) 1064 if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
1065 dev_warn(dev, "untested minor fw version %u.%u.%u\n", 1065 dev_warn(dev, "untested minor fw version %u.%u.%u\n",
1066 major, minor, branch); 1066 major, minor, branch);
1067 /* Yes, we ignore the branch -- we don't have to track it */ 1067 /* Yes, we ignore the branch -- we don't have to track it */
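
Review note: the old predicate warned only for minors strictly between the two supported constants — for adjacent integer constants that is unsatisfiable, so the "untested minor" warning was effectively dead code; the new predicate fires for any minor outside the [I2400M_HDIv_MINOR, I2400M_HDIv_MINOR_2] window. For illustration, with assumed values 1 and 2:

/* minor:            0      1      2      3
 * old (m<2 && m>1): false  false  false  false   (never warns)
 * new (m>2 || m<1): true   false  false  true    (warns outside [1,2])
 */
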
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c34d2fccfac..9c78090e72f8 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
500 */ 500 */
501int i2400m_pre_reset(struct i2400m *i2400m) 501int i2400m_pre_reset(struct i2400m *i2400m)
502{ 502{
503 int result;
504 struct device *dev = i2400m_dev(i2400m); 503 struct device *dev = i2400m_dev(i2400m);
505 504
506 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 505 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
507 d_printf(1, dev, "pre-reset shut down\n"); 506 d_printf(1, dev, "pre-reset shut down\n");
508 507
509 result = 0;
510 mutex_lock(&i2400m->init_mutex); 508 mutex_lock(&i2400m->init_mutex);
511 if (i2400m->updown) { 509 if (i2400m->updown) {
512 netif_tx_disable(i2400m->wimax_dev.net_dev); 510 netif_tx_disable(i2400m->wimax_dev.net_dev);
513 __i2400m_dev_stop(i2400m); 511 __i2400m_dev_stop(i2400m);
514 result = 0;
515		/* don't set updown to zero -- this way 512		/* don't set updown to zero -- this way
516 * post_reset can restore properly */ 513 * post_reset can restore properly */
517 } 514 }
518 mutex_unlock(&i2400m->init_mutex); 515 mutex_unlock(&i2400m->init_mutex);
519 if (i2400m->bus_release) 516 if (i2400m->bus_release)
520 i2400m->bus_release(i2400m); 517 i2400m->bus_release(i2400m);
521 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 518 d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
522 return result; 519 return 0;
523} 520}
524EXPORT_SYMBOL_GPL(i2400m_pre_reset); 521EXPORT_SYMBOL_GPL(i2400m_pre_reset);
525 522
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b3bfa717d5..d48776e4f343 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -365,15 +365,15 @@ static inline unsigned long at76_get_timeout(struct dfu_status *s)
365static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size, 365static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
366 int manifest_sync_timeout) 366 int manifest_sync_timeout)
367{ 367{
368 u8 *block;
369 struct dfu_status dfu_stat_buf;
370 int ret = 0; 368 int ret = 0;
371 int need_dfu_state = 1; 369 int need_dfu_state = 1;
372 int is_done = 0; 370 int is_done = 0;
373 u8 dfu_state = 0;
374 u32 dfu_timeout = 0; 371 u32 dfu_timeout = 0;
375 int bsize = 0; 372 int bsize = 0;
376 int blockno = 0; 373 int blockno = 0;
374 struct dfu_status *dfu_stat_buf = NULL;
375 u8 *dfu_state = NULL;
376 u8 *block = NULL;
377 377
378 at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size, 378 at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size,
379 manifest_sync_timeout); 379 manifest_sync_timeout);
@@ -383,13 +383,28 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
385 385
386 dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
387 if (!dfu_stat_buf) {
388 ret = -ENOMEM;
389 goto exit;
390 }
391
386 block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL); 392 block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
387 if (!block) 393 if (!block) {
388 return -ENOMEM; 394 ret = -ENOMEM;
395 goto exit;
396 }
397
398 dfu_state = kmalloc(sizeof(u8), GFP_KERNEL);
399 if (!dfu_state) {
400 ret = -ENOMEM;
401 goto exit;
402 }
403 *dfu_state = 0;
389 404
390 do { 405 do {
391 if (need_dfu_state) { 406 if (need_dfu_state) {
392 ret = at76_dfu_get_state(udev, &dfu_state); 407 ret = at76_dfu_get_state(udev, dfu_state);
393 if (ret < 0) { 408 if (ret < 0) {
394 dev_err(&udev->dev, 409 dev_err(&udev->dev,
395 "cannot get DFU state: %d\n", ret); 410 "cannot get DFU state: %d\n", ret);
@@ -398,13 +413,13 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
398 need_dfu_state = 0; 413 need_dfu_state = 0;
399 } 414 }
400 415
401 switch (dfu_state) { 416 switch (*dfu_state) {
402 case STATE_DFU_DOWNLOAD_SYNC: 417 case STATE_DFU_DOWNLOAD_SYNC:
403 at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC"); 418 at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC");
404 ret = at76_dfu_get_status(udev, &dfu_stat_buf); 419 ret = at76_dfu_get_status(udev, dfu_stat_buf);
405 if (ret >= 0) { 420 if (ret >= 0) {
406 dfu_state = dfu_stat_buf.state; 421 *dfu_state = dfu_stat_buf->state;
407 dfu_timeout = at76_get_timeout(&dfu_stat_buf); 422 dfu_timeout = at76_get_timeout(dfu_stat_buf);
408 need_dfu_state = 0; 423 need_dfu_state = 0;
409 } else 424 } else
410 dev_err(&udev->dev, 425 dev_err(&udev->dev,
@@ -447,12 +462,12 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
447 case STATE_DFU_MANIFEST_SYNC: 462 case STATE_DFU_MANIFEST_SYNC:
448 at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC"); 463 at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC");
449 464
450 ret = at76_dfu_get_status(udev, &dfu_stat_buf); 465 ret = at76_dfu_get_status(udev, dfu_stat_buf);
451 if (ret < 0) 466 if (ret < 0)
452 break; 467 break;
453 468
454 dfu_state = dfu_stat_buf.state; 469 *dfu_state = dfu_stat_buf->state;
455 dfu_timeout = at76_get_timeout(&dfu_stat_buf); 470 dfu_timeout = at76_get_timeout(dfu_stat_buf);
456 need_dfu_state = 0; 471 need_dfu_state = 0;
457 472
458 /* override the timeout from the status response, 473 /* override the timeout from the status response,
@@ -484,14 +499,17 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
484 break; 499 break;
485 500
486 default: 501 default:
487 at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", dfu_state); 502 at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", *dfu_state);
488 ret = -EINVAL; 503 ret = -EINVAL;
489 break; 504 break;
490 } 505 }
491 } while (!is_done && (ret >= 0)); 506 } while (!is_done && (ret >= 0));
492 507
493exit: 508exit:
509 kfree(dfu_state);
494 kfree(block); 510 kfree(block);
511 kfree(dfu_stat_buf);
512
495 if (ret >= 0) 513 if (ret >= 0)
496 ret = 0; 514 ret = 0;
497 515
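
Review note: moving dfu_stat_buf, dfu_state and block off the stack matters because buffers handed to usb_control_msg()/usb_bulk_msg() are used for DMA and must be heap-allocated; stack memory is not guaranteed DMA-able. The pattern, reduced to one hypothetical helper:

static int usb_read_byte(struct usb_device *udev, u8 request, u8 *out)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap: safe for DMA */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_CLASS | USB_DIR_IN |
			      USB_RECIP_INTERFACE,
			      0, 0, buf, 1, USB_CTRL_GET_TIMEOUT);
	if (ret >= 0)
		*out = *buf;
	kfree(buf);
	return ret;
}
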
@@ -1277,6 +1295,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
1277 dev_err(&udev->dev, 1295 dev_err(&udev->dev,
1278 "loading %dth firmware block failed: %d\n", 1296 "loading %dth firmware block failed: %d\n",
1279 blockno, ret); 1297 blockno, ret);
1298 ret = -EIO;
1280 goto exit; 1299 goto exit;
1281 } 1300 }
1282 buf += bsize; 1301 buf += bsize;
@@ -1410,6 +1429,8 @@ static int at76_startup_device(struct at76_priv *priv)
1410 /* remove BSSID from previous run */ 1429 /* remove BSSID from previous run */
1411 memset(priv->bssid, 0, ETH_ALEN); 1430 memset(priv->bssid, 0, ETH_ALEN);
1412 1431
1432 priv->scanning = false;
1433
1413 if (at76_set_radio(priv, 1) == 1) 1434 if (at76_set_radio(priv, 1) == 1)
1414 at76_wait_completion(priv, CMD_RADIO_ON); 1435 at76_wait_completion(priv, CMD_RADIO_ON);
1415 1436
@@ -1483,6 +1504,52 @@ static void at76_work_submit_rx(struct work_struct *work)
1483 mutex_unlock(&priv->mtx); 1504 mutex_unlock(&priv->mtx);
1484} 1505}
1485 1506
1507/* This is a workaround to make scanning work:
1508 * currently mac80211 does not process frames that carry no
1509 * frequency information.
1510 * However, during a scan the HW performs the sweep by itself,
1511 * and we cannot know where the radio is actually tuned.
1512 * This function does its best to guess that information.
1513 * During a scan, if the current frame is a beacon or a probe
1514 * response, the channel information is extracted from it.
1515 * When not scanning, for other frames, or if for whatever reason
1516 * we fail to parse beacons and probe responses, this function
1517 * returns priv->channel, which should be correct at least when
1518 * we are not scanning.
1519 */
1520static inline int at76_guess_freq(struct at76_priv *priv)
1521{
1522 size_t el_off;
1523 const u8 *el;
1524 int channel = priv->channel;
1525 int len = priv->rx_skb->len;
1526 struct ieee80211_hdr *hdr = (void *)priv->rx_skb->data;
1527
1528 if (!priv->scanning)
1529 goto exit;
1530
1531 if (len < 24)
1532 goto exit;
1533
1534 if (ieee80211_is_probe_resp(hdr->frame_control)) {
1535 el_off = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
1536 el = ((struct ieee80211_mgmt *)hdr)->u.probe_resp.variable;
1537 } else if (ieee80211_is_beacon(hdr->frame_control)) {
1538 el_off = offsetof(struct ieee80211_mgmt, u.beacon.variable);
1539 el = ((struct ieee80211_mgmt *)hdr)->u.beacon.variable;
1540 } else {
1541 goto exit;
1542 }
1543 len -= el_off;
1544
1545 el = cfg80211_find_ie(WLAN_EID_DS_PARAMS, el, len);
1546 if (el && el[1] > 0)
1547 channel = el[2];
1548
1549exit:
1550 return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
1551}
1552
1486static void at76_rx_tasklet(unsigned long param) 1553static void at76_rx_tasklet(unsigned long param)
1487{ 1554{
1488 struct urb *urb = (struct urb *)param; 1555 struct urb *urb = (struct urb *)param;
@@ -1523,6 +1590,8 @@ static void at76_rx_tasklet(unsigned long param)
1523 rx_status.signal = buf->rssi; 1590 rx_status.signal = buf->rssi;
1524 rx_status.flag |= RX_FLAG_DECRYPTED; 1591 rx_status.flag |= RX_FLAG_DECRYPTED;
1525 rx_status.flag |= RX_FLAG_IV_STRIPPED; 1592 rx_status.flag |= RX_FLAG_IV_STRIPPED;
1593 rx_status.band = IEEE80211_BAND_2GHZ;
1594 rx_status.freq = at76_guess_freq(priv);
1526 1595
1527 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", 1596 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
1528 priv->rx_skb->len, priv->rx_skb->data_len); 1597 priv->rx_skb->len, priv->rx_skb->data_len);
@@ -1875,6 +1944,8 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1875 if (is_valid_ether_addr(priv->bssid)) 1944 if (is_valid_ether_addr(priv->bssid))
1876 at76_join(priv); 1945 at76_join(priv);
1877 1946
1947 priv->scanning = false;
1948
1878 mutex_unlock(&priv->mtx); 1949 mutex_unlock(&priv->mtx);
1879 1950
1880 ieee80211_scan_completed(priv->hw, false); 1951 ieee80211_scan_completed(priv->hw, false);
@@ -1929,6 +2000,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
1929 goto exit; 2000 goto exit;
1930 } 2001 }
1931 2002
2003 priv->scanning = true;
1932 ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan, 2004 ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
1933 SCAN_POLL_INTERVAL); 2005 SCAN_POLL_INTERVAL);
1934 2006
@@ -2020,6 +2092,44 @@ static void at76_configure_filter(struct ieee80211_hw *hw,
2020 ieee80211_queue_work(hw, &priv->work_set_promisc); 2092 ieee80211_queue_work(hw, &priv->work_set_promisc);
2021} 2093}
2022 2094
2095static int at76_set_wep(struct at76_priv *priv)
2096{
2097 int ret = 0;
2098 struct mib_mac_wep *mib_data = &priv->mib_buf.data.wep_mib;
2099
2100 priv->mib_buf.type = MIB_MAC_WEP;
2101 priv->mib_buf.size = sizeof(struct mib_mac_wep);
2102 priv->mib_buf.index = 0;
2103
2104 memset(mib_data, 0, sizeof(*mib_data));
2105
2106 if (priv->wep_enabled) {
2107 if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
2108 mib_data->encryption_level = 2;
2109 else
2110 mib_data->encryption_level = 1;
2111
2112 /* always exclude unencrypted if WEP is active */
2113 mib_data->exclude_unencrypted = 1;
2114 } else {
2115 mib_data->exclude_unencrypted = 0;
2116 mib_data->encryption_level = 0;
2117 }
2118
2119 mib_data->privacy_invoked = priv->wep_enabled;
2120 mib_data->wep_default_key_id = priv->wep_key_id;
2121 memcpy(mib_data->wep_default_keyvalue, priv->wep_keys,
2122 sizeof(priv->wep_keys));
2123
2124 ret = at76_set_mib(priv, &priv->mib_buf);
2125
2126 if (ret < 0)
2127 wiphy_err(priv->hw->wiphy,
2128 "set_mib (wep) failed: %d\n", ret);
2129
2130 return ret;
2131}
2132
2023static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2133static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2024 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 2134 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2025 struct ieee80211_key_conf *key) 2135 struct ieee80211_key_conf *key)
@@ -2062,7 +2172,7 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2062 priv->wep_enabled = 1; 2172 priv->wep_enabled = 1;
2063 } 2173 }
2064 2174
2065 at76_startup_device(priv); 2175 at76_set_wep(priv);
2066 2176
2067 mutex_unlock(&priv->mtx); 2177 mutex_unlock(&priv->mtx);
2068 2178
@@ -2330,16 +2440,22 @@ static int at76_probe(struct usb_interface *interface,
2330 struct usb_device *udev; 2440 struct usb_device *udev;
2331 int op_mode; 2441 int op_mode;
2332 int need_ext_fw = 0; 2442 int need_ext_fw = 0;
2333 struct mib_fw_version fwv; 2443 struct mib_fw_version *fwv = NULL;
2334 int board_type = (int)id->driver_info; 2444 int board_type = (int)id->driver_info;
2335 2445
2336 udev = usb_get_dev(interface_to_usbdev(interface)); 2446 udev = usb_get_dev(interface_to_usbdev(interface));
2337 2447
2448 fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);
2449 if (!fwv) {
2450 ret = -ENOMEM;
2451 goto exit;
2452 }
2453
2338 /* Load firmware into kernel memory */ 2454 /* Load firmware into kernel memory */
2339 fwe = at76_load_firmware(udev, board_type); 2455 fwe = at76_load_firmware(udev, board_type);
2340 if (!fwe) { 2456 if (!fwe) {
2341 ret = -ENOENT; 2457 ret = -ENOENT;
2342 goto error; 2458 goto exit;
2343 } 2459 }
2344 2460
2345 op_mode = at76_get_op_mode(udev); 2461 op_mode = at76_get_op_mode(udev);
@@ -2353,7 +2469,7 @@ static int at76_probe(struct usb_interface *interface,
2353 dev_err(&interface->dev, 2469 dev_err(&interface->dev,
2354 "cannot handle a device in HW_CONFIG_MODE\n"); 2470 "cannot handle a device in HW_CONFIG_MODE\n");
2355 ret = -EBUSY; 2471 ret = -EBUSY;
2356 goto error; 2472 goto exit;
2357 } 2473 }
2358 2474
2359 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH 2475 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
@@ -2366,10 +2482,10 @@ static int at76_probe(struct usb_interface *interface,
2366 dev_err(&interface->dev, 2482 dev_err(&interface->dev,
2367 "error %d downloading internal firmware\n", 2483 "error %d downloading internal firmware\n",
2368 ret); 2484 ret);
2369 goto error; 2485 goto exit;
2370 } 2486 }
2371 usb_put_dev(udev); 2487 usb_put_dev(udev);
2372 return ret; 2488 goto exit;
2373 } 2489 }
2374 2490
2375 /* Internal firmware already inside the device. Get firmware 2491 /* Internal firmware already inside the device. Get firmware
@@ -2382,8 +2498,8 @@ static int at76_probe(struct usb_interface *interface,
2382 * query the device for the fw version */ 2498 * query the device for the fw version */
2383 if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100) 2499 if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100)
2384 || (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) { 2500 || (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) {
2385 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 2501 ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
2386 if (ret < 0 || (fwv.major | fwv.minor) == 0) 2502 if (ret < 0 || (fwv->major | fwv->minor) == 0)
2387 need_ext_fw = 1; 2503 need_ext_fw = 1;
2388 } else 2504 } else
2389 /* No way to check firmware version, reload to be sure */ 2505 /* No way to check firmware version, reload to be sure */
@@ -2394,37 +2510,37 @@ static int at76_probe(struct usb_interface *interface,
2394 "downloading external firmware\n"); 2510 "downloading external firmware\n");
2395 2511
2396 ret = at76_load_external_fw(udev, fwe); 2512 ret = at76_load_external_fw(udev, fwe);
2397 if (ret) 2513 if (ret < 0)
2398 goto error; 2514 goto exit;
2399 2515
2400 /* Re-check firmware version */ 2516 /* Re-check firmware version */
2401 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 2517 ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
2402 if (ret < 0) { 2518 if (ret < 0) {
2403 dev_err(&interface->dev, 2519 dev_err(&interface->dev,
2404 "error %d getting firmware version\n", ret); 2520 "error %d getting firmware version\n", ret);
2405 goto error; 2521 goto exit;
2406 } 2522 }
2407 } 2523 }
2408 2524
2409 priv = at76_alloc_new_device(udev); 2525 priv = at76_alloc_new_device(udev);
2410 if (!priv) { 2526 if (!priv) {
2411 ret = -ENOMEM; 2527 ret = -ENOMEM;
2412 goto error; 2528 goto exit;
2413 } 2529 }
2414 2530
2415 usb_set_intfdata(interface, priv); 2531 usb_set_intfdata(interface, priv);
2416 2532
2417 memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version)); 2533 memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
2418 priv->board_type = board_type; 2534 priv->board_type = board_type;
2419 2535
2420 ret = at76_init_new_device(priv, interface); 2536 ret = at76_init_new_device(priv, interface);
2421 if (ret < 0) 2537 if (ret < 0)
2422 at76_delete_device(priv); 2538 at76_delete_device(priv);
2423 2539
2424 return ret; 2540exit:
2425 2541 kfree(fwv);
2426error: 2542 if (ret < 0)
2427 usb_put_dev(udev); 2543 usb_put_dev(udev);
2428 return ret; 2544 return ret;
2429} 2545}
2430 2546
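One detail worth calling out in the probe changes above: struct mib_fw_version used to live on the stack and be passed to at76_get_mib(), which ultimately hands the buffer to a USB transfer. Buffers used for USB/DMA must be heap-allocated (kmalloc), never stack memory, which is presumably the motivation for the kmalloc here, on top of unifying the exit path. A minimal sketch of the pattern; the helper name is made up:

/* hypothetical helper: heap-allocate any buffer a USB transfer fills */
static int at76_fetch_fw_version(struct usb_device *udev,
				 struct mib_fw_version *out)
{
	struct mib_fw_version *fwv;
	int ret;

	fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);	/* DMA-safe */
	if (!fwv)
		return -ENOMEM;

	ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
	if (ret >= 0)
		*out = *fwv;

	kfree(fwv);
	return ret;
}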
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index f14a65473fe8..55090a38ac95 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -219,18 +219,6 @@ struct at76_req_join {
219 u8 reserved; 219 u8 reserved;
220} __packed; 220} __packed;
221 221
222struct set_mib_buffer {
223 u8 type;
224 u8 size;
225 u8 index;
226 u8 reserved;
227 union {
228 u8 byte;
229 __le16 word;
230 u8 addr[ETH_ALEN];
231 } data;
232} __packed;
233
234struct mib_local { 222struct mib_local {
235 u16 reserved0; 223 u16 reserved0;
236 u8 beacon_enable; 224 u8 beacon_enable;
@@ -334,6 +322,19 @@ struct mib_mdomain {
334 u8 channel_list[14]; /* 0 for invalid channels */ 322 u8 channel_list[14]; /* 0 for invalid channels */
335} __packed; 323} __packed;
336 324
325struct set_mib_buffer {
326 u8 type;
327 u8 size;
328 u8 index;
329 u8 reserved;
330 union {
331 u8 byte;
332 __le16 word;
333 u8 addr[ETH_ALEN];
334 struct mib_mac_wep wep_mib;
335 } data;
336} __packed;
337
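The move of set_mib_buffer below mib_mac_wep, rather than simply adding the new union member in place, is forced by the language: a union (or struct) member embeds the complete object, so its type must be fully defined at the point of use. A two-member illustration:

struct s;			/* incomplete type */
union u {
	struct s m;		/* error: field has incomplete type */
	struct s *p;		/* fine: a pointer needs no definition */
};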
337struct at76_fw_header { 338struct at76_fw_header {
338 __le32 crc; /* CRC32 of the whole image */ 339 __le32 crc; /* CRC32 of the whole image */
339 __le32 board_type; /* firmware compatibility code */ 340 __le32 board_type; /* firmware compatibility code */
@@ -417,6 +418,7 @@ struct at76_priv {
417 int scan_max_time; /* scan max channel time */ 418 int scan_max_time; /* scan max channel time */
418 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */ 419 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
419 int scan_need_any; /* if set, need to scan for any ESSID */ 420 int scan_need_any; /* if set, need to scan for any ESSID */
421 bool scanning; /* if set, the scan is running */
420 422
421 u16 assoc_id; /* current association ID, if associated */ 423 u16 assoc_id; /* current association ID, if associated */
422 424
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 507d9a9ee69a..f92050617ae6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1090 return ret; 1090 return ret;
1091} 1091}
1092 1092
1093static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1093static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1094 u32 queues, bool drop)
1094{ 1095{
1095 struct ar5523 *ar = hw->priv; 1096 struct ar5523 *ar = hw->priv;
1096 1097
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index a1f099628850..17d221abd58c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
175 return 0; 175 return 0;
176} 176}
177 177
178int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) 178int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
179{ 179{
180 struct bmi_cmd cmd; 180 struct bmi_cmd cmd;
181 union bmi_resp resp; 181 union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
184 int ret; 184 int ret;
185 185
186 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", 186 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
187 address, *param); 187 address, param);
188 188
189 if (ar->bmi.done_sent) { 189 if (ar->bmi.done_sent) {
190 ath10k_warn("command disallowed\n"); 190 ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
193 193
194 cmd.id = __cpu_to_le32(BMI_EXECUTE); 194 cmd.id = __cpu_to_le32(BMI_EXECUTE);
195 cmd.execute.addr = __cpu_to_le32(address); 195 cmd.execute.addr = __cpu_to_le32(address);
196 cmd.execute.param = __cpu_to_le32(*param); 196 cmd.execute.param = __cpu_to_le32(param);
197 197
198 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); 198 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
199 if (ret) { 199 if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
204 if (resplen < sizeof(resp.execute)) { 204 if (resplen < sizeof(resp.execute)) {
205 ath10k_warn("invalid execute response length (%d)\n", 205 ath10k_warn("invalid execute response length (%d)\n",
206 resplen); 206 resplen);
207 return ret; 207 return -EIO;
208 } 208 }
209 209
210 *param = __le32_to_cpu(resp.execute.result); 210 *result = __le32_to_cpu(resp.execute.result);
211
212 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
213
211 return 0; 214 return 0;
212} 215}
213 216
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 8d81ce1cec21..111ab701465c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -201,7 +201,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
201 \ 201 \
202 addr = host_interest_item_address(HI_ITEM(item)); \ 202 addr = host_interest_item_address(HI_ITEM(item)); \
203 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ 203 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
204 *val = __le32_to_cpu(tmp); \ 204 if (!ret) \
205 *val = __le32_to_cpu(tmp); \
205 ret; \ 206 ret; \
206 }) 207 })
207 208
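The macro fix above leans on the GNU statement-expression extension: the ({ ... }) block evaluates to its last expression, ret, and guarding the store with if (!ret) keeps a failed read from overwriting the caller's variable with an uninitialized tmp. A self-contained userspace sketch of the same shape (fake_read and read_item are invented names, GNU C required):

#include <stdio.h>

static int fake_read(unsigned int *out)
{
	(void)out;			/* simulate an I/O error: write nothing */
	return -5;
}

/* the block's last expression, ret, is the macro's value */
#define read_item(val) ({			\
	unsigned int tmp = 0;			\
	int ret = fake_read(&tmp);		\
	if (!ret)				\
		*(val) = tmp;			\
	ret;					\
})

int main(void)
{
	unsigned int v = 42;

	if (read_item(&v))
		printf("read failed, v is still %u\n", v);	/* 42 */
	return 0;
}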
@@ -217,7 +218,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
217 ret; \ 218 ret; \
218 }) 219 })
219 220
220int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); 221int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
221int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); 222int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
222int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); 223int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
223int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, 224int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..d185dc0cd12b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -329,6 +329,33 @@ exit:
329 return ret; 329 return ret;
330} 330}
331 331
332void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
333{
334 struct ath10k *ar = pipe->ar;
335 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
336 struct ath10k_ce_ring *src_ring = pipe->src_ring;
337 u32 ctrl_addr = pipe->ctrl_addr;
338
339 lockdep_assert_held(&ar_pci->ce_lock);
340
341 /*
342 * This function must be called only if there is an incomplete
343 * scatter-gather transfer (before index register is updated)
344 * that needs to be cleaned up.
345 */
346 if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
347 return;
348
349 if (WARN_ON_ONCE(src_ring->write_index ==
350 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
351 return;
352
353 src_ring->write_index--;
354 src_ring->write_index &= src_ring->nentries_mask;
355
356 src_ring->per_transfer_context[src_ring->write_index] = NULL;
357}
358
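The unwind in __ath10k_ce_send_revert() is safe even when write_index is 0 because nentries is a power of two: the unsigned decrement wraps to UINT_MAX and the mask brings it back to the last slot. A standalone illustration of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int nentries = 8;		/* power of two, as in the CE rings */
	unsigned int mask = nentries - 1;	/* 0x7 */
	unsigned int write_index = 0;

	write_index = (write_index - 1) & mask;	/* unsigned wrap, then mask */
	printf("%u\n", write_index);		/* prints 7 */
	return 0;
}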
332int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, 359int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
333 void *per_transfer_context, 360 void *per_transfer_context,
334 u32 buffer, 361 u32 buffer,
@@ -840,35 +867,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
840 867
841static int ath10k_ce_init_src_ring(struct ath10k *ar, 868static int ath10k_ce_init_src_ring(struct ath10k *ar,
842 unsigned int ce_id, 869 unsigned int ce_id,
843 struct ath10k_ce_pipe *ce_state,
844 const struct ce_attr *attr) 870 const struct ce_attr *attr)
845{ 871{
846 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 872 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
847 struct ath10k_ce_ring *src_ring; 873 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
848 unsigned int nentries = attr->src_nentries; 874 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
849 unsigned int ce_nbytes; 875 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
850 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
851 dma_addr_t base_addr;
852 char *ptr;
853
854 nentries = roundup_pow_of_two(nentries);
855
856 if (ce_state->src_ring) {
857 WARN_ON(ce_state->src_ring->nentries != nentries);
858 return 0;
859 }
860
861 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
862 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
863 if (ptr == NULL)
864 return -ENOMEM;
865 876
866 ce_state->src_ring = (struct ath10k_ce_ring *)ptr; 877 nentries = roundup_pow_of_two(attr->src_nentries);
867 src_ring = ce_state->src_ring;
868 878
869 ptr += sizeof(struct ath10k_ce_ring); 879 memset(src_ring->per_transfer_context, 0,
870 src_ring->nentries = nentries; 880 nentries * sizeof(*src_ring->per_transfer_context));
871 src_ring->nentries_mask = nentries - 1;
872 881
873 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 882 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
874 src_ring->sw_index &= src_ring->nentries_mask; 883 src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +887,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
878 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); 887 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
879 src_ring->write_index &= src_ring->nentries_mask; 888 src_ring->write_index &= src_ring->nentries_mask;
880 889
881 src_ring->per_transfer_context = (void **)ptr; 890 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
891 src_ring->base_addr_ce_space);
892 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
893 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
894 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
895 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
896 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
897
898 ath10k_dbg(ATH10K_DBG_BOOT,
899 "boot init ce src ring id %d entries %d base_addr %p\n",
900 ce_id, nentries, src_ring->base_addr_owner_space);
901
902 return 0;
903}
904
905static int ath10k_ce_init_dest_ring(struct ath10k *ar,
906 unsigned int ce_id,
907 const struct ce_attr *attr)
908{
909 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
910 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
911 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
912 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
913
914 nentries = roundup_pow_of_two(attr->dest_nentries);
915
916 memset(dest_ring->per_transfer_context, 0,
917 nentries * sizeof(*dest_ring->per_transfer_context));
918
919 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
920 dest_ring->sw_index &= dest_ring->nentries_mask;
921 dest_ring->write_index =
922 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
923 dest_ring->write_index &= dest_ring->nentries_mask;
924
925 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
926 dest_ring->base_addr_ce_space);
927 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
928 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
929 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
930 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
931
932 ath10k_dbg(ATH10K_DBG_BOOT,
933 "boot ce dest ring id %d entries %d base_addr %p\n",
934 ce_id, nentries, dest_ring->base_addr_owner_space);
935
936 return 0;
937}
938
939static struct ath10k_ce_ring *
940ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
941 const struct ce_attr *attr)
942{
943 struct ath10k_ce_ring *src_ring;
944 u32 nentries = attr->src_nentries;
945 dma_addr_t base_addr;
946
947 nentries = roundup_pow_of_two(nentries);
948
949 src_ring = kzalloc(sizeof(*src_ring) +
950 (nentries *
951 sizeof(*src_ring->per_transfer_context)),
952 GFP_KERNEL);
953 if (src_ring == NULL)
954 return ERR_PTR(-ENOMEM);
955
956 src_ring->nentries = nentries;
957 src_ring->nentries_mask = nentries - 1;
882 958
883 /* 959 /*
884 * Legacy platforms that do not support cache 960 * Legacy platforms that do not support cache
885 * coherent DMA are unsupported 961 * coherent DMA are unsupported
886 */ 962 */
887 src_ring->base_addr_owner_space_unaligned = 963 src_ring->base_addr_owner_space_unaligned =
888 pci_alloc_consistent(ar_pci->pdev, 964 dma_alloc_coherent(ar->dev,
889 (nentries * sizeof(struct ce_desc) + 965 (nentries * sizeof(struct ce_desc) +
890 CE_DESC_RING_ALIGN), 966 CE_DESC_RING_ALIGN),
891 &base_addr); 967 &base_addr, GFP_KERNEL);
892 if (!src_ring->base_addr_owner_space_unaligned) { 968 if (!src_ring->base_addr_owner_space_unaligned) {
893 kfree(ce_state->src_ring); 969 kfree(src_ring);
894 ce_state->src_ring = NULL; 970 return ERR_PTR(-ENOMEM);
895 return -ENOMEM;
896 } 971 }
897 972
898 src_ring->base_addr_ce_space_unaligned = base_addr; 973 src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +987,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
912 kmalloc((nentries * sizeof(struct ce_desc) + 987 kmalloc((nentries * sizeof(struct ce_desc) +
913 CE_DESC_RING_ALIGN), GFP_KERNEL); 988 CE_DESC_RING_ALIGN), GFP_KERNEL);
914 if (!src_ring->shadow_base_unaligned) { 989 if (!src_ring->shadow_base_unaligned) {
915 pci_free_consistent(ar_pci->pdev, 990 dma_free_coherent(ar->dev,
916 (nentries * sizeof(struct ce_desc) + 991 (nentries * sizeof(struct ce_desc) +
917 CE_DESC_RING_ALIGN), 992 CE_DESC_RING_ALIGN),
918 src_ring->base_addr_owner_space, 993 src_ring->base_addr_owner_space,
919 src_ring->base_addr_ce_space); 994 src_ring->base_addr_ce_space);
920 kfree(ce_state->src_ring); 995 kfree(src_ring);
921 ce_state->src_ring = NULL; 996 return ERR_PTR(-ENOMEM);
922 return -ENOMEM;
923 } 997 }
924 998
925 src_ring->shadow_base = PTR_ALIGN( 999 src_ring->shadow_base = PTR_ALIGN(
926 src_ring->shadow_base_unaligned, 1000 src_ring->shadow_base_unaligned,
927 CE_DESC_RING_ALIGN); 1001 CE_DESC_RING_ALIGN);
928 1002
929 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 1003 return src_ring;
930 src_ring->base_addr_ce_space);
931 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
932 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
933 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
934 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
935 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
936
937 ath10k_dbg(ATH10K_DBG_BOOT,
938 "boot ce src ring id %d entries %d base_addr %p\n",
939 ce_id, nentries, src_ring->base_addr_owner_space);
940
941 return 0;
942} 1004}
943 1005
944static int ath10k_ce_init_dest_ring(struct ath10k *ar, 1006static struct ath10k_ce_ring *
945 unsigned int ce_id, 1007ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
946 struct ath10k_ce_pipe *ce_state, 1008 const struct ce_attr *attr)
947 const struct ce_attr *attr)
948{ 1009{
949 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
950 struct ath10k_ce_ring *dest_ring; 1010 struct ath10k_ce_ring *dest_ring;
951 unsigned int nentries = attr->dest_nentries; 1011 u32 nentries;
952 unsigned int ce_nbytes;
953 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
954 dma_addr_t base_addr; 1012 dma_addr_t base_addr;
955 char *ptr;
956 1013
957 nentries = roundup_pow_of_two(nentries); 1014 nentries = roundup_pow_of_two(attr->dest_nentries);
958 1015
959 if (ce_state->dest_ring) { 1016 dest_ring = kzalloc(sizeof(*dest_ring) +
960 WARN_ON(ce_state->dest_ring->nentries != nentries); 1017 (nentries *
961 return 0; 1018 sizeof(*dest_ring->per_transfer_context)),
962 } 1019 GFP_KERNEL);
963 1020 if (dest_ring == NULL)
964 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *)); 1021 return ERR_PTR(-ENOMEM);
965 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
966 if (ptr == NULL)
967 return -ENOMEM;
968 1022
969 ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
970 dest_ring = ce_state->dest_ring;
971
972 ptr += sizeof(struct ath10k_ce_ring);
973 dest_ring->nentries = nentries; 1023 dest_ring->nentries = nentries;
974 dest_ring->nentries_mask = nentries - 1; 1024 dest_ring->nentries_mask = nentries - 1;
975 1025
976 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
977 dest_ring->sw_index &= dest_ring->nentries_mask;
978 dest_ring->write_index =
979 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
980 dest_ring->write_index &= dest_ring->nentries_mask;
981
982 dest_ring->per_transfer_context = (void **)ptr;
983
984 /* 1026 /*
985 * Legacy platforms that do not support cache 1027 * Legacy platforms that do not support cache
986 * coherent DMA are unsupported 1028 * coherent DMA are unsupported
987 */ 1029 */
988 dest_ring->base_addr_owner_space_unaligned = 1030 dest_ring->base_addr_owner_space_unaligned =
989 pci_alloc_consistent(ar_pci->pdev, 1031 dma_alloc_coherent(ar->dev,
990 (nentries * sizeof(struct ce_desc) + 1032 (nentries * sizeof(struct ce_desc) +
991 CE_DESC_RING_ALIGN), 1033 CE_DESC_RING_ALIGN),
992 &base_addr); 1034 &base_addr, GFP_KERNEL);
993 if (!dest_ring->base_addr_owner_space_unaligned) { 1035 if (!dest_ring->base_addr_owner_space_unaligned) {
994 kfree(ce_state->dest_ring); 1036 kfree(dest_ring);
995 ce_state->dest_ring = NULL; 1037 return ERR_PTR(-ENOMEM);
996 return -ENOMEM;
997 } 1038 }
998 1039
999 dest_ring->base_addr_ce_space_unaligned = base_addr; 1040 dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1053,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1012 dest_ring->base_addr_ce_space_unaligned, 1053 dest_ring->base_addr_ce_space_unaligned,
1013 CE_DESC_RING_ALIGN); 1054 CE_DESC_RING_ALIGN);
1014 1055
1015 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 1056 return dest_ring;
1016 dest_ring->base_addr_ce_space);
1017 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1018 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1019 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1020 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1021
1022 ath10k_dbg(ATH10K_DBG_BOOT,
1023 "boot ce dest ring id %d entries %d base_addr %p\n",
1024 ce_id, nentries, dest_ring->base_addr_owner_space);
1025
1026 return 0;
1027}
1028
1029static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
1030 unsigned int ce_id,
1031 const struct ce_attr *attr)
1032{
1033 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1034 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1035 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1036
1037 spin_lock_bh(&ar_pci->ce_lock);
1038
1039 ce_state->ar = ar;
1040 ce_state->id = ce_id;
1041 ce_state->ctrl_addr = ctrl_addr;
1042 ce_state->attr_flags = attr->flags;
1043 ce_state->src_sz_max = attr->src_sz_max;
1044
1045 spin_unlock_bh(&ar_pci->ce_lock);
1046
1047 return ce_state;
1048} 1057}
1049 1058
1050/* 1059/*
@@ -1054,11 +1063,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
1054 * initialization. It may be that only one side or the other is 1063 * initialization. It may be that only one side or the other is
1055 * initialized by software/firmware. 1064 * initialized by software/firmware.
1056 */ 1065 */
1057struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, 1066int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1058 unsigned int ce_id, 1067 const struct ce_attr *attr)
1059 const struct ce_attr *attr)
1060{ 1068{
1061 struct ath10k_ce_pipe *ce_state; 1069 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1070 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1062 int ret; 1071 int ret;
1063 1072
1064 /* 1073 /*
@@ -1074,64 +1083,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
1074 1083
1075 ret = ath10k_pci_wake(ar); 1084 ret = ath10k_pci_wake(ar);
1076 if (ret) 1085 if (ret)
1077 return NULL; 1086 return ret;
1078 1087
1079 ce_state = ath10k_ce_init_state(ar, ce_id, attr); 1088 spin_lock_bh(&ar_pci->ce_lock);
1080 if (!ce_state) { 1089 ce_state->ar = ar;
1081 ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); 1090 ce_state->id = ce_id;
1082 goto out; 1091 ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
1083 } 1092 ce_state->attr_flags = attr->flags;
1093 ce_state->src_sz_max = attr->src_sz_max;
1094 spin_unlock_bh(&ar_pci->ce_lock);
1084 1095
1085 if (attr->src_nentries) { 1096 if (attr->src_nentries) {
1086 ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr); 1097 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1087 if (ret) { 1098 if (ret) {
1088 ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", 1099 ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
1089 ce_id, ret); 1100 ce_id, ret);
1090 ath10k_ce_deinit(ce_state);
1091 ce_state = NULL;
1092 goto out; 1101 goto out;
1093 } 1102 }
1094 } 1103 }
1095 1104
1096 if (attr->dest_nentries) { 1105 if (attr->dest_nentries) {
1097 ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr); 1106 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1098 if (ret) { 1107 if (ret) {
1099 ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", 1108 ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
1100 ce_id, ret); 1109 ce_id, ret);
1101 ath10k_ce_deinit(ce_state);
1102 ce_state = NULL;
1103 goto out; 1110 goto out;
1104 } 1111 }
1105 } 1112 }
1106 1113
1107out: 1114out:
1108 ath10k_pci_sleep(ar); 1115 ath10k_pci_sleep(ar);
1109 return ce_state; 1116 return ret;
1110} 1117}
1111 1118
1112void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state) 1119static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1120{
1121 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1122
1123 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
1124 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
1125 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
1126 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
1127}
1128
1129static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1130{
1131 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1132
1133 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
1134 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
1135 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
1136}
1137
1138void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
1139{
1140 int ret;
1141
1142 ret = ath10k_pci_wake(ar);
1143 if (ret)
1144 return;
1145
1146 ath10k_ce_deinit_src_ring(ar, ce_id);
1147 ath10k_ce_deinit_dest_ring(ar, ce_id);
1148
1149 ath10k_pci_sleep(ar);
1150}
1151
1152int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1153 const struct ce_attr *attr)
1154{
1155 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1156 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1157 int ret;
1158
1159 if (attr->src_nentries) {
1160 ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
1161 if (IS_ERR(ce_state->src_ring)) {
1162 ret = PTR_ERR(ce_state->src_ring);
1163 ath10k_err("failed to allocate copy engine source ring %d: %d\n",
1164 ce_id, ret);
1165 ce_state->src_ring = NULL;
1166 return ret;
1167 }
1168 }
1169
1170 if (attr->dest_nentries) {
1171 ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
1172 attr);
1173 if (IS_ERR(ce_state->dest_ring)) {
1174 ret = PTR_ERR(ce_state->dest_ring);
1175 ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
1176 ce_id, ret);
1177 ce_state->dest_ring = NULL;
1178 return ret;
1179 }
1180 }
1181
1182 return 0;
1183}
1184
1185void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1113{ 1186{
1114 struct ath10k *ar = ce_state->ar;
1115 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1187 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1188 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1116 1189
1117 if (ce_state->src_ring) { 1190 if (ce_state->src_ring) {
1118 kfree(ce_state->src_ring->shadow_base_unaligned); 1191 kfree(ce_state->src_ring->shadow_base_unaligned);
1119 pci_free_consistent(ar_pci->pdev, 1192 dma_free_coherent(ar->dev,
1120 (ce_state->src_ring->nentries * 1193 (ce_state->src_ring->nentries *
1121 sizeof(struct ce_desc) + 1194 sizeof(struct ce_desc) +
1122 CE_DESC_RING_ALIGN), 1195 CE_DESC_RING_ALIGN),
1123 ce_state->src_ring->base_addr_owner_space, 1196 ce_state->src_ring->base_addr_owner_space,
1124 ce_state->src_ring->base_addr_ce_space); 1197 ce_state->src_ring->base_addr_ce_space);
1125 kfree(ce_state->src_ring); 1198 kfree(ce_state->src_ring);
1126 } 1199 }
1127 1200
1128 if (ce_state->dest_ring) { 1201 if (ce_state->dest_ring) {
1129 pci_free_consistent(ar_pci->pdev, 1202 dma_free_coherent(ar->dev,
1130 (ce_state->dest_ring->nentries * 1203 (ce_state->dest_ring->nentries *
1131 sizeof(struct ce_desc) + 1204 sizeof(struct ce_desc) +
1132 CE_DESC_RING_ALIGN), 1205 CE_DESC_RING_ALIGN),
1133 ce_state->dest_ring->base_addr_owner_space, 1206 ce_state->dest_ring->base_addr_owner_space,
1134 ce_state->dest_ring->base_addr_ce_space); 1207 ce_state->dest_ring->base_addr_ce_space);
1135 kfree(ce_state->dest_ring); 1208 kfree(ce_state->dest_ring);
1136 } 1209 }
1137 1210
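Throughout this file, pci_alloc_consistent()/pci_free_consistent() give way to the generic DMA API. Besides dropping the dependency on ar_pci->pdev in favour of ar->dev, the switch lets these sleepable init paths pass GFP_KERNEL: the legacy PCI wrapper is roughly the following, hard-coding GFP_ATOMIC (shown for illustration, not part of this patch):

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, handle, GFP_ATOMIC);
}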
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..7a5a36fc59c1 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
104 void *shadow_base_unaligned; 104 void *shadow_base_unaligned;
105 struct ce_desc *shadow_base; 105 struct ce_desc *shadow_base;
106 106
107 void **per_transfer_context; 107 /* keep last */
108 void *per_transfer_context[0];
108}; 109};
109 110
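Making per_transfer_context a trailing zero-length array (hence the "keep last" comment) is what lets the new ath10k_ce_alloc_*_ring() functions grab the ring struct and its context array in a single kzalloc(), instead of allocating a raw byte blob and carving it up with casts as the old init code did:

/* one allocation covers the struct plus nentries trailing pointers */
src_ring = kzalloc(sizeof(*src_ring) +
		   nentries * sizeof(*src_ring->per_transfer_context),
		   GFP_KERNEL);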
110struct ath10k_ce_pipe { 111struct ath10k_ce_pipe {
@@ -159,6 +160,8 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
159 unsigned int transfer_id, 160 unsigned int transfer_id,
160 unsigned int flags); 161 unsigned int flags);
161 162
163void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
164
162void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, 165void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
163 void (*send_cb)(struct ath10k_ce_pipe *), 166 void (*send_cb)(struct ath10k_ce_pipe *),
164 int disable_interrupts); 167 int disable_interrupts);
@@ -210,10 +213,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
210 213
211/*==================CE Engine Initialization=======================*/ 214/*==================CE Engine Initialization=======================*/
212 215
213/* Initialize an instance of a CE */ 216int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
214struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, 217 const struct ce_attr *attr);
215 unsigned int ce_id, 218void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
216 const struct ce_attr *attr); 219int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
220 const struct ce_attr *attr);
221void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
217 222
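The single ath10k_ce_init() entry point is thus split into an alloc/init pair with matching deinit/free, presumably so a hardware restart can re-program the rings without reallocating their DMA memory. The implied pairing, sketched:

/* presumed lifecycle of a copy engine pipe after this split */
ret = ath10k_ce_alloc_pipe(ar, ce_id, attr);	/* once, at probe */
ret = ath10k_ce_init_pipe(ar, ce_id, attr);	/* on every power-up */
/* ... traffic ... */
ath10k_ce_deinit_pipe(ar, ce_id);		/* on every power-down */
ath10k_ce_free_pipe(ar, ce_id);			/* once, at remove */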
218/*==================CE Engine Shutdown=======================*/ 223/*==================CE Engine Shutdown=======================*/
219/* 224/*
@@ -236,8 +241,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
236 unsigned int *nbytesp, 241 unsigned int *nbytesp,
237 unsigned int *transfer_idp); 242 unsigned int *transfer_idp);
238 243
239void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
240
241/*==================CE Interrupt Handlers====================*/ 244/*==================CE Interrupt Handlers====================*/
242void ath10k_ce_per_engine_service_any(struct ath10k *ar); 245void ath10k_ce_per_engine_service_any(struct ath10k *ar);
243void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); 246void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ebc5fc2ede75..82017f56e661 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -58,36 +58,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
58 complete(&ar->target_suspend); 58 complete(&ar->target_suspend);
59} 59}
60 60
61static int ath10k_init_connect_htc(struct ath10k *ar)
62{
63 int status;
64
65 status = ath10k_wmi_connect_htc_service(ar);
66 if (status)
67 goto conn_fail;
68
69 /* Start HTC */
70 status = ath10k_htc_start(&ar->htc);
71 if (status)
72 goto conn_fail;
73
74 /* Wait for WMI event to be ready */
75 status = ath10k_wmi_wait_for_service_ready(ar);
76 if (status <= 0) {
77 ath10k_warn("wmi service ready event not received");
78 status = -ETIMEDOUT;
79 goto timeout;
80 }
81
82 ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
83 return 0;
84
85timeout:
86 ath10k_htc_stop(&ar->htc);
87conn_fail:
88 return status;
89}
90
91static int ath10k_init_configure_target(struct ath10k *ar) 61static int ath10k_init_configure_target(struct ath10k *ar)
92{ 62{
93 u32 param_host; 63 u32 param_host;
@@ -249,30 +219,40 @@ exit:
249 219
250static int ath10k_download_and_run_otp(struct ath10k *ar) 220static int ath10k_download_and_run_otp(struct ath10k *ar)
251{ 221{
252 u32 address = ar->hw_params.patch_load_addr; 222 u32 result, address = ar->hw_params.patch_load_addr;
253 u32 exec_param;
254 int ret; 223 int ret;
255 224
256 /* OTP is optional */ 225 /* OTP is optional */
257 226
258 if (!ar->otp_data || !ar->otp_len) 227 if (!ar->otp_data || !ar->otp_len) {
228 ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
229 ar->otp_data, ar->otp_len);
259 return 0; 230 return 0;
231 }
232
233 ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
234 address, ar->otp_len);
260 235
261 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); 236 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
262 if (ret) { 237 if (ret) {
263 ath10k_err("could not write otp (%d)\n", ret); 238 ath10k_err("could not write otp (%d)\n", ret);
264 goto exit; 239 return ret;
265 } 240 }
266 241
267 exec_param = 0; 242 ret = ath10k_bmi_execute(ar, address, 0, &result);
268 ret = ath10k_bmi_execute(ar, address, &exec_param);
269 if (ret) { 243 if (ret) {
270 ath10k_err("could not execute otp (%d)\n", ret); 244 ath10k_err("could not execute otp (%d)\n", ret);
271 goto exit; 245 return ret;
272 } 246 }
273 247
274exit: 248 ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
275 return ret; 249
250 if (result != 0) {
251 ath10k_err("otp calibration failed: %d", result);
252 return -EINVAL;
253 }
254
255 return 0;
276} 256}
277 257
278static int ath10k_download_fw(struct ath10k *ar) 258static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +369,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
389 /* first fetch the firmware file (firmware-*.bin) */ 369 /* first fetch the firmware file (firmware-*.bin) */
390 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); 370 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
391 if (IS_ERR(ar->firmware)) { 371 if (IS_ERR(ar->firmware)) {
392 ath10k_err("Could not fetch firmware file '%s': %ld\n", 372 ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
393 name, PTR_ERR(ar->firmware)); 373 ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
394 return PTR_ERR(ar->firmware); 374 return PTR_ERR(ar->firmware);
395 } 375 }
396 376
@@ -401,14 +381,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
401 magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; 381 magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
402 382
403 if (len < magic_len) { 383 if (len < magic_len) {
404 ath10k_err("firmware image too small to contain magic: %zu\n", 384 ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
405 len); 385 ar->hw_params.fw.dir, name, len);
406 ret = -EINVAL; 386 ret = -EINVAL;
407 goto err; 387 goto err;
408 } 388 }
409 389
410 if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { 390 if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
411 ath10k_err("Invalid firmware magic\n"); 391 ath10k_err("invalid firmware magic\n");
412 ret = -EINVAL; 392 ret = -EINVAL;
413 goto err; 393 goto err;
414 } 394 }
@@ -430,7 +410,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
430 data += sizeof(*hdr); 410 data += sizeof(*hdr);
431 411
432 if (len < ie_len) { 412 if (len < ie_len) {
433 ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n", 413 ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
434 ie_id, len, ie_len); 414 ie_id, len, ie_len);
435 ret = -EINVAL; 415 ret = -EINVAL;
436 goto err; 416 goto err;
@@ -513,8 +493,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
513 } 493 }
514 494
515 if (!ar->firmware_data || !ar->firmware_len) { 495 if (!ar->firmware_data || !ar->firmware_len) {
516 ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n", 496 ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
517 name); 497 ar->hw_params.fw.dir, name);
518 ret = -ENOMEDIUM; 498 ret = -ENOMEDIUM;
519 goto err; 499 goto err;
520 } 500 }
@@ -531,7 +511,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
531 ar->hw_params.fw.board); 511 ar->hw_params.fw.board);
532 if (IS_ERR(ar->board)) { 512 if (IS_ERR(ar->board)) {
533 ret = PTR_ERR(ar->board); 513 ret = PTR_ERR(ar->board);
534 ath10k_err("could not fetch board data (%d)\n", ret); 514 ath10k_err("could not fetch board data '%s/%s' (%d)\n",
515 ar->hw_params.fw.dir, ar->hw_params.fw.board,
516 ret);
535 goto err; 517 goto err;
536 } 518 }
537 519
@@ -549,19 +531,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
549{ 531{
550 int ret; 532 int ret;
551 533
534 ar->fw_api = 2;
535 ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
536
552 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); 537 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
553 if (ret == 0) { 538 if (ret == 0)
554 ar->fw_api = 2; 539 goto success;
555 goto out; 540
556 } 541 ar->fw_api = 1;
542 ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
557 543
558 ret = ath10k_core_fetch_firmware_api_1(ar); 544 ret = ath10k_core_fetch_firmware_api_1(ar);
559 if (ret) 545 if (ret)
560 return ret; 546 return ret;
561 547
562 ar->fw_api = 1; 548success:
563
564out:
565 ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); 549 ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
566 550
567 return 0; 551 return 0;
@@ -572,16 +556,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
572 int ret; 556 int ret;
573 557
574 ret = ath10k_download_board_data(ar); 558 ret = ath10k_download_board_data(ar);
575 if (ret) 559 if (ret) {
560 ath10k_err("failed to download board data: %d\n", ret);
576 return ret; 561 return ret;
562 }
577 563
578 ret = ath10k_download_and_run_otp(ar); 564 ret = ath10k_download_and_run_otp(ar);
579 if (ret) 565 if (ret) {
566 ath10k_err("failed to run otp: %d\n", ret);
580 return ret; 567 return ret;
568 }
581 569
582 ret = ath10k_download_fw(ar); 570 ret = ath10k_download_fw(ar);
583 if (ret) 571 if (ret) {
572 ath10k_err("failed to download firmware: %d\n", ret);
584 return ret; 573 return ret;
574 }
585 575
586 return ret; 576 return ret;
587} 577}
@@ -660,8 +650,9 @@ static void ath10k_core_restart(struct work_struct *work)
660 650
661 switch (ar->state) { 651 switch (ar->state) {
662 case ATH10K_STATE_ON: 652 case ATH10K_STATE_ON:
663 ath10k_halt(ar);
664 ar->state = ATH10K_STATE_RESTARTING; 653 ar->state = ATH10K_STATE_RESTARTING;
654 del_timer_sync(&ar->scan.timeout);
655 ath10k_reset_scan((unsigned long)ar);
665 ieee80211_restart_hw(ar->hw); 656 ieee80211_restart_hw(ar->hw);
666 break; 657 break;
667 case ATH10K_STATE_OFF: 658 case ATH10K_STATE_OFF:
@@ -670,6 +661,8 @@ static void ath10k_core_restart(struct work_struct *work)
670 ath10k_warn("cannot restart a device that hasn't been started\n"); 661 ath10k_warn("cannot restart a device that hasn't been started\n");
671 break; 662 break;
672 case ATH10K_STATE_RESTARTING: 663 case ATH10K_STATE_RESTARTING:
664 /* hw restart might be requested from multiple places */
665 break;
673 case ATH10K_STATE_RESTARTED: 666 case ATH10K_STATE_RESTARTED:
674 ar->state = ATH10K_STATE_WEDGED; 667 ar->state = ATH10K_STATE_WEDGED;
675 /* fall through */ 668 /* fall through */
@@ -681,70 +674,6 @@ static void ath10k_core_restart(struct work_struct *work)
681 mutex_unlock(&ar->conf_mutex); 674 mutex_unlock(&ar->conf_mutex);
682} 675}
683 676
684struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
685 const struct ath10k_hif_ops *hif_ops)
686{
687 struct ath10k *ar;
688
689 ar = ath10k_mac_create();
690 if (!ar)
691 return NULL;
692
693 ar->ath_common.priv = ar;
694 ar->ath_common.hw = ar->hw;
695
696 ar->p2p = !!ath10k_p2p;
697 ar->dev = dev;
698
699 ar->hif.priv = hif_priv;
700 ar->hif.ops = hif_ops;
701
702 init_completion(&ar->scan.started);
703 init_completion(&ar->scan.completed);
704 init_completion(&ar->scan.on_channel);
705 init_completion(&ar->target_suspend);
706
707 init_completion(&ar->install_key_done);
708 init_completion(&ar->vdev_setup_done);
709
710 setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
711
712 ar->workqueue = create_singlethread_workqueue("ath10k_wq");
713 if (!ar->workqueue)
714 goto err_wq;
715
716 mutex_init(&ar->conf_mutex);
717 spin_lock_init(&ar->data_lock);
718
719 INIT_LIST_HEAD(&ar->peers);
720 init_waitqueue_head(&ar->peer_mapping_wq);
721
722 init_completion(&ar->offchan_tx_completed);
723 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
724 skb_queue_head_init(&ar->offchan_tx_queue);
725
726 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
727 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
728
729 INIT_WORK(&ar->restart_work, ath10k_core_restart);
730
731 return ar;
732
733err_wq:
734 ath10k_mac_destroy(ar);
735 return NULL;
736}
737EXPORT_SYMBOL(ath10k_core_create);
738
739void ath10k_core_destroy(struct ath10k *ar)
740{
741 flush_workqueue(ar->workqueue);
742 destroy_workqueue(ar->workqueue);
743
744 ath10k_mac_destroy(ar);
745}
746EXPORT_SYMBOL(ath10k_core_destroy);
747
748int ath10k_core_start(struct ath10k *ar) 677int ath10k_core_start(struct ath10k *ar)
749{ 678{
750 int status; 679 int status;
@@ -785,10 +714,28 @@ int ath10k_core_start(struct ath10k *ar)
785 goto err; 714 goto err;
786 } 715 }
787 716
717 status = ath10k_htt_init(ar);
718 if (status) {
719 ath10k_err("failed to init htt: %d\n", status);
720 goto err_wmi_detach;
721 }
722
723 status = ath10k_htt_tx_alloc(&ar->htt);
724 if (status) {
725 ath10k_err("failed to alloc htt tx: %d\n", status);
726 goto err_wmi_detach;
727 }
728
729 status = ath10k_htt_rx_alloc(&ar->htt);
730 if (status) {
731 ath10k_err("failed to alloc htt rx: %d\n", status);
732 goto err_htt_tx_detach;
733 }
734
788 status = ath10k_hif_start(ar); 735 status = ath10k_hif_start(ar);
789 if (status) { 736 if (status) {
790 ath10k_err("could not start HIF: %d\n", status); 737 ath10k_err("could not start HIF: %d\n", status);
791 goto err_wmi_detach; 738 goto err_htt_rx_detach;
792 } 739 }
793 740
794 status = ath10k_htc_wait_target(&ar->htc); 741 status = ath10k_htc_wait_target(&ar->htc);
@@ -797,15 +744,30 @@ int ath10k_core_start(struct ath10k *ar)
797 goto err_hif_stop; 744 goto err_hif_stop;
798 } 745 }
799 746
800 status = ath10k_htt_attach(ar); 747 status = ath10k_htt_connect(&ar->htt);
801 if (status) { 748 if (status) {
802 ath10k_err("could not attach htt (%d)\n", status); 749 ath10k_err("failed to connect htt (%d)\n", status);
803 goto err_hif_stop; 750 goto err_hif_stop;
804 } 751 }
805 752
806 status = ath10k_init_connect_htc(ar); 753 status = ath10k_wmi_connect(ar);
807 if (status) 754 if (status) {
808 goto err_htt_detach; 755 ath10k_err("could not connect wmi: %d\n", status);
756 goto err_hif_stop;
757 }
758
759 status = ath10k_htc_start(&ar->htc);
760 if (status) {
761 ath10k_err("failed to start htc: %d\n", status);
762 goto err_hif_stop;
763 }
764
765 status = ath10k_wmi_wait_for_service_ready(ar);
766 if (status <= 0) {
767 ath10k_warn("wmi service ready event not received");
768 status = -ETIMEDOUT;
769 goto err_htc_stop;
770 }
809 771
810 ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", 772 ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
811 ar->hw->wiphy->fw_version); 773 ar->hw->wiphy->fw_version);
@@ -813,31 +775,36 @@ int ath10k_core_start(struct ath10k *ar)
813 status = ath10k_wmi_cmd_init(ar); 775 status = ath10k_wmi_cmd_init(ar);
814 if (status) { 776 if (status) {
815 ath10k_err("could not send WMI init command (%d)\n", status); 777 ath10k_err("could not send WMI init command (%d)\n", status);
816 goto err_disconnect_htc; 778 goto err_htc_stop;
817 } 779 }
818 780
819 status = ath10k_wmi_wait_for_unified_ready(ar); 781 status = ath10k_wmi_wait_for_unified_ready(ar);
820 if (status <= 0) { 782 if (status <= 0) {
821 ath10k_err("wmi unified ready event not received\n"); 783 ath10k_err("wmi unified ready event not received\n");
822 status = -ETIMEDOUT; 784 status = -ETIMEDOUT;
823 goto err_disconnect_htc; 785 goto err_htc_stop;
824 } 786 }
825 787
826 status = ath10k_htt_attach_target(&ar->htt); 788 status = ath10k_htt_setup(&ar->htt);
827 if (status) 789 if (status) {
828 goto err_disconnect_htc; 790 ath10k_err("failed to setup htt: %d\n", status);
791 goto err_htc_stop;
792 }
829 793
830 status = ath10k_debug_start(ar); 794 status = ath10k_debug_start(ar);
831 if (status) 795 if (status)
832 goto err_disconnect_htc; 796 goto err_htc_stop;
833 797
834 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 798 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
835 INIT_LIST_HEAD(&ar->arvifs); 799 INIT_LIST_HEAD(&ar->arvifs);
836 800
837 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 801 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
838 ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n", 802 ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
839 ar->hw_params.name, ar->target_version, 803 ar->hw_params.name,
840 ar->hw->wiphy->fw_version, ar->fw_api, 804 ar->target_version,
805 ar->chip_id,
806 ar->hw->wiphy->fw_version,
807 ar->fw_api,
841 ar->htt.target_version_major, 808 ar->htt.target_version_major,
842 ar->htt.target_version_minor); 809 ar->htt.target_version_minor);
843 810
@@ -845,12 +812,14 @@ int ath10k_core_start(struct ath10k *ar)
845 812
846 return 0; 813 return 0;
847 814
848err_disconnect_htc: 815err_htc_stop:
849 ath10k_htc_stop(&ar->htc); 816 ath10k_htc_stop(&ar->htc);
850err_htt_detach:
851 ath10k_htt_detach(&ar->htt);
852err_hif_stop: 817err_hif_stop:
853 ath10k_hif_stop(ar); 818 ath10k_hif_stop(ar);
819err_htt_rx_detach:
820 ath10k_htt_rx_free(&ar->htt);
821err_htt_tx_detach:
822 ath10k_htt_tx_free(&ar->htt);
854err_wmi_detach: 823err_wmi_detach:
855 ath10k_wmi_detach(ar); 824 ath10k_wmi_detach(ar);
856err: 825err:
@@ -885,10 +854,14 @@ void ath10k_core_stop(struct ath10k *ar)
885 lockdep_assert_held(&ar->conf_mutex); 854 lockdep_assert_held(&ar->conf_mutex);
886 855
887 /* try to suspend target */ 856 /* try to suspend target */
888 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); 857 if (ar->state != ATH10K_STATE_RESTARTING)
858 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
859
889 ath10k_debug_stop(ar); 860 ath10k_debug_stop(ar);
890 ath10k_htc_stop(&ar->htc); 861 ath10k_htc_stop(&ar->htc);
891 ath10k_htt_detach(&ar->htt); 862 ath10k_hif_stop(ar);
863 ath10k_htt_tx_free(&ar->htt);
864 ath10k_htt_rx_free(&ar->htt);
892 ath10k_wmi_detach(ar); 865 ath10k_wmi_detach(ar);
893} 866}
894EXPORT_SYMBOL(ath10k_core_stop); 867EXPORT_SYMBOL(ath10k_core_stop);
@@ -980,22 +953,15 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
980 return 0; 953 return 0;
981} 954}
982 955
983int ath10k_core_register(struct ath10k *ar, u32 chip_id) 956static void ath10k_core_register_work(struct work_struct *work)
984{ 957{
958 struct ath10k *ar = container_of(work, struct ath10k, register_work);
985 int status; 959 int status;
986 960
987 ar->chip_id = chip_id;
988
989 status = ath10k_core_check_chip_id(ar);
990 if (status) {
991 ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
992 return status;
993 }
994
995 status = ath10k_core_probe_fw(ar); 961 status = ath10k_core_probe_fw(ar);
996 if (status) { 962 if (status) {
997 ath10k_err("could not probe fw (%d)\n", status); 963 ath10k_err("could not probe fw (%d)\n", status);
998 return status; 964 goto err;
999 } 965 }
1000 966
1001 status = ath10k_mac_register(ar); 967 status = ath10k_mac_register(ar);
@@ -1010,18 +976,43 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
1010 goto err_unregister_mac; 976 goto err_unregister_mac;
1011 } 977 }
1012 978
1013 return 0; 979 set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
980 return;
1014 981
1015err_unregister_mac: 982err_unregister_mac:
1016 ath10k_mac_unregister(ar); 983 ath10k_mac_unregister(ar);
1017err_release_fw: 984err_release_fw:
1018 ath10k_core_free_firmware_files(ar); 985 ath10k_core_free_firmware_files(ar);
1019 return status; 986err:
987 device_release_driver(ar->dev);
988 return;
989}
990
991int ath10k_core_register(struct ath10k *ar, u32 chip_id)
992{
993 int status;
994
995 ar->chip_id = chip_id;
996
997 status = ath10k_core_check_chip_id(ar);
998 if (status) {
999 ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
1000 return status;
1001 }
1002
1003 queue_work(ar->workqueue, &ar->register_work);
1004
1005 return 0;
1020} 1006}
1021EXPORT_SYMBOL(ath10k_core_register); 1007EXPORT_SYMBOL(ath10k_core_register);
1022 1008
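With this change the slow parts of bring-up (firmware fetch and first boot) run from a work item instead of the PCI probe path; ath10k_core_register() now only validates the chip ID and queues the work. Since an asynchronous failure has no probe return value to propagate, the work handler detaches the device with device_release_driver() instead. The shape of the pattern, with an invented setup helper:

static void register_work_fn(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, register_work);

	if (do_slow_setup(ar) < 0)		/* do_slow_setup is made up */
		device_release_driver(ar->dev);	/* unbind on async failure */
}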
1023void ath10k_core_unregister(struct ath10k *ar) 1009void ath10k_core_unregister(struct ath10k *ar)
1024{ 1010{
1011 cancel_work_sync(&ar->register_work);
1012
1013 if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
1014 return;
1015
1025 /* We must unregister from mac80211 before we stop HTC and HIF. 1016 /* We must unregister from mac80211 before we stop HTC and HIF.
1026 * Otherwise we will fail to submit commands to FW and mac80211 will be 1017 * Otherwise we will fail to submit commands to FW and mac80211 will be
1027 * unhappy about callback failures. */ 1018 * unhappy about callback failures. */
@@ -1033,6 +1024,71 @@ void ath10k_core_unregister(struct ath10k *ar)
1033} 1024}
1034EXPORT_SYMBOL(ath10k_core_unregister); 1025EXPORT_SYMBOL(ath10k_core_unregister);
1035 1026
1027struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
1028 const struct ath10k_hif_ops *hif_ops)
1029{
1030 struct ath10k *ar;
1031
1032 ar = ath10k_mac_create();
1033 if (!ar)
1034 return NULL;
1035
1036 ar->ath_common.priv = ar;
1037 ar->ath_common.hw = ar->hw;
1038
1039 ar->p2p = !!ath10k_p2p;
1040 ar->dev = dev;
1041
1042 ar->hif.priv = hif_priv;
1043 ar->hif.ops = hif_ops;
1044
1045 init_completion(&ar->scan.started);
1046 init_completion(&ar->scan.completed);
1047 init_completion(&ar->scan.on_channel);
1048 init_completion(&ar->target_suspend);
1049
1050 init_completion(&ar->install_key_done);
1051 init_completion(&ar->vdev_setup_done);
1052
1053 setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
1054
1055 ar->workqueue = create_singlethread_workqueue("ath10k_wq");
1056 if (!ar->workqueue)
1057 goto err_wq;
1058
1059 mutex_init(&ar->conf_mutex);
1060 spin_lock_init(&ar->data_lock);
1061
1062 INIT_LIST_HEAD(&ar->peers);
1063 init_waitqueue_head(&ar->peer_mapping_wq);
1064
1065 init_completion(&ar->offchan_tx_completed);
1066 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
1067 skb_queue_head_init(&ar->offchan_tx_queue);
1068
1069 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
1070 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
1071
1072 INIT_WORK(&ar->register_work, ath10k_core_register_work);
1073 INIT_WORK(&ar->restart_work, ath10k_core_restart);
1074
1075 return ar;
1076
1077err_wq:
1078 ath10k_mac_destroy(ar);
1079 return NULL;
1080}
1081EXPORT_SYMBOL(ath10k_core_create);
1082
1083void ath10k_core_destroy(struct ath10k *ar)
1084{
1085 flush_workqueue(ar->workqueue);
1086 destroy_workqueue(ar->workqueue);
1087
1088 ath10k_mac_destroy(ar);
1089}
1090EXPORT_SYMBOL(ath10k_core_destroy);
1091
1036MODULE_AUTHOR("Qualcomm Atheros"); 1092MODULE_AUTHOR("Qualcomm Atheros");
1037MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); 1093MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
1038MODULE_LICENSE("Dual BSD/GPL"); 1094MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0e71979d837c..68ceef61933d 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
119 u8 peer_macaddr[ETH_ALEN]; 119 u8 peer_macaddr[ETH_ALEN];
120 u32 peer_rssi; 120 u32 peer_rssi;
121 u32 peer_tx_rate; 121 u32 peer_tx_rate;
122 u32 peer_rx_rate; /* 10x only */
122}; 123};
123 124
124struct ath10k_target_stats { 125struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
130 u32 cycle_count; 131 u32 cycle_count;
131 u32 phy_err_count; 132 u32 phy_err_count;
132 u32 chan_tx_power; 133 u32 chan_tx_power;
134 u32 ack_rx_bad;
135 u32 rts_bad;
136 u32 rts_good;
137 u32 fcs_bad;
138 u32 no_beacons;
139 u32 mib_int_count;
133 140
134 /* PDEV TX stats */ 141 /* PDEV TX stats */
135 s32 comp_queued; 142 s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
260 u8 fixed_rate; 267 u8 fixed_rate;
261 u8 fixed_nss; 268 u8 fixed_nss;
262 u8 force_sgi; 269 u8 force_sgi;
270 bool use_cts_prot;
271 int num_legacy_stations;
263}; 272};
264 273
265struct ath10k_vif_iter { 274struct ath10k_vif_iter {
@@ -326,6 +335,7 @@ enum ath10k_dev_flags {
326 /* Indicates that ath10k device is during CAC phase of DFS */ 335 /* Indicates that ath10k device is during CAC phase of DFS */
327 ATH10K_CAC_RUNNING, 336 ATH10K_CAC_RUNNING,
328 ATH10K_FLAG_FIRST_BOOT_DONE, 337 ATH10K_FLAG_FIRST_BOOT_DONE,
338 ATH10K_FLAG_CORE_REGISTERED,
329}; 339};
330 340
331struct ath10k { 341struct ath10k {
@@ -419,13 +429,24 @@ struct ath10k {
419 struct cfg80211_chan_def chandef; 429 struct cfg80211_chan_def chandef;
420 430
421 int free_vdev_map; 431 int free_vdev_map;
432 bool promisc;
433 bool monitor;
422 int monitor_vdev_id; 434 int monitor_vdev_id;
423 bool monitor_enabled; 435 bool monitor_started;
424 bool monitor_present;
425 unsigned int filter_flags; 436 unsigned int filter_flags;
426 unsigned long dev_flags; 437 unsigned long dev_flags;
427 u32 dfs_block_radar_events; 438 u32 dfs_block_radar_events;
428 439
440 /* protected by conf_mutex */
441 bool radar_enabled;
442 int num_started_vdevs;
443
444 /* Protected by conf-mutex */
445 u8 supp_tx_chainmask;
446 u8 supp_rx_chainmask;
447 u8 cfg_tx_chainmask;
448 u8 cfg_rx_chainmask;
449
429 struct wmi_pdev_set_wmm_params_arg wmm_params; 450 struct wmi_pdev_set_wmm_params_arg wmm_params;
430 struct completion install_key_done; 451 struct completion install_key_done;
431 452
@@ -456,6 +477,7 @@ struct ath10k {
456 477
457 enum ath10k_state state; 478 enum ath10k_state state;
458 479
480 struct work_struct register_work;
459 struct work_struct restart_work; 481 struct work_struct restart_work;
460 482
461 /* cycle count is reported twice for each visited channel during scan. 483 /* cycle count is reported twice for each visited channel during scan.
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6debd281350a..1b7ff4ba122c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
161 u8 *tmp = ev->data; 161 u8 *tmp = ev->data;
162 struct ath10k_target_stats *stats; 162 struct ath10k_target_stats *stats;
163 int num_pdev_stats, num_vdev_stats, num_peer_stats; 163 int num_pdev_stats, num_vdev_stats, num_peer_stats;
164 struct wmi_pdev_stats *ps; 164 struct wmi_pdev_stats_10x *ps;
165 int i; 165 int i;
166 166
167 spin_lock_bh(&ar->data_lock); 167 spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
173 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ 173 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
174 174
175 if (num_pdev_stats) { 175 if (num_pdev_stats) {
176 ps = (struct wmi_pdev_stats *)tmp; 176 ps = (struct wmi_pdev_stats_10x *)tmp;
177 177
178 stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); 178 stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
179 stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); 179 stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
228 stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); 228 stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
229 stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); 229 stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
230 230
231 tmp += sizeof(struct wmi_pdev_stats); 231 if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
232 ar->fw_features)) {
233 stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
234 stats->rts_bad = __le32_to_cpu(ps->rts_bad);
235 stats->rts_good = __le32_to_cpu(ps->rts_good);
236 stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
237 stats->no_beacons = __le32_to_cpu(ps->no_beacons);
238 stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
239 tmp += sizeof(struct wmi_pdev_stats_10x);
240 } else {
241 tmp += sizeof(struct wmi_pdev_stats_old);
242 }
232 } 243 }
233 244
234 /* 0 or max vdevs */ 245 /* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
243 } 254 }
244 255
245 if (num_peer_stats) { 256 if (num_peer_stats) {
246 struct wmi_peer_stats *peer_stats; 257 struct wmi_peer_stats_10x *peer_stats;
247 struct ath10k_peer_stat *s; 258 struct ath10k_peer_stat *s;
248 259
249 stats->peers = num_peer_stats; 260 stats->peers = num_peer_stats;
250 261
251 for (i = 0; i < num_peer_stats; i++) { 262 for (i = 0; i < num_peer_stats; i++) {
252 peer_stats = (struct wmi_peer_stats *)tmp; 263 peer_stats = (struct wmi_peer_stats_10x *)tmp;
253 s = &stats->peer_stat[i]; 264 s = &stats->peer_stat[i];
254 265
255 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, 266 memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
256 s->peer_macaddr); 267 ETH_ALEN);
257 s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); 268 s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
258 s->peer_tx_rate = 269 s->peer_tx_rate =
259 __le32_to_cpu(peer_stats->peer_tx_rate); 270 __le32_to_cpu(peer_stats->peer_tx_rate);
260 271 if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
261 tmp += sizeof(struct wmi_peer_stats); 272 ar->fw_features)) {
273 s->peer_rx_rate =
274 __le32_to_cpu(peer_stats->peer_rx_rate);
275 tmp += sizeof(struct wmi_peer_stats_10x);
276
277 } else {
278 tmp += sizeof(struct wmi_peer_stats_old);
279 }
262 } 280 }
263 } 281 }
264 282
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
272 struct ath10k *ar = file->private_data; 290 struct ath10k *ar = file->private_data;
273 struct ath10k_target_stats *fw_stats; 291 struct ath10k_target_stats *fw_stats;
274 char *buf = NULL; 292 char *buf = NULL;
275 unsigned int len = 0, buf_len = 2500; 293 unsigned int len = 0, buf_len = 8000;
276 ssize_t ret_cnt = 0; 294 ssize_t ret_cnt = 0;
277 long left; 295 long left;
278 int i; 296 int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
320 "Cycle count", fw_stats->cycle_count); 338 "Cycle count", fw_stats->cycle_count);
321 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 339 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
322 "PHY error count", fw_stats->phy_err_count); 340 "PHY error count", fw_stats->phy_err_count);
341 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
342 "RTS bad count", fw_stats->rts_bad);
343 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
344 "RTS good count", fw_stats->rts_good);
345 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
346 "FCS bad count", fw_stats->fcs_bad);
347 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
348 "No beacon count", fw_stats->no_beacons);
349 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
350 "MIB int count", fw_stats->mib_int_count);
323 351
324 len += scnprintf(buf + len, buf_len - len, "\n"); 352 len += scnprintf(buf + len, buf_len - len, "\n");
325 len += scnprintf(buf + len, buf_len - len, "%30s\n", 353 len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
411 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); 439 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
412 440
413 len += scnprintf(buf + len, buf_len - len, "\n"); 441 len += scnprintf(buf + len, buf_len - len, "\n");
414 len += scnprintf(buf + len, buf_len - len, "%30s\n", 442 len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
415 "ath10k PEER stats"); 443 "ath10k PEER stats", fw_stats->peers);
416 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 444 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
417 "================="); 445 "=================");
418 446
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
425 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 453 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
426 "Peer TX rate", 454 "Peer TX rate",
427 fw_stats->peer_stat[i].peer_tx_rate); 455 fw_stats->peer_stat[i].peer_tx_rate);
456 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
457 "Peer RX rate",
458 fw_stats->peer_stat[i].peer_rx_rate);
428 len += scnprintf(buf + len, buf_len - len, "\n"); 459 len += scnprintf(buf + len, buf_len - len, "\n");
429 } 460 }
430 spin_unlock_bh(&ar->data_lock); 461 spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
451 char __user *user_buf, 482 char __user *user_buf,
452 size_t count, loff_t *ppos) 483 size_t count, loff_t *ppos)
453{ 484{
 454 const char buf[] = "To simulate firmware crash write the keyword" 485 const char buf[] = "To simulate a firmware crash, write one of the"
455 " `crash` to this file.\nThis will force firmware" 486 " keywords to this file:\n `soft` - this will send"
456 " to report a crash to the host system.\n"; 487 " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
488 " supports that command.\n `hard` - this will send"
489 " to firmware command with illegal parameters"
490 " causing firmware crash.\n";
491
457 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 492 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
458} 493}
459 494
 495/* Simulate firmware crash:
 496 * 'soft': Send a WMI command that causes a firmware hang. This hang is
 497 * recoverable by a warm firmware reset.
 498 * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
 499 * vdev id. This is a hard firmware crash because it is recoverable only by a
 500 * cold firmware reset.
 501 */
460static ssize_t ath10k_write_simulate_fw_crash(struct file *file, 502static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
461 const char __user *user_buf, 503 const char __user *user_buf,
462 size_t count, loff_t *ppos) 504 size_t count, loff_t *ppos)
463{ 505{
464 struct ath10k *ar = file->private_data; 506 struct ath10k *ar = file->private_data;
465 char buf[32] = {}; 507 char buf[32];
466 int ret; 508 int ret;
467 509
468 mutex_lock(&ar->conf_mutex); 510 mutex_lock(&ar->conf_mutex);
469 511
470 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); 512 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
471 if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) { 513
472 ret = -EINVAL; 514 /* make sure that buf is null terminated */
473 goto exit; 515 buf[sizeof(buf) - 1] = 0;
474 }
475 516
476 if (ar->state != ATH10K_STATE_ON && 517 if (ar->state != ATH10K_STATE_ON &&
477 ar->state != ATH10K_STATE_RESTARTED) { 518 ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
479 goto exit; 520 goto exit;
480 } 521 }
481 522
482 ath10k_info("simulating firmware crash\n"); 523 /* drop the possible '\n' from the end */
524 if (buf[count - 1] == '\n') {
525 buf[count - 1] = 0;
526 count--;
527 }
483 528
484 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); 529 if (!strcmp(buf, "soft")) {
485 if (ret) 530 ath10k_info("simulating soft firmware crash\n");
486 ath10k_warn("failed to force fw hang (%d)\n", ret); 531 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
532 } else if (!strcmp(buf, "hard")) {
533 ath10k_info("simulating hard firmware crash\n");
534 ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
535 ar->wmi.vdev_param->rts_threshold, 0);
536 } else {
537 ret = -EINVAL;
538 goto exit;
539 }
540
541 if (ret) {
542 ath10k_warn("failed to simulate firmware crash: %d\n", ret);
543 goto exit;
544 }
487 545
488 if (ret == 0) 546 ret = count;
489 ret = count;
490 547
491exit: 548exit:
492 mutex_unlock(&ar->conf_mutex); 549 mutex_unlock(&ar->conf_mutex);
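(Editor's aside: the hunk above replaces the single `crash` keyword with `soft` and `hard`. As a rough illustration of how the new interface would be driven from userspace, here is a minimal sketch; the debugfs path below is an assumption and depends on the phy index on a given system.)

/* Hypothetical userspace helper: write "soft" or "hard" to simulate_fw_crash. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
	const char *cmd = (argc > 1) ? argv[1] : "soft"; /* "soft" or "hard" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}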
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 7f1bccd3597f..e493db4b4a41 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
157 goto err_pull; 157 goto err_pull;
158 } 158 }
159 ep->tx_credits -= credits; 159 ep->tx_credits -= credits;
160 ath10k_dbg(ATH10K_DBG_HTC,
161 "htc ep %d consumed %d credits (total %d)\n",
162 eid, credits, ep->tx_credits);
160 spin_unlock_bh(&htc->tx_lock); 163 spin_unlock_bh(&htc->tx_lock);
161 } 164 }
162 165
@@ -185,6 +188,9 @@ err_credits:
185 if (ep->tx_credit_flow_enabled) { 188 if (ep->tx_credit_flow_enabled) {
186 spin_lock_bh(&htc->tx_lock); 189 spin_lock_bh(&htc->tx_lock);
187 ep->tx_credits += credits; 190 ep->tx_credits += credits;
191 ath10k_dbg(ATH10K_DBG_HTC,
192 "htc ep %d reverted %d credits back (total %d)\n",
193 eid, credits, ep->tx_credits);
188 spin_unlock_bh(&htc->tx_lock); 194 spin_unlock_bh(&htc->tx_lock);
189 195
190 if (ep->ep_ops.ep_tx_credits) 196 if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
234 if (report->eid >= ATH10K_HTC_EP_COUNT) 240 if (report->eid >= ATH10K_HTC_EP_COUNT)
235 break; 241 break;
236 242
237 ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
238 report->eid, report->credits);
239
240 ep = &htc->endpoint[report->eid]; 243 ep = &htc->endpoint[report->eid];
241 ep->tx_credits += report->credits; 244 ep->tx_credits += report->credits;
242 245
246 ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
247 report->eid, report->credits, ep->tx_credits);
248
243 if (ep->ep_ops.ep_tx_credits) { 249 if (ep->ep_ops.ep_tx_credits) {
244 spin_unlock_bh(&htc->tx_lock); 250 spin_unlock_bh(&htc->tx_lock);
245 ep->ep_ops.ep_tx_credits(htc->ar); 251 ep->ep_ops.ep_tx_credits(htc->ar);
@@ -824,17 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)
824 return 0; 830 return 0;
825} 831}
826 832
827/*
828 * stop HTC communications, i.e. stop interrupt reception, and flush all
829 * queued buffers
830 */
831void ath10k_htc_stop(struct ath10k_htc *htc) 833void ath10k_htc_stop(struct ath10k_htc *htc)
832{ 834{
833 spin_lock_bh(&htc->tx_lock); 835 spin_lock_bh(&htc->tx_lock);
834 htc->stopped = true; 836 htc->stopped = true;
835 spin_unlock_bh(&htc->tx_lock); 837 spin_unlock_bh(&htc->tx_lock);
836
837 ath10k_hif_stop(htc->ar);
838} 838}
839 839
840/* registered target arrival callback from the HIF layer */ 840/* registered target arrival callback from the HIF layer */
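(Editor's aside: the htc.c hunks above only add logging, but together they document the credit life cycle: credits are consumed before a send, reverted if the send fails, and replenished when the target reports them back. A self-contained toy model of that invariant, with illustrative names and no locking:)

#include <stdbool.h>
#include <stdio.h>

struct toy_ep {
	int tx_credits;
};

/* consume credits before handing a frame to the target */
static bool toy_consume(struct toy_ep *ep, int credits)
{
	if (ep->tx_credits < credits)
		return false;	/* caller must queue and wait */
	ep->tx_credits -= credits;
	return true;
}

/* the send failed: give the credits back */
static void toy_revert(struct toy_ep *ep, int credits)
{
	ep->tx_credits += credits;
}

/* target completed frames and reported the credits back */
static void toy_report(struct toy_ep *ep, int credits)
{
	ep->tx_credits += credits;
}

int main(void)
{
	struct toy_ep ep = { .tx_credits = 2 };

	if (toy_consume(&ep, 2))	/* pretend the HIF send then fails */
		toy_revert(&ep, 2);
	toy_report(&ep, 1);
	printf("credits now %d\n", ep.tx_credits);	/* prints 3 */
	return 0;
}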
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 69697af59ce0..19c12cc8d663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,7 +22,7 @@
22#include "core.h" 22#include "core.h"
23#include "debug.h" 23#include "debug.h"
24 24
25static int ath10k_htt_htc_attach(struct ath10k_htt *htt) 25int ath10k_htt_connect(struct ath10k_htt *htt)
26{ 26{
27 struct ath10k_htc_svc_conn_req conn_req; 27 struct ath10k_htc_svc_conn_req conn_req;
28 struct ath10k_htc_svc_conn_resp conn_resp; 28 struct ath10k_htc_svc_conn_resp conn_resp;
@@ -48,39 +48,14 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
48 return 0; 48 return 0;
49} 49}
50 50
51int ath10k_htt_attach(struct ath10k *ar) 51int ath10k_htt_init(struct ath10k *ar)
52{ 52{
53 struct ath10k_htt *htt = &ar->htt; 53 struct ath10k_htt *htt = &ar->htt;
54 int ret;
55 54
56 htt->ar = ar; 55 htt->ar = ar;
57 htt->max_throughput_mbps = 800; 56 htt->max_throughput_mbps = 800;
58 57
59 /* 58 /*
60 * Connect to HTC service.
61 * This has to be done before calling ath10k_htt_rx_attach,
62 * since ath10k_htt_rx_attach involves sending a rx ring configure
63 * message to the target.
64 */
65 ret = ath10k_htt_htc_attach(htt);
66 if (ret) {
67 ath10k_err("could not attach htt htc (%d)\n", ret);
68 goto err_htc_attach;
69 }
70
71 ret = ath10k_htt_tx_attach(htt);
72 if (ret) {
73 ath10k_err("could not attach htt tx (%d)\n", ret);
74 goto err_htc_attach;
75 }
76
77 ret = ath10k_htt_rx_attach(htt);
78 if (ret) {
79 ath10k_err("could not attach htt rx (%d)\n", ret);
80 goto err_rx_attach;
81 }
82
83 /*
84 * Prefetch enough data to satisfy target 59 * Prefetch enough data to satisfy target
85 * classification engine. 60 * classification engine.
86 * This is for LL chips. HL chips will probably 61 * This is for LL chips. HL chips will probably
@@ -93,11 +68,6 @@ int ath10k_htt_attach(struct ath10k *ar)
93 2; /* ip4 dscp or ip6 priority */ 68 2; /* ip4 dscp or ip6 priority */
94 69
95 return 0; 70 return 0;
96
97err_rx_attach:
98 ath10k_htt_tx_detach(htt);
99err_htc_attach:
100 return ret;
101} 71}
102 72
103#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) 73#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -117,7 +87,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
117 return 0; 87 return 0;
118} 88}
119 89
120int ath10k_htt_attach_target(struct ath10k_htt *htt) 90int ath10k_htt_setup(struct ath10k_htt *htt)
121{ 91{
122 int status; 92 int status;
123 93
@@ -140,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)
140 110
141 return ath10k_htt_send_rx_ring_cfg_ll(htt); 111 return ath10k_htt_send_rx_ring_cfg_ll(htt);
142} 112}
143
144void ath10k_htt_detach(struct ath10k_htt *htt)
145{
146 ath10k_htt_rx_detach(htt);
147 ath10k_htt_tx_detach(htt);
148}
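(Editor's aside: the attach/detach names above are split into init/alloc on the software side and connect/setup on the target side, and the point of the split is ordering. A stub-level sketch of the implied sequence; these are stand-in functions, not the driver's real signatures:)

#include <stdio.h>

static int htt_init(void)     { puts("init");     return 0; } /* was _attach */
static int htt_tx_alloc(void) { puts("tx alloc"); return 0; }
static int htt_rx_alloc(void) { puts("rx alloc"); return 0; }
static int htt_connect(void)  { puts("connect");  return 0; } /* needs live HTC */
static int htt_setup(void)    { puts("setup");    return 0; } /* rx ring cfg */

int main(void)
{
	/* software-only init and allocations happen before target boot ... */
	if (htt_init() || htt_tx_alloc() || htt_rx_alloc())
		return 1;
	/* ... target boots and HTC starts, then ... */
	if (htt_connect() || htt_setup())
		return 1;
	return 0;
}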
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 654867fc1ae7..9a263462c793 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/dmapool.h> 23#include <linux/dmapool.h>
24#include <net/mac80211.h>
24 25
25#include "htc.h" 26#include "htc.h"
26#include "rx_desc.h" 27#include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
1172 u16 peer_id; 1173 u16 peer_id;
1173}; 1174};
1174 1175
1175struct htt_rx_info {
1176 struct sk_buff *skb;
1177 enum htt_rx_mpdu_status status;
1178 enum htt_rx_mpdu_encrypt_type encrypt_type;
1179 s8 signal;
1180 struct {
1181 u8 info0;
1182 u32 info1;
1183 u32 info2;
1184 } rate;
1185
1186 u32 tsf;
1187 bool fcs_err;
1188 bool amsdu_more;
1189 bool mic_err;
1190};
1191
1192struct ath10k_htt_txbuf { 1176struct ath10k_htt_txbuf {
1193 struct htt_data_tx_desc_frag frags[2]; 1177 struct htt_data_tx_desc_frag frags[2];
1194 struct ath10k_htc_hdr htc_hdr; 1178 struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
1289 struct tasklet_struct txrx_compl_task; 1273 struct tasklet_struct txrx_compl_task;
1290 struct sk_buff_head tx_compl_q; 1274 struct sk_buff_head tx_compl_q;
1291 struct sk_buff_head rx_compl_q; 1275 struct sk_buff_head rx_compl_q;
1276
1277 /* rx_status template */
1278 struct ieee80211_rx_status rx_status;
1292}; 1279};
1293 1280
1294#define RX_HTT_HDR_STATUS_LEN 64 1281#define RX_HTT_HDR_STATUS_LEN 64
@@ -1341,14 +1328,16 @@ struct htt_rx_desc {
1341#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ 1328#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
1342#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) 1329#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
1343 1330
1344int ath10k_htt_attach(struct ath10k *ar); 1331int ath10k_htt_connect(struct ath10k_htt *htt);
1345int ath10k_htt_attach_target(struct ath10k_htt *htt); 1332int ath10k_htt_init(struct ath10k *ar);
1346void ath10k_htt_detach(struct ath10k_htt *htt); 1333int ath10k_htt_setup(struct ath10k_htt *htt);
1334
1335int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
1336void ath10k_htt_tx_free(struct ath10k_htt *htt);
1337
1338int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
1339void ath10k_htt_rx_free(struct ath10k_htt *htt);
1347 1340
1348int ath10k_htt_tx_attach(struct ath10k_htt *htt);
1349void ath10k_htt_tx_detach(struct ath10k_htt *htt);
1350int ath10k_htt_rx_attach(struct ath10k_htt *htt);
1351void ath10k_htt_rx_detach(struct ath10k_htt *htt);
1352void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1341void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1353void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 1342void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1354int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); 1343int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cdcbe2de95f9..6c102b1312ff 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
225 ath10k_htt_rx_msdu_buff_replenish(htt); 225 ath10k_htt_rx_msdu_buff_replenish(htt);
226} 226}
227 227
228void ath10k_htt_rx_detach(struct ath10k_htt *htt) 228static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
229{ 229{
230 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; 230 struct sk_buff *skb;
231 int i;
232
233 for (i = 0; i < htt->rx_ring.size; i++) {
234 skb = htt->rx_ring.netbufs_ring[i];
235 if (!skb)
236 continue;
231 237
238 dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
239 skb->len + skb_tailroom(skb),
240 DMA_FROM_DEVICE);
241 dev_kfree_skb_any(skb);
242 htt->rx_ring.netbufs_ring[i] = NULL;
243 }
244}
245
246void ath10k_htt_rx_free(struct ath10k_htt *htt)
247{
232 del_timer_sync(&htt->rx_ring.refill_retry_timer); 248 del_timer_sync(&htt->rx_ring.refill_retry_timer);
233 tasklet_kill(&htt->rx_replenish_task); 249 tasklet_kill(&htt->rx_replenish_task);
234 tasklet_kill(&htt->txrx_compl_task); 250 tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
236 skb_queue_purge(&htt->tx_compl_q); 252 skb_queue_purge(&htt->tx_compl_q);
237 skb_queue_purge(&htt->rx_compl_q); 253 skb_queue_purge(&htt->rx_compl_q);
238 254
239 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { 255 ath10k_htt_rx_ring_clean_up(htt);
240 struct sk_buff *skb =
241 htt->rx_ring.netbufs_ring[sw_rd_idx];
242 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
243
244 dma_unmap_single(htt->ar->dev, cb->paddr,
245 skb->len + skb_tailroom(skb),
246 DMA_FROM_DEVICE);
247 dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
248 sw_rd_idx++;
249 sw_rd_idx &= htt->rx_ring.size_mask;
250 }
251 256
252 dma_free_coherent(htt->ar->dev, 257 dma_free_coherent(htt->ar->dev,
253 (htt->rx_ring.size * 258 (htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
277 282
278 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 283 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
279 msdu = htt->rx_ring.netbufs_ring[idx]; 284 msdu = htt->rx_ring.netbufs_ring[idx];
285 htt->rx_ring.netbufs_ring[idx] = NULL;
280 286
281 idx++; 287 idx++;
282 idx &= htt->rx_ring.size_mask; 288 idx &= htt->rx_ring.size_mask;
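(Editor's aside: the two hunks above establish a simple ownership rule: pop NULLs the slot it takes, so teardown can walk every slot and free only buffers the ring still owns. A toy version of that convention:)

#include <stdlib.h>

#define RING_SIZE 8

static void *ring[RING_SIZE];

/* pop transfers ownership out of the ring by NULLing the slot */
static void *ring_pop(int idx)
{
	void *buf = ring[idx];

	ring[idx] = NULL;
	return buf;
}

/* cleanup frees only the slots the ring still owns */
static void ring_clean_up(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		free(ring[i]);		/* free(NULL) is a no-op */
		ring[i] = NULL;
	}
}

int main(void)
{
	ring[0] = malloc(16);
	ring[1] = malloc(16);
	free(ring_pop(0));	/* caller now owns slot 0's buffer */
	ring_clean_up();	/* frees slot 1 only; slot 0 is already NULL */
	return 0;
}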
@@ -297,6 +303,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
297 } 303 }
298} 304}
299 305
 306/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
300static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 307static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
301 u8 **fw_desc, int *fw_desc_len, 308 u8 **fw_desc, int *fw_desc_len,
302 struct sk_buff **head_msdu, 309 struct sk_buff **head_msdu,
@@ -305,12 +312,13 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
305 int msdu_len, msdu_chaining = 0; 312 int msdu_len, msdu_chaining = 0;
306 struct sk_buff *msdu; 313 struct sk_buff *msdu;
307 struct htt_rx_desc *rx_desc; 314 struct htt_rx_desc *rx_desc;
315 bool corrupted = false;
308 316
309 lockdep_assert_held(&htt->rx_ring.lock); 317 lockdep_assert_held(&htt->rx_ring.lock);
310 318
311 if (htt->rx_confused) { 319 if (htt->rx_confused) {
312 ath10k_warn("htt is confused. refusing rx\n"); 320 ath10k_warn("htt is confused. refusing rx\n");
313 return 0; 321 return -1;
314 } 322 }
315 323
316 msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); 324 msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -398,7 +406,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
398 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), 406 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
399 RX_MSDU_START_INFO0_MSDU_LENGTH); 407 RX_MSDU_START_INFO0_MSDU_LENGTH);
400 msdu_chained = rx_desc->frag_info.ring2_more_count; 408 msdu_chained = rx_desc->frag_info.ring2_more_count;
401 msdu_chaining = msdu_chained;
402 409
403 if (msdu_len_invalid) 410 if (msdu_len_invalid)
404 msdu_len = 0; 411 msdu_len = 0;
@@ -426,11 +433,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
426 433
427 msdu->next = next; 434 msdu->next = next;
428 msdu = next; 435 msdu = next;
436 msdu_chaining = 1;
429 } 437 }
430 438
431 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
432 RX_MSDU_END_INFO0_LAST_MSDU; 440 RX_MSDU_END_INFO0_LAST_MSDU;
433 441
442 if (msdu_chaining && !last_msdu)
443 corrupted = true;
444
434 if (last_msdu) { 445 if (last_msdu) {
435 msdu->next = NULL; 446 msdu->next = NULL;
436 break; 447 break;
@@ -442,6 +453,23 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
442 } 453 }
443 *tail_msdu = msdu; 454 *tail_msdu = msdu;
444 455
456 if (*head_msdu == NULL)
457 msdu_chaining = -1;
458
459 /*
460 * Apparently FW sometimes reports weird chained MSDU sequences with
461 * more than one rx descriptor. This seems like a bug but needs more
462 * analyzing. For the time being fix it by dropping such sequences to
463 * avoid blowing up the host system.
464 */
465 if (corrupted) {
466 ath10k_warn("failed to pop chained msdus, dropping\n");
467 ath10k_htt_rx_free_msdu_chain(*head_msdu);
468 *head_msdu = NULL;
469 *tail_msdu = NULL;
470 msdu_chaining = -EINVAL;
471 }
472
445 /* 473 /*
446 * Don't refill the ring yet. 474 * Don't refill the ring yet.
447 * 475 *
@@ -464,7 +492,7 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
464 ath10k_htt_rx_msdu_buff_replenish(htt); 492 ath10k_htt_rx_msdu_buff_replenish(htt);
465} 493}
466 494
467int ath10k_htt_rx_attach(struct ath10k_htt *htt) 495int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
468{ 496{
469 dma_addr_t paddr; 497 dma_addr_t paddr;
470 void *vaddr; 498 void *vaddr;
@@ -490,7 +518,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
490 htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); 518 htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
491 519
492 htt->rx_ring.netbufs_ring = 520 htt->rx_ring.netbufs_ring =
493 kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *), 521 kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
494 GFP_KERNEL); 522 GFP_KERNEL);
495 if (!htt->rx_ring.netbufs_ring) 523 if (!htt->rx_ring.netbufs_ring)
496 goto err_netbuf; 524 goto err_netbuf;
@@ -636,6 +664,203 @@ struct amsdu_subframe_hdr {
636 __be16 len; 664 __be16 len;
637} __packed; 665} __packed;
638 666
667static const u8 rx_legacy_rate_idx[] = {
668 3, /* 0x00 - 11Mbps */
669 2, /* 0x01 - 5.5Mbps */
670 1, /* 0x02 - 2Mbps */
671 0, /* 0x03 - 1Mbps */
672 3, /* 0x04 - 11Mbps */
673 2, /* 0x05 - 5.5Mbps */
674 1, /* 0x06 - 2Mbps */
675 0, /* 0x07 - 1Mbps */
676 10, /* 0x08 - 48Mbps */
677 8, /* 0x09 - 24Mbps */
678 6, /* 0x0A - 12Mbps */
679 4, /* 0x0B - 6Mbps */
680 11, /* 0x0C - 54Mbps */
681 9, /* 0x0D - 36Mbps */
682 7, /* 0x0E - 18Mbps */
683 5, /* 0x0F - 9Mbps */
684};
685
686static void ath10k_htt_rx_h_rates(struct ath10k *ar,
687 enum ieee80211_band band,
688 u8 info0, u32 info1, u32 info2,
689 struct ieee80211_rx_status *status)
690{
691 u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
692 u8 preamble = 0;
693
 694 /* Check whether the fields are valid */
695 if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
696 return;
697
698 preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
699
700 switch (preamble) {
701 case HTT_RX_LEGACY:
702 cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
703 rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
704 rate_idx = 0;
705
706 if (rate < 0x08 || rate > 0x0F)
707 break;
708
709 switch (band) {
710 case IEEE80211_BAND_2GHZ:
711 if (cck)
712 rate &= ~BIT(3);
713 rate_idx = rx_legacy_rate_idx[rate];
714 break;
715 case IEEE80211_BAND_5GHZ:
716 rate_idx = rx_legacy_rate_idx[rate];
 717 /* We are using the same rate table that is
 718 registered with the HW - ath10k_rates[]. For
 719 5GHz, skip the CCK rates, hence the -4 here */
720 rate_idx -= 4;
721 break;
722 default:
723 break;
724 }
725
726 status->rate_idx = rate_idx;
727 break;
728 case HTT_RX_HT:
729 case HTT_RX_HT_WITH_TXBF:
730 /* HT-SIG - Table 20-11 in info1 and info2 */
731 mcs = info1 & 0x1F;
732 nss = mcs >> 3;
733 bw = (info1 >> 7) & 1;
734 sgi = (info2 >> 7) & 1;
735
736 status->rate_idx = mcs;
737 status->flag |= RX_FLAG_HT;
738 if (sgi)
739 status->flag |= RX_FLAG_SHORT_GI;
740 if (bw)
741 status->flag |= RX_FLAG_40MHZ;
742 break;
743 case HTT_RX_VHT:
744 case HTT_RX_VHT_WITH_TXBF:
 745 /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2;
 746 TODO: verify this */
747 mcs = (info2 >> 4) & 0x0F;
748 nss = ((info1 >> 10) & 0x07) + 1;
749 bw = info1 & 3;
750 sgi = info2 & 1;
751
752 status->rate_idx = mcs;
753 status->vht_nss = nss;
754
755 if (sgi)
756 status->flag |= RX_FLAG_SHORT_GI;
757
758 switch (bw) {
759 /* 20MHZ */
760 case 0:
761 break;
762 /* 40MHZ */
763 case 1:
764 status->flag |= RX_FLAG_40MHZ;
765 break;
766 /* 80MHZ */
767 case 2:
768 status->vht_flag |= RX_VHT_FLAG_80MHZ;
769 }
770
771 status->flag |= RX_FLAG_VHT;
772 break;
773 default:
774 break;
775 }
776}
777
778static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
779 struct ieee80211_rx_status *rx_status,
780 struct sk_buff *skb,
781 enum htt_rx_mpdu_encrypt_type enctype,
782 enum rx_msdu_decap_format fmt,
783 bool dot11frag)
784{
785 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
786
787 rx_status->flag &= ~(RX_FLAG_DECRYPTED |
788 RX_FLAG_IV_STRIPPED |
789 RX_FLAG_MMIC_STRIPPED);
790
791 if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
792 return;
793
794 /*
795 * There's no explicit rx descriptor flag to indicate whether a given
796 * frame has been decrypted or not. We're forced to use the decap
797 * format as an implicit indication. However fragmentation rx is always
798 * raw and it probably never reports undecrypted raws.
799 *
800 * This makes sure sniffed frames are reported as-is without stripping
801 * the protected flag.
802 */
803 if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
804 return;
805
806 rx_status->flag |= RX_FLAG_DECRYPTED |
807 RX_FLAG_IV_STRIPPED |
808 RX_FLAG_MMIC_STRIPPED;
809 hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
810 ~IEEE80211_FCTL_PROTECTED);
811}
812
813static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
814 struct ieee80211_rx_status *status)
815{
816 struct ieee80211_channel *ch;
817
818 spin_lock_bh(&ar->data_lock);
819 ch = ar->scan_channel;
820 if (!ch)
821 ch = ar->rx_channel;
822 spin_unlock_bh(&ar->data_lock);
823
824 if (!ch)
825 return false;
826
827 status->band = ch->band;
828 status->freq = ch->center_freq;
829
830 return true;
831}
832
833static void ath10k_process_rx(struct ath10k *ar,
834 struct ieee80211_rx_status *rx_status,
835 struct sk_buff *skb)
836{
837 struct ieee80211_rx_status *status;
838
839 status = IEEE80211_SKB_RXCB(skb);
840 *status = *rx_status;
841
842 ath10k_dbg(ATH10K_DBG_DATA,
843 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
844 skb,
845 skb->len,
846 status->flag == 0 ? "legacy" : "",
847 status->flag & RX_FLAG_HT ? "ht" : "",
848 status->flag & RX_FLAG_VHT ? "vht" : "",
849 status->flag & RX_FLAG_40MHZ ? "40" : "",
850 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
851 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
852 status->rate_idx,
853 status->vht_nss,
854 status->freq,
855 status->band, status->flag,
856 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
857 !!(status->flag & RX_FLAG_MMIC_ERROR));
858 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
859 skb->data, skb->len);
860
861 ieee80211_rx(ar->hw, skb);
862}
863
639static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) 864static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
640{ 865{
641 /* nwifi header is padded to 4 bytes. this fixes 4addr rx */ 866 /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
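(Editor's aside: for the legacy-preamble path added above, the 4-bit hardware rate code indexes rx_legacy_rate_idx[] and the 5 GHz band subtracts 4 to skip the CCK entries. A standalone sketch of the lookup, simplified relative to the driver's range checks:)

#include <stdio.h>

static const unsigned char rx_legacy_rate_idx[16] = {
	3, 2, 1, 0, 3, 2, 1, 0,	/* CCK 11/5.5/2/1 Mbps, long+short preamble */
	10, 8, 6, 4, 11, 9, 7, 5,	/* OFDM 48/24/12/6/54/36/18/9 Mbps */
};

static int legacy_rate_to_idx(unsigned int rate, int is_5ghz, int cck)
{
	int idx;

	if (rate > 0x0F)
		return -1;		/* out of table */
	if (!is_5ghz && cck)
		rate &= ~(1u << 3);	/* fold short-preamble CCK codes */
	idx = rx_legacy_rate_idx[rate];
	if (is_5ghz)
		idx -= 4;		/* 5 GHz registers no CCK rates */
	return idx;
}

int main(void)
{
	printf("OFDM 54 Mbps on 5 GHz -> rate_idx %d\n",
	       legacy_rate_to_idx(0x0C, 1, 0));	/* 11 - 4 = 7 */
	return 0;
}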
@@ -643,11 +868,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
643} 868}
644 869
645static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, 870static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
646 struct htt_rx_info *info) 871 struct ieee80211_rx_status *rx_status,
872 struct sk_buff *skb_in)
647{ 873{
648 struct htt_rx_desc *rxd; 874 struct htt_rx_desc *rxd;
875 struct sk_buff *skb = skb_in;
649 struct sk_buff *first; 876 struct sk_buff *first;
650 struct sk_buff *skb = info->skb;
651 enum rx_msdu_decap_format fmt; 877 enum rx_msdu_decap_format fmt;
652 enum htt_rx_mpdu_encrypt_type enctype; 878 enum htt_rx_mpdu_encrypt_type enctype;
653 struct ieee80211_hdr *hdr; 879 struct ieee80211_hdr *hdr;
@@ -728,24 +954,28 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
728 break; 954 break;
729 } 955 }
730 956
731 info->skb = skb; 957 skb_in = skb;
732 info->encrypt_type = enctype; 958 ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
959 false);
733 skb = skb->next; 960 skb = skb->next;
734 info->skb->next = NULL; 961 skb_in->next = NULL;
735 962
736 if (skb) 963 if (skb)
737 info->amsdu_more = true; 964 rx_status->flag |= RX_FLAG_AMSDU_MORE;
965 else
966 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
738 967
739 ath10k_process_rx(htt->ar, info); 968 ath10k_process_rx(htt->ar, rx_status, skb_in);
740 } 969 }
741 970
742 /* FIXME: It might be nice to re-assemble the A-MSDU when there's a 971 /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
743 * monitor interface active for sniffing purposes. */ 972 * monitor interface active for sniffing purposes. */
744} 973}
745 974
746static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) 975static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
976 struct ieee80211_rx_status *rx_status,
977 struct sk_buff *skb)
747{ 978{
748 struct sk_buff *skb = info->skb;
749 struct htt_rx_desc *rxd; 979 struct htt_rx_desc *rxd;
750 struct ieee80211_hdr *hdr; 980 struct ieee80211_hdr *hdr;
751 enum rx_msdu_decap_format fmt; 981 enum rx_msdu_decap_format fmt;
@@ -808,66 +1038,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
808 break; 1038 break;
809 } 1039 }
810 1040
811 info->skb = skb; 1041 ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
812 info->encrypt_type = enctype;
813 1042
814 ath10k_process_rx(htt->ar, info); 1043 ath10k_process_rx(htt->ar, rx_status, skb);
815}
816
817static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
818{
819 struct htt_rx_desc *rxd;
820 u32 flags;
821
822 rxd = (void *)skb->data - sizeof(*rxd);
823 flags = __le32_to_cpu(rxd->attention.flags);
824
825 if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
826 return true;
827
828 return false;
829}
830
831static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
832{
833 struct htt_rx_desc *rxd;
834 u32 flags;
835
836 rxd = (void *)skb->data - sizeof(*rxd);
837 flags = __le32_to_cpu(rxd->attention.flags);
838
839 if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
840 return true;
841
842 return false;
843}
844
845static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
846{
847 struct htt_rx_desc *rxd;
848 u32 flags;
849
850 rxd = (void *)skb->data - sizeof(*rxd);
851 flags = __le32_to_cpu(rxd->attention.flags);
852
853 if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
854 return true;
855
856 return false;
857}
858
859static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
860{
861 struct htt_rx_desc *rxd;
862 u32 flags;
863
864 rxd = (void *)skb->data - sizeof(*rxd);
865 flags = __le32_to_cpu(rxd->attention.flags);
866
867 if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
868 return true;
869
870 return false;
871} 1044}
872 1045
873static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) 1046static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1125,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
952 return 0; 1125 return 0;
953} 1126}
954 1127
1128static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
1129 struct sk_buff *head,
1130 enum htt_rx_mpdu_status status,
1131 bool channel_set,
1132 u32 attention)
1133{
1134 if (head->len == 0) {
1135 ath10k_dbg(ATH10K_DBG_HTT,
1136 "htt rx dropping due to zero-len\n");
1137 return false;
1138 }
1139
1140 if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
1141 ath10k_dbg(ATH10K_DBG_HTT,
1142 "htt rx dropping due to decrypt-err\n");
1143 return false;
1144 }
1145
1146 if (!channel_set) {
1147 ath10k_warn("no channel configured; ignoring frame!\n");
1148 return false;
1149 }
1150
1151 /* Skip mgmt frames while we handle this in WMI */
1152 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
1153 attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
1154 ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1155 return false;
1156 }
1157
1158 if (status != HTT_RX_IND_MPDU_STATUS_OK &&
1159 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
1160 status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
1161 !htt->ar->monitor_started) {
1162 ath10k_dbg(ATH10K_DBG_HTT,
1163 "htt rx ignoring frame w/ status %d\n",
1164 status);
1165 return false;
1166 }
1167
1168 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
1169 ath10k_dbg(ATH10K_DBG_HTT,
1170 "htt rx CAC running\n");
1171 return false;
1172 }
1173
1174 return true;
1175}
1176
955static void ath10k_htt_rx_handler(struct ath10k_htt *htt, 1177static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
956 struct htt_rx_indication *rx) 1178 struct htt_rx_indication *rx)
957{ 1179{
958 struct htt_rx_info info; 1180 struct ieee80211_rx_status *rx_status = &htt->rx_status;
959 struct htt_rx_indication_mpdu_range *mpdu_ranges; 1181 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1182 struct htt_rx_desc *rxd;
1183 enum htt_rx_mpdu_status status;
960 struct ieee80211_hdr *hdr; 1184 struct ieee80211_hdr *hdr;
961 int num_mpdu_ranges; 1185 int num_mpdu_ranges;
1186 u32 attention;
962 int fw_desc_len; 1187 int fw_desc_len;
963 u8 *fw_desc; 1188 u8 *fw_desc;
1189 bool channel_set;
964 int i, j; 1190 int i, j;
1191 int ret;
965 1192
966 lockdep_assert_held(&htt->rx_ring.lock); 1193 lockdep_assert_held(&htt->rx_ring.lock);
967 1194
968 memset(&info, 0, sizeof(info));
969
970 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); 1195 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
971 fw_desc = (u8 *)&rx->fw_desc; 1196 fw_desc = (u8 *)&rx->fw_desc;
972 1197
@@ -974,106 +1199,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
974 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 1199 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
975 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 1200 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
976 1201
1202 /* Fill this once, while this is per-ppdu */
1203 if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
1204 memset(rx_status, 0, sizeof(*rx_status));
1205 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
1206 rx->ppdu.combined_rssi;
1207 }
1208
1209 if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
1210 /* TSF available only in 32-bit */
1211 rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
1212 rx_status->flag |= RX_FLAG_MACTIME_END;
1213 }
1214
1215 channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
1216
1217 if (channel_set) {
1218 ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
1219 rx->ppdu.info0,
1220 __le32_to_cpu(rx->ppdu.info1),
1221 __le32_to_cpu(rx->ppdu.info2),
1222 rx_status);
1223 }
1224
977 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 1225 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
978 rx, sizeof(*rx) + 1226 rx, sizeof(*rx) +
979 (sizeof(struct htt_rx_indication_mpdu_range) * 1227 (sizeof(struct htt_rx_indication_mpdu_range) *
980 num_mpdu_ranges)); 1228 num_mpdu_ranges));
981 1229
982 for (i = 0; i < num_mpdu_ranges; i++) { 1230 for (i = 0; i < num_mpdu_ranges; i++) {
983 info.status = mpdu_ranges[i].mpdu_range_status; 1231 status = mpdu_ranges[i].mpdu_range_status;
984 1232
985 for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { 1233 for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
986 struct sk_buff *msdu_head, *msdu_tail; 1234 struct sk_buff *msdu_head, *msdu_tail;
987 enum htt_rx_mpdu_status status;
988 int msdu_chaining;
989 1235
990 msdu_head = NULL; 1236 msdu_head = NULL;
991 msdu_tail = NULL; 1237 msdu_tail = NULL;
992 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, 1238 ret = ath10k_htt_rx_amsdu_pop(htt,
993 &fw_desc, 1239 &fw_desc,
994 &fw_desc_len, 1240 &fw_desc_len,
995 &msdu_head, 1241 &msdu_head,
996 &msdu_tail); 1242 &msdu_tail);
997 1243
998 if (!msdu_head) { 1244 if (ret < 0) {
999 ath10k_warn("htt rx no data!\n"); 1245 ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
1000 continue; 1246 ret);
1001 }
1002
1003 if (msdu_head->len == 0) {
1004 ath10k_dbg(ATH10K_DBG_HTT,
1005 "htt rx dropping due to zero-len\n");
1006 ath10k_htt_rx_free_msdu_chain(msdu_head); 1247 ath10k_htt_rx_free_msdu_chain(msdu_head);
1007 continue; 1248 continue;
1008 } 1249 }
1009 1250
1010 if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { 1251 rxd = container_of((void *)msdu_head->data,
1011 ath10k_dbg(ATH10K_DBG_HTT, 1252 struct htt_rx_desc,
1012 "htt rx dropping due to decrypt-err\n"); 1253 msdu_payload);
1013 ath10k_htt_rx_free_msdu_chain(msdu_head); 1254 attention = __le32_to_cpu(rxd->attention.flags);
1014 continue;
1015 }
1016 1255
1017 status = info.status; 1256 if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
1018 1257 status,
1019 /* Skip mgmt frames while we handle this in WMI */ 1258 channel_set,
1020 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || 1259 attention)) {
1021 ath10k_htt_rx_is_mgmt(msdu_head)) {
1022 ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1023 ath10k_htt_rx_free_msdu_chain(msdu_head); 1260 ath10k_htt_rx_free_msdu_chain(msdu_head);
1024 continue; 1261 continue;
1025 } 1262 }
1026 1263
1027 if (status != HTT_RX_IND_MPDU_STATUS_OK && 1264 if (ret > 0 &&
1028 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && 1265 ath10k_unchain_msdu(msdu_head) < 0) {
1029 status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
1030 !htt->ar->monitor_enabled) {
1031 ath10k_dbg(ATH10K_DBG_HTT,
1032 "htt rx ignoring frame w/ status %d\n",
1033 status);
1034 ath10k_htt_rx_free_msdu_chain(msdu_head); 1266 ath10k_htt_rx_free_msdu_chain(msdu_head);
1035 continue; 1267 continue;
1036 } 1268 }
1037 1269
1038 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { 1270 if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
1039 ath10k_dbg(ATH10K_DBG_HTT, 1271 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1040 "htt rx CAC running\n"); 1272 else
1041 ath10k_htt_rx_free_msdu_chain(msdu_head); 1273 rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
1042 continue;
1043 }
1044
1045 if (msdu_chaining &&
1046 (ath10k_unchain_msdu(msdu_head) < 0)) {
1047 ath10k_htt_rx_free_msdu_chain(msdu_head);
1048 continue;
1049 }
1050
1051 info.skb = msdu_head;
1052 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
1053 info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
1054
1055 if (info.fcs_err)
1056 ath10k_dbg(ATH10K_DBG_HTT,
1057 "htt rx has FCS err\n");
1058
1059 if (info.mic_err)
1060 ath10k_dbg(ATH10K_DBG_HTT,
1061 "htt rx has MIC err\n");
1062
1063 info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
1064 info.signal += rx->ppdu.combined_rssi;
1065 1274
1066 info.rate.info0 = rx->ppdu.info0; 1275 if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
1067 info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); 1276 rx_status->flag |= RX_FLAG_MMIC_ERROR;
1068 info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); 1277 else
1069 info.tsf = __le32_to_cpu(rx->ppdu.tsf); 1278 rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
1070 1279
1071 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); 1280 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
1072 1281
1073 if (ath10k_htt_rx_hdr_is_amsdu(hdr)) 1282 if (ath10k_htt_rx_hdr_is_amsdu(hdr))
1074 ath10k_htt_rx_amsdu(htt, &info); 1283 ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
1075 else 1284 else
1076 ath10k_htt_rx_msdu(htt, &info); 1285 ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
1077 } 1286 }
1078 } 1287 }
1079 1288
@@ -1084,11 +1293,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1084 struct htt_rx_fragment_indication *frag) 1293 struct htt_rx_fragment_indication *frag)
1085{ 1294{
1086 struct sk_buff *msdu_head, *msdu_tail; 1295 struct sk_buff *msdu_head, *msdu_tail;
1296 enum htt_rx_mpdu_encrypt_type enctype;
1087 struct htt_rx_desc *rxd; 1297 struct htt_rx_desc *rxd;
1088 enum rx_msdu_decap_format fmt; 1298 enum rx_msdu_decap_format fmt;
1089 struct htt_rx_info info = {}; 1299 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1090 struct ieee80211_hdr *hdr; 1300 struct ieee80211_hdr *hdr;
1091 int msdu_chaining; 1301 int ret;
1092 bool tkip_mic_err; 1302 bool tkip_mic_err;
1093 bool decrypt_err; 1303 bool decrypt_err;
1094 u8 *fw_desc; 1304 u8 *fw_desc;
@@ -1102,24 +1312,21 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1102 msdu_tail = NULL; 1312 msdu_tail = NULL;
1103 1313
1104 spin_lock_bh(&htt->rx_ring.lock); 1314 spin_lock_bh(&htt->rx_ring.lock);
1105 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, 1315 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1106 &msdu_head, &msdu_tail); 1316 &msdu_head, &msdu_tail);
1107 spin_unlock_bh(&htt->rx_ring.lock); 1317 spin_unlock_bh(&htt->rx_ring.lock);
1108 1318
1109 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); 1319 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1110 1320
1111 if (!msdu_head) { 1321 if (ret) {
 1112 ath10k_warn("htt rx frag no data\n"); 1322 ath10k_warn("failed to pop amsdu from htt rx ring for fragmented rx %d\n",
1113 return; 1323 ret);
1114 }
1115
1116 if (msdu_chaining || msdu_head != msdu_tail) {
1117 ath10k_warn("aggregation with fragmentation?!\n");
1118 ath10k_htt_rx_free_msdu_chain(msdu_head); 1324 ath10k_htt_rx_free_msdu_chain(msdu_head);
1119 return; 1325 return;
1120 } 1326 }
1121 1327
1122 /* FIXME: implement signal strength */ 1328 /* FIXME: implement signal strength */
1329 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1123 1330
1124 hdr = (struct ieee80211_hdr *)msdu_head->data; 1331 hdr = (struct ieee80211_hdr *)msdu_head->data;
1125 rxd = (void *)msdu_head->data - sizeof(*rxd); 1332 rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1136,57 +1343,55 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1136 goto end; 1343 goto end;
1137 } 1344 }
1138 1345
1139 info.skb = msdu_head; 1346 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1140 info.status = HTT_RX_IND_MPDU_STATUS_OK; 1347 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1141 info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), 1348 ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
1142 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1349 true);
1143 info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb); 1350 msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
1144 1351
1145 if (tkip_mic_err) { 1352 if (tkip_mic_err)
1146 ath10k_warn("tkip mic error\n"); 1353 ath10k_warn("tkip mic error\n");
1147 info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
1148 }
1149 1354
1150 if (decrypt_err) { 1355 if (decrypt_err) {
1151 ath10k_warn("decryption err in fragmented rx\n"); 1356 ath10k_warn("decryption err in fragmented rx\n");
1152 dev_kfree_skb_any(info.skb); 1357 dev_kfree_skb_any(msdu_head);
1153 goto end; 1358 goto end;
1154 } 1359 }
1155 1360
1156 if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { 1361 if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
1157 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1362 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1158 paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type); 1363 paramlen = ath10k_htt_rx_crypto_param_len(enctype);
1159 1364
1160 /* It is more efficient to move the header than the payload */ 1365 /* It is more efficient to move the header than the payload */
1161 memmove((void *)info.skb->data + paramlen, 1366 memmove((void *)msdu_head->data + paramlen,
1162 (void *)info.skb->data, 1367 (void *)msdu_head->data,
1163 hdrlen); 1368 hdrlen);
1164 skb_pull(info.skb, paramlen); 1369 skb_pull(msdu_head, paramlen);
1165 hdr = (struct ieee80211_hdr *)info.skb->data; 1370 hdr = (struct ieee80211_hdr *)msdu_head->data;
1166 } 1371 }
1167 1372
1168 /* remove trailing FCS */ 1373 /* remove trailing FCS */
1169 trim = 4; 1374 trim = 4;
1170 1375
1171 /* remove crypto trailer */ 1376 /* remove crypto trailer */
1172 trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type); 1377 trim += ath10k_htt_rx_crypto_tail_len(enctype);
1173 1378
1174 /* last fragment of TKIP frags has MIC */ 1379 /* last fragment of TKIP frags has MIC */
1175 if (!ieee80211_has_morefrags(hdr->frame_control) && 1380 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1176 info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1381 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1177 trim += 8; 1382 trim += 8;
1178 1383
1179 if (trim > info.skb->len) { 1384 if (trim > msdu_head->len) {
1180 ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); 1385 ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
1181 dev_kfree_skb_any(info.skb); 1386 dev_kfree_skb_any(msdu_head);
1182 goto end; 1387 goto end;
1183 } 1388 }
1184 1389
1185 skb_trim(info.skb, info.skb->len - trim); 1390 skb_trim(msdu_head, msdu_head->len - trim);
1186 1391
1187 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", 1392 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
1188 info.skb->data, info.skb->len); 1393 msdu_head->data, msdu_head->len);
1189 ath10k_process_rx(htt->ar, &info); 1394 ath10k_process_rx(htt->ar, rx_status, msdu_head);
1190 1395
1191end: 1396end:
1192 if (fw_desc_len > 0) { 1397 if (fw_desc_len > 0) {
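(Editor's aside: the scattered per-frame drop checks removed above are consolidated into one predicate, ath10k_htt_rx_amsdu_allowed(). A minimal standalone model of the same decision order; the flag values and names here are hypothetical stand-ins for the driver's constants:)

#include <stdbool.h>

#define F_DECRYPT_ERR	(1u << 0)
#define F_MGMT_TYPE	(1u << 1)

enum mpdu_status {
	ST_OK, ST_TKIP_MIC_ERR, ST_INV_PEER, ST_MGMT_CTRL, ST_OTHER,
};

static bool amsdu_allowed(unsigned int len, unsigned int attention,
			  enum mpdu_status st, bool channel_set,
			  bool monitor_started, bool cac_running)
{
	if (len == 0)
		return false;		/* zero-length frame */
	if (attention & F_DECRYPT_ERR)
		return false;		/* decrypt error */
	if (!channel_set)
		return false;		/* no channel to report against */
	if (st == ST_MGMT_CTRL || (attention & F_MGMT_TYPE))
		return false;		/* mgmt frames are handled via WMI */
	if (st != ST_OK && st != ST_TKIP_MIC_ERR && st != ST_INV_PEER &&
	    !monitor_started)
		return false;		/* unknown status, not sniffing */
	if (cac_running)
		return false;		/* drop everything during CAC */
	return true;
}

int main(void)
{
	return !amsdu_allowed(64, 0, ST_OK, true, false, false);
}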
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7a3e2e40dd5c..7064354d1f4f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -83,7 +83,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
83 __clear_bit(msdu_id, htt->used_msdu_ids); 83 __clear_bit(msdu_id, htt->used_msdu_ids);
84} 84}
85 85
86int ath10k_htt_tx_attach(struct ath10k_htt *htt) 86int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
87{ 87{
88 spin_lock_init(&htt->tx_lock); 88 spin_lock_init(&htt->tx_lock);
89 init_waitqueue_head(&htt->empty_tx_wq); 89 init_waitqueue_head(&htt->empty_tx_wq);
@@ -120,7 +120,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
120 return 0; 120 return 0;
121} 121}
122 122
123static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) 123static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
124{ 124{
125 struct htt_tx_done tx_done = {0}; 125 struct htt_tx_done tx_done = {0};
126 int msdu_id; 126 int msdu_id;
@@ -141,9 +141,9 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
141 spin_unlock_bh(&htt->tx_lock); 141 spin_unlock_bh(&htt->tx_lock);
142} 142}
143 143
144void ath10k_htt_tx_detach(struct ath10k_htt *htt) 144void ath10k_htt_tx_free(struct ath10k_htt *htt)
145{ 145{
146 ath10k_htt_tx_cleanup_pending(htt); 146 ath10k_htt_tx_free_pending(htt);
147 kfree(htt->pending_tx); 147 kfree(htt->pending_tx);
148 kfree(htt->used_msdu_ids); 148 kfree(htt->used_msdu_ids);
149 dma_pool_destroy(htt->tx_pool); 149 dma_pool_destroy(htt->tx_pool);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35fc44e281f5..007e855f4ba9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -28,6 +28,7 @@
28#define QCA988X_HW_2_0_CHIP_ID_REV 0x2 28#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
29#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" 29#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
30#define QCA988X_HW_2_0_FW_FILE "firmware.bin" 30#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
31#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
31#define QCA988X_HW_2_0_OTP_FILE "otp.bin" 32#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
32#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" 33#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
33#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 34#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 511a2f81e7af..a21080028c54 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -54,7 +54,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
54 switch (key->cipher) { 54 switch (key->cipher) {
55 case WLAN_CIPHER_SUITE_CCMP: 55 case WLAN_CIPHER_SUITE_CCMP:
56 arg.key_cipher = WMI_CIPHER_AES_CCM; 56 arg.key_cipher = WMI_CIPHER_AES_CCM;
57 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; 57 if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
58 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
59 else
60 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
58 break; 61 break;
59 case WLAN_CIPHER_SUITE_TKIP: 62 case WLAN_CIPHER_SUITE_TKIP:
60 arg.key_cipher = WMI_CIPHER_TKIP; 63 arg.key_cipher = WMI_CIPHER_TKIP;
@@ -165,7 +168,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
165 first_errno = ret; 168 first_errno = ret;
166 169
167 if (ret) 170 if (ret)
168 ath10k_warn("could not remove peer wep key %d (%d)\n", 171 ath10k_warn("failed to remove peer wep key %d: %d\n",
169 i, ret); 172 i, ret);
170 173
171 peer->keys[i] = NULL; 174 peer->keys[i] = NULL;
@@ -213,7 +216,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
213 first_errno = ret; 216 first_errno = ret;
214 217
215 if (ret) 218 if (ret)
216 ath10k_warn("could not remove key for %pM\n", addr); 219 ath10k_warn("failed to remove key for %pM: %d\n",
220 addr, ret);
217 } 221 }
218 222
219 return first_errno; 223 return first_errno;
@@ -323,14 +327,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
323 327
324 ret = ath10k_wmi_peer_create(ar, vdev_id, addr); 328 ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
325 if (ret) { 329 if (ret) {
326 ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n", 330 ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
327 addr, vdev_id, ret); 331 addr, vdev_id, ret);
328 return ret; 332 return ret;
329 } 333 }
330 334
331 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 335 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
332 if (ret) { 336 if (ret) {
333 ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n", 337 ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
334 addr, vdev_id, ret); 338 addr, vdev_id, ret);
335 return ret; 339 return ret;
336 } 340 }
@@ -351,7 +355,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
351 ret = ath10k_wmi_pdev_set_param(ar, param, 355 ret = ath10k_wmi_pdev_set_param(ar, param,
352 ATH10K_KICKOUT_THRESHOLD); 356 ATH10K_KICKOUT_THRESHOLD);
353 if (ret) { 357 if (ret) {
354 ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n", 358 ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
355 arvif->vdev_id, ret); 359 arvif->vdev_id, ret);
356 return ret; 360 return ret;
357 } 361 }
@@ -360,7 +364,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
360 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 364 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
361 ATH10K_KEEPALIVE_MIN_IDLE); 365 ATH10K_KEEPALIVE_MIN_IDLE);
362 if (ret) { 366 if (ret) {
363 ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n", 367 ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
364 arvif->vdev_id, ret); 368 arvif->vdev_id, ret);
365 return ret; 369 return ret;
366 } 370 }
@@ -369,7 +373,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
369 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 373 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
370 ATH10K_KEEPALIVE_MAX_IDLE); 374 ATH10K_KEEPALIVE_MAX_IDLE);
371 if (ret) { 375 if (ret) {
372 ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n", 376 ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
373 arvif->vdev_id, ret); 377 arvif->vdev_id, ret);
374 return ret; 378 return ret;
375 } 379 }
@@ -378,7 +382,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
378 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 382 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
379 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 383 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
380 if (ret) { 384 if (ret) {
381 ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 385 ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
382 arvif->vdev_id, ret); 386 arvif->vdev_id, ret);
383 return ret; 387 return ret;
384 } 388 }
@@ -488,92 +492,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
488 return 0; 492 return 0;
489} 493}
490 494
491static int ath10k_vdev_start(struct ath10k_vif *arvif) 495static bool ath10k_monitor_is_enabled(struct ath10k *ar)
492{ 496{
493 struct ath10k *ar = arvif->ar;
494 struct cfg80211_chan_def *chandef = &ar->chandef;
495 struct wmi_vdev_start_request_arg arg = {};
496 int ret = 0;
497
498 lockdep_assert_held(&ar->conf_mutex); 497 lockdep_assert_held(&ar->conf_mutex);
499 498
500 reinit_completion(&ar->vdev_setup_done);
501
502 arg.vdev_id = arvif->vdev_id;
503 arg.dtim_period = arvif->dtim_period;
504 arg.bcn_intval = arvif->beacon_interval;
505
506 arg.channel.freq = chandef->chan->center_freq;
507 arg.channel.band_center_freq1 = chandef->center_freq1;
508 arg.channel.mode = chan_to_phymode(chandef);
509
510 arg.channel.min_power = 0;
511 arg.channel.max_power = chandef->chan->max_power * 2;
512 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
513 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
514
515 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
516 arg.ssid = arvif->u.ap.ssid;
517 arg.ssid_len = arvif->u.ap.ssid_len;
518 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
519
520 /* For now allow DFS for AP mode */
521 arg.channel.chan_radar =
522 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
523 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
524 arg.ssid = arvif->vif->bss_conf.ssid;
525 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
526 }
527
528 ath10k_dbg(ATH10K_DBG_MAC, 499 ath10k_dbg(ATH10K_DBG_MAC,
529 "mac vdev %d start center_freq %d phymode %s\n", 500 "mac monitor refs: promisc %d monitor %d cac %d\n",
530 arg.vdev_id, arg.channel.freq, 501 ar->promisc, ar->monitor,
531 ath10k_wmi_phymode_str(arg.channel.mode)); 502 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
532 503
533 ret = ath10k_wmi_vdev_start(ar, &arg); 504 return ar->promisc || ar->monitor ||
534 if (ret) { 505 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
535 ath10k_warn("WMI vdev %i start failed: ret %d\n",
536 arg.vdev_id, ret);
537 return ret;
538 }
539
540 ret = ath10k_vdev_setup_sync(ar);
541 if (ret) {
542 ath10k_warn("vdev %i setup failed %d\n",
543 arg.vdev_id, ret);
544 return ret;
545 }
546
547 return ret;
548} 506}
549 507
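The new ath10k_monitor_is_enabled() above derives monitor state from three
reference sources instead of the old monitor_present/monitor_enabled flags.
A minimal userspace model of that check, assuming nothing beyond what the
hunk shows (names mirror the driver, but this is an illustrative sketch,
not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct ar_state {
        bool promisc;           /* FIF_PROMISC_IN_BSS requested */
        bool monitor;           /* IEEE80211_CONF_MONITOR requested */
        bool cac_running;       /* ATH10K_CAC_RUNNING dev flag */
};

/* the monitor vdev is wanted while any reference holder remains */
static bool monitor_is_enabled(const struct ar_state *ar)
{
        return ar->promisc || ar->monitor || ar->cac_running;
}

int main(void)
{
        struct ar_state ar = { .promisc = true };

        printf("enabled: %d\n", monitor_is_enabled(&ar));      /* 1 */
        ar.promisc = false;
        printf("enabled: %d\n", monitor_is_enabled(&ar));      /* 0 */
        return 0;
}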
550static int ath10k_vdev_stop(struct ath10k_vif *arvif) 508static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
551{
552 struct ath10k *ar = arvif->ar;
553 int ret;
554
555 lockdep_assert_held(&ar->conf_mutex);
556
557 reinit_completion(&ar->vdev_setup_done);
558
559 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
560 if (ret) {
561 ath10k_warn("WMI vdev %i stop failed: ret %d\n",
562 arvif->vdev_id, ret);
563 return ret;
564 }
565
566 ret = ath10k_vdev_setup_sync(ar);
567 if (ret) {
568 ath10k_warn("vdev %i setup sync failed %d\n",
569 arvif->vdev_id, ret);
570 return ret;
571 }
572
573 return ret;
574}
575
576static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
577{ 509{
578 struct cfg80211_chan_def *chandef = &ar->chandef; 510 struct cfg80211_chan_def *chandef = &ar->chandef;
579 struct ieee80211_channel *channel = chandef->chan; 511 struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +514,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
582 514
583 lockdep_assert_held(&ar->conf_mutex); 515 lockdep_assert_held(&ar->conf_mutex);
584 516
585 if (!ar->monitor_present) {
586 ath10k_warn("mac montor stop -- monitor is not present\n");
587 return -EINVAL;
588 }
589
590 arg.vdev_id = vdev_id; 517 arg.vdev_id = vdev_id;
591 arg.channel.freq = channel->center_freq; 518 arg.channel.freq = channel->center_freq;
592 arg.channel.band_center_freq1 = chandef->center_freq1; 519 arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +531,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
604 531
605 ret = ath10k_wmi_vdev_start(ar, &arg); 532 ret = ath10k_wmi_vdev_start(ar, &arg);
606 if (ret) { 533 if (ret) {
607 ath10k_warn("Monitor vdev %i start failed: ret %d\n", 534 ath10k_warn("failed to request monitor vdev %i start: %d\n",
608 vdev_id, ret); 535 vdev_id, ret);
609 return ret; 536 return ret;
610 } 537 }
611 538
612 ret = ath10k_vdev_setup_sync(ar); 539 ret = ath10k_vdev_setup_sync(ar);
613 if (ret) { 540 if (ret) {
614 ath10k_warn("Monitor vdev %i setup failed %d\n", 541 ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
615 vdev_id, ret); 542 vdev_id, ret);
616 return ret; 543 return ret;
617 } 544 }
618 545
619 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 546 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
620 if (ret) { 547 if (ret) {
621 ath10k_warn("Monitor vdev %i up failed: %d\n", 548 ath10k_warn("failed to put up monitor vdev %i: %d\n",
622 vdev_id, ret); 549 vdev_id, ret);
623 goto vdev_stop; 550 goto vdev_stop;
624 } 551 }
625 552
626 ar->monitor_vdev_id = vdev_id; 553 ar->monitor_vdev_id = vdev_id;
627 ar->monitor_enabled = true;
628 554
555 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
556 ar->monitor_vdev_id);
629 return 0; 557 return 0;
630 558
631vdev_stop: 559vdev_stop:
632 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 560 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
633 if (ret) 561 if (ret)
634 ath10k_warn("Monitor vdev %i stop failed: %d\n", 562 ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
635 ar->monitor_vdev_id, ret); 563 ar->monitor_vdev_id, ret);
636 564
637 return ret; 565 return ret;
638} 566}
639 567
640static int ath10k_monitor_stop(struct ath10k *ar) 568static int ath10k_monitor_vdev_stop(struct ath10k *ar)
641{ 569{
642 int ret = 0; 570 int ret = 0;
643 571
644 lockdep_assert_held(&ar->conf_mutex); 572 lockdep_assert_held(&ar->conf_mutex);
645 573
646 if (!ar->monitor_present) {
647 ath10k_warn("mac montor stop -- monitor is not present\n");
648 return -EINVAL;
649 }
650
651 if (!ar->monitor_enabled) {
652 ath10k_warn("mac montor stop -- monitor is not enabled\n");
653 return -EINVAL;
654 }
655
656 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 574 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
657 if (ret) 575 if (ret)
658 ath10k_warn("Monitor vdev %i down failed: %d\n", 576 ath10k_warn("failed to put down monitor vdev %i: %d\n",
659 ar->monitor_vdev_id, ret); 577 ar->monitor_vdev_id, ret);
660 578
661 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 579 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
662 if (ret) 580 if (ret)
663 ath10k_warn("Monitor vdev %i stop failed: %d\n", 581 ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
664 ar->monitor_vdev_id, ret); 582 ar->monitor_vdev_id, ret);
665 583
666 ret = ath10k_vdev_setup_sync(ar); 584 ret = ath10k_vdev_setup_sync(ar);
667 if (ret) 585 if (ret)
668 ath10k_warn("Monitor_down sync failed, vdev %i: %d\n", 586 ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
669 ar->monitor_vdev_id, ret); 587 ar->monitor_vdev_id, ret);
670 588
671 ar->monitor_enabled = false; 589 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
590 ar->monitor_vdev_id);
672 return ret; 591 return ret;
673} 592}
674 593
675static int ath10k_monitor_create(struct ath10k *ar) 594static int ath10k_monitor_vdev_create(struct ath10k *ar)
676{ 595{
677 int bit, ret = 0; 596 int bit, ret = 0;
678 597
679 lockdep_assert_held(&ar->conf_mutex); 598 lockdep_assert_held(&ar->conf_mutex);
680 599
681 if (ar->monitor_present) {
682 ath10k_warn("Monitor mode already enabled\n");
683 return 0;
684 }
685
686 bit = ffs(ar->free_vdev_map); 600 bit = ffs(ar->free_vdev_map);
687 if (bit == 0) { 601 if (bit == 0) {
688 ath10k_warn("No free VDEV slots\n"); 602 ath10k_warn("failed to find free vdev id for monitor vdev\n");
689 return -ENOMEM; 603 return -ENOMEM;
690 } 604 }
691 605
@@ -696,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
696 WMI_VDEV_TYPE_MONITOR, 610 WMI_VDEV_TYPE_MONITOR,
697 0, ar->mac_addr); 611 0, ar->mac_addr);
698 if (ret) { 612 if (ret) {
699 ath10k_warn("WMI vdev %i monitor create failed: ret %d\n", 613 ath10k_warn("failed to request monitor vdev %i creation: %d\n",
700 ar->monitor_vdev_id, ret); 614 ar->monitor_vdev_id, ret);
701 goto vdev_fail; 615 goto vdev_fail;
702 } 616 }
@@ -704,7 +618,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
704 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 618 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
705 ar->monitor_vdev_id); 619 ar->monitor_vdev_id);
706 620
707 ar->monitor_present = true;
708 return 0; 621 return 0;
709 622
710vdev_fail: 623vdev_fail:
@@ -715,48 +628,123 @@ vdev_fail:
715 return ret; 628 return ret;
716} 629}
717 630
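ath10k_monitor_vdev_create() above picks an id with ffs() on free_vdev_map:
ffs() returns the 1-based index of the lowest set bit, so bit == 0 means no
slot is free. The assignment of the claimed id is outside this hunk, but the
1 << ar->monitor_vdev_id release in ath10k_monitor_vdev_delete() suggests it
is bit - 1. A standalone sketch of that allocation scheme:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int free_vdev_map = 0x0d;      /* binary 1101: ids 0, 2, 3 free */
        int bit = ffs(free_vdev_map);

        if (bit == 0) {
                fprintf(stderr, "no free vdev id\n");   /* -ENOMEM in the driver */
                return 1;
        }

        int vdev_id = bit - 1;
        free_vdev_map &= ~(1u << vdev_id);      /* claim the slot */
        printf("allocated vdev id %d, map now 0x%x\n", vdev_id, free_vdev_map);

        free_vdev_map |= 1u << vdev_id;         /* release, as vdev_delete() does */
        return 0;
}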
718static int ath10k_monitor_destroy(struct ath10k *ar) 631static int ath10k_monitor_vdev_delete(struct ath10k *ar)
719{ 632{
720 int ret = 0; 633 int ret = 0;
721 634
722 lockdep_assert_held(&ar->conf_mutex); 635 lockdep_assert_held(&ar->conf_mutex);
723 636
724 if (!ar->monitor_present)
725 return 0;
726
727 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 637 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
728 if (ret) { 638 if (ret) {
729 ath10k_warn("WMI vdev %i monitor delete failed: %d\n", 639 ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
730 ar->monitor_vdev_id, ret); 640 ar->monitor_vdev_id, ret);
731 return ret; 641 return ret;
732 } 642 }
733 643
734 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); 644 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
735 ar->monitor_present = false;
736 645
737 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 646 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
738 ar->monitor_vdev_id); 647 ar->monitor_vdev_id);
739 return ret; 648 return ret;
740} 649}
741 650
742static int ath10k_start_cac(struct ath10k *ar) 651static int ath10k_monitor_start(struct ath10k *ar)
743{ 652{
744 int ret; 653 int ret;
745 654
746 lockdep_assert_held(&ar->conf_mutex); 655 lockdep_assert_held(&ar->conf_mutex);
747 656
748 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 657 if (!ath10k_monitor_is_enabled(ar)) {
658 ath10k_warn("trying to start monitor with no references\n");
659 return 0;
660 }
661
662 if (ar->monitor_started) {
663 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
664 return 0;
665 }
749 666
750 ret = ath10k_monitor_create(ar); 667 ret = ath10k_monitor_vdev_create(ar);
751 if (ret) { 668 if (ret) {
752 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 669 ath10k_warn("failed to create monitor vdev: %d\n", ret);
753 return ret; 670 return ret;
754 } 671 }
755 672
756 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); 673 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
674 if (ret) {
675 ath10k_warn("failed to start monitor vdev: %d\n", ret);
676 ath10k_monitor_vdev_delete(ar);
677 return ret;
678 }
679
680 ar->monitor_started = true;
681 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
682
683 return 0;
684}
685
686static void ath10k_monitor_stop(struct ath10k *ar)
687{
688 int ret;
689
690 lockdep_assert_held(&ar->conf_mutex);
691
692 if (ath10k_monitor_is_enabled(ar)) {
693 ath10k_dbg(ATH10K_DBG_MAC,
694 "mac monitor will be stopped later\n");
695 return;
696 }
697
698 if (!ar->monitor_started) {
699 ath10k_dbg(ATH10K_DBG_MAC,
700 "mac monitor probably failed to start earlier\n");
701 return;
702 }
703
704 ret = ath10k_monitor_vdev_stop(ar);
705 if (ret)
706 ath10k_warn("failed to stop monitor vdev: %d\n", ret);
707
708 ret = ath10k_monitor_vdev_delete(ar);
709 if (ret)
710 ath10k_warn("failed to delete monitor vdev: %d\n", ret);
711
712 ar->monitor_started = false;
713 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
714}
715
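Together, ath10k_monitor_start() and ath10k_monitor_stop() above act like a
reference-counted resource: start is idempotent once monitor_started is set,
and stop defers teardown while any reference (promisc, monitor, CAC) remains.
A compact model of that lifecycle with the vdev create/start/stop/delete
steps stubbed out; illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool promisc, monitor, started;

static bool is_enabled(void) { return promisc || monitor; }

static void monitor_start(void)
{
        if (!is_enabled() || started)
                return;
        started = true;         /* vdev create + start would run here */
}

static void monitor_stop(void)
{
        if (is_enabled() || !started)
                return;         /* "will be stopped later" */
        started = false;        /* vdev stop + delete would run here */
}

int main(void)
{
        promisc = true;
        monitor_start();
        monitor = true;
        monitor_start();        /* no-op: already started */
        promisc = false;
        monitor_stop();         /* deferred: monitor ref still held */
        printf("started=%d\n", started);        /* 1 */
        monitor = false;
        monitor_stop();
        printf("started=%d\n", started);        /* 0 */
        return 0;
}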
716static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
717{
718 struct ath10k *ar = arvif->ar;
719 u32 vdev_param, rts_cts = 0;
720
721 lockdep_assert_held(&ar->conf_mutex);
722
723 vdev_param = ar->wmi.vdev_param->enable_rtscts;
724
725 if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
726 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
727
728 if (arvif->num_legacy_stations > 0)
729 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
730 WMI_RTSCTS_PROFILE);
731
732 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
733 rts_cts);
734}
735
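ath10k_recalc_rtscts_prot() above packs two values into one vdev parameter
word with the driver's SM() set-mask helper. Below is a plausible
reconstruction of that idiom; the field masks are invented for the example,
and the real WMI_RTSCTS_* layout lives in the driver's wmi.h:

#include <stdio.h>

#define FIELD_LSB(mask)         (__builtin_ffs(mask) - 1)
#define SM(value, mask)         (((value) << FIELD_LSB(mask)) & (mask))

/* hypothetical field layout, for illustration only */
#define RTSCTS_SET_MASK         0x0000000f
#define RTSCTS_PROFILE_MASK     0x000000f0

int main(void)
{
        unsigned int rts_cts = 0;

        rts_cts |= SM(1, RTSCTS_SET_MASK);      /* rts/cts enabled */
        rts_cts |= SM(2, RTSCTS_PROFILE_MASK);  /* across sw retries */
        printf("rts_cts = 0x%08x\n", rts_cts);  /* 0x00000021 */
        return 0;
}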
736static int ath10k_start_cac(struct ath10k *ar)
737{
738 int ret;
739
740 lockdep_assert_held(&ar->conf_mutex);
741
742 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
743
744 ret = ath10k_monitor_start(ar);
757 if (ret) { 745 if (ret) {
746 ath10k_warn("failed to start monitor (cac): %d\n", ret);
758 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 747 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
759 ath10k_monitor_destroy(ar);
760 return ret; 748 return ret;
761 } 749 }
762 750
@@ -774,58 +762,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
774 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 762 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
775 return 0; 763 return 0;
776 764
777 ath10k_monitor_stop(ar);
778 ath10k_monitor_destroy(ar);
779 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 765 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
766 ath10k_monitor_stop(ar);
780 767
781 ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); 768 ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
782 769
783 return 0; 770 return 0;
784} 771}
785 772
786static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state) 773static void ath10k_recalc_radar_detection(struct ath10k *ar)
787{ 774{
788 switch (dfs_state) {
789 case NL80211_DFS_USABLE:
790 return "USABLE";
791 case NL80211_DFS_UNAVAILABLE:
792 return "UNAVAILABLE";
793 case NL80211_DFS_AVAILABLE:
794 return "AVAILABLE";
795 default:
796 WARN_ON(1);
797 return "bug";
798 }
799}
800
801static void ath10k_config_radar_detection(struct ath10k *ar)
802{
803 struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
804 bool radar = ar->hw->conf.radar_enabled;
805 bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
806 enum nl80211_dfs_state dfs_state = chan->dfs_state;
807 int ret; 775 int ret;
808 776
809 lockdep_assert_held(&ar->conf_mutex); 777 lockdep_assert_held(&ar->conf_mutex);
810 778
811 ath10k_dbg(ATH10K_DBG_MAC,
812 "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
813 chan->center_freq, radar, chan_radar,
814 ath10k_dfs_state(dfs_state));
815
816 /*
817 * It's safe to call it even if CAC is not started.
818 * This call here guarantees changing channel, etc. will stop CAC.
819 */
820 ath10k_stop_cac(ar); 779 ath10k_stop_cac(ar);
821 780
822 if (!radar) 781 if (!ar->radar_enabled)
823 return;
824
825 if (!chan_radar)
826 return; 782 return;
827 783
828 if (dfs_state != NL80211_DFS_USABLE) 784 if (ar->num_started_vdevs > 0)
829 return; 785 return;
830 786
831 ret = ath10k_start_cac(ar); 787 ret = ath10k_start_cac(ar);
@@ -835,11 +791,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
835 * radiation is not allowed, make this channel DFS_UNAVAILABLE 791 * radiation is not allowed, make this channel DFS_UNAVAILABLE
836 * by indicating that radar was detected. 792 * by indicating that radar was detected.
837 */ 793 */
838 ath10k_warn("failed to start CAC (%d)\n", ret); 794 ath10k_warn("failed to start CAC: %d\n", ret);
839 ieee80211_radar_detected(ar->hw); 795 ieee80211_radar_detected(ar->hw);
840 } 796 }
841} 797}
842 798
799static int ath10k_vdev_start(struct ath10k_vif *arvif)
800{
801 struct ath10k *ar = arvif->ar;
802 struct cfg80211_chan_def *chandef = &ar->chandef;
803 struct wmi_vdev_start_request_arg arg = {};
804 int ret = 0;
805
806 lockdep_assert_held(&ar->conf_mutex);
807
808 reinit_completion(&ar->vdev_setup_done);
809
810 arg.vdev_id = arvif->vdev_id;
811 arg.dtim_period = arvif->dtim_period;
812 arg.bcn_intval = arvif->beacon_interval;
813
814 arg.channel.freq = chandef->chan->center_freq;
815 arg.channel.band_center_freq1 = chandef->center_freq1;
816 arg.channel.mode = chan_to_phymode(chandef);
817
818 arg.channel.min_power = 0;
819 arg.channel.max_power = chandef->chan->max_power * 2;
820 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
821 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
822
823 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
824 arg.ssid = arvif->u.ap.ssid;
825 arg.ssid_len = arvif->u.ap.ssid_len;
826 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
827
828 /* For now allow DFS for AP mode */
829 arg.channel.chan_radar =
830 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
831 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
832 arg.ssid = arvif->vif->bss_conf.ssid;
833 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
834 }
835
836 ath10k_dbg(ATH10K_DBG_MAC,
837 "mac vdev %d start center_freq %d phymode %s\n",
838 arg.vdev_id, arg.channel.freq,
839 ath10k_wmi_phymode_str(arg.channel.mode));
840
841 ret = ath10k_wmi_vdev_start(ar, &arg);
842 if (ret) {
843 ath10k_warn("failed to start WMI vdev %i: %d\n",
844 arg.vdev_id, ret);
845 return ret;
846 }
847
848 ret = ath10k_vdev_setup_sync(ar);
849 if (ret) {
850 ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
851 arg.vdev_id, ret);
852 return ret;
853 }
854
855 ar->num_started_vdevs++;
856 ath10k_recalc_radar_detection(ar);
857
858 return ret;
859}
860
861static int ath10k_vdev_stop(struct ath10k_vif *arvif)
862{
863 struct ath10k *ar = arvif->ar;
864 int ret;
865
866 lockdep_assert_held(&ar->conf_mutex);
867
868 reinit_completion(&ar->vdev_setup_done);
869
870 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
871 if (ret) {
872 ath10k_warn("failed to stop WMI vdev %i: %d\n",
873 arvif->vdev_id, ret);
874 return ret;
875 }
876
877 ret = ath10k_vdev_setup_sync(ar);
878 if (ret) {
879 ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
880 arvif->vdev_id, ret);
881 return ret;
882 }
883
884 WARN_ON(ar->num_started_vdevs == 0);
885
886 if (ar->num_started_vdevs != 0) {
887 ar->num_started_vdevs--;
888 ath10k_recalc_radar_detection(ar);
889 }
890
891 return ret;
892}
893
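The num_started_vdevs counter added to ath10k_vdev_start() and
ath10k_vdev_stop() above feeds ath10k_recalc_radar_detection(): CAC is only
(re)started when radar detection is enabled and no vdev is running. A small
model of that predicate and the counter updates, illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct ar_model {
        bool radar_enabled;
        int num_started_vdevs;
};

/* mirrors the early returns in ath10k_recalc_radar_detection() */
static bool should_start_cac(const struct ar_model *ar)
{
        return ar->radar_enabled && ar->num_started_vdevs == 0;
}

int main(void)
{
        struct ar_model ar = { .radar_enabled = true };

        printf("cac: %d\n", should_start_cac(&ar));     /* 1 */
        ar.num_started_vdevs++;                         /* vdev_start() */
        printf("cac: %d\n", should_start_cac(&ar));     /* 0 */
        ar.num_started_vdevs--;                         /* vdev_stop() */
        printf("cac: %d\n", should_start_cac(&ar));     /* 1 again */
        return 0;
}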
843static void ath10k_control_beaconing(struct ath10k_vif *arvif, 894static void ath10k_control_beaconing(struct ath10k_vif *arvif,
844 struct ieee80211_bss_conf *info) 895 struct ieee80211_bss_conf *info)
845{ 896{
@@ -880,7 +931,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
880 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 931 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
881 arvif->bssid); 932 arvif->bssid);
882 if (ret) { 933 if (ret) {
883 ath10k_warn("Failed to bring up vdev %d: %i\n", 934 ath10k_warn("failed to bring up vdev %d: %i\n",
884 arvif->vdev_id, ret); 935 arvif->vdev_id, ret);
885 ath10k_vdev_stop(arvif); 936 ath10k_vdev_stop(arvif);
886 return; 937 return;
@@ -904,7 +955,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
904 if (!info->ibss_joined) { 955 if (!info->ibss_joined) {
905 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); 956 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
906 if (ret) 957 if (ret)
907 ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", 958 ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
908 self_peer, arvif->vdev_id, ret); 959 self_peer, arvif->vdev_id, ret);
909 960
910 if (is_zero_ether_addr(arvif->bssid)) 961 if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +964,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
913 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, 964 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
914 arvif->bssid); 965 arvif->bssid);
915 if (ret) { 966 if (ret) {
916 ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", 967 ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
917 arvif->bssid, arvif->vdev_id, ret); 968 arvif->bssid, arvif->vdev_id, ret);
918 return; 969 return;
919 } 970 }
@@ -925,7 +976,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
925 976
926 ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); 977 ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
927 if (ret) { 978 if (ret) {
928 ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", 979 ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
929 self_peer, arvif->vdev_id, ret); 980 self_peer, arvif->vdev_id, ret);
930 return; 981 return;
931 } 982 }
@@ -934,7 +985,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
934 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 985 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
935 ATH10K_DEFAULT_ATIM); 986 ATH10K_DEFAULT_ATIM);
936 if (ret) 987 if (ret)
937 ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", 988 ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
938 arvif->vdev_id, ret); 989 arvif->vdev_id, ret);
939} 990}
940 991
@@ -961,7 +1012,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
961 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1012 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
962 conf->dynamic_ps_timeout); 1013 conf->dynamic_ps_timeout);
963 if (ret) { 1014 if (ret) {
964 ath10k_warn("Failed to set inactivity time for vdev %d: %i\n", 1015 ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
965 arvif->vdev_id, ret); 1016 arvif->vdev_id, ret);
966 return ret; 1017 return ret;
967 } 1018 }
@@ -974,8 +1025,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
974 1025
975 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1026 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
976 if (ret) { 1027 if (ret) {
977 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", 1028 ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
978 psmode, arvif->vdev_id); 1029 psmode, arvif->vdev_id, ret);
979 return ret; 1030 return ret;
980 } 1031 }
981 1032
@@ -1429,7 +1480,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1429 1480
1430 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 1481 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
1431 if (!ap_sta) { 1482 if (!ap_sta) {
1432 ath10k_warn("Failed to find station entry for %pM, vdev %i\n", 1483 ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
1433 bss_conf->bssid, arvif->vdev_id); 1484 bss_conf->bssid, arvif->vdev_id);
1434 rcu_read_unlock(); 1485 rcu_read_unlock();
1435 return; 1486 return;
@@ -1442,7 +1493,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1442 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, 1493 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
1443 bss_conf, &peer_arg); 1494 bss_conf, &peer_arg);
1444 if (ret) { 1495 if (ret) {
1445 ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d", 1496 ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
1446 bss_conf->bssid, arvif->vdev_id, ret); 1497 bss_conf->bssid, arvif->vdev_id, ret);
1447 rcu_read_unlock(); 1498 rcu_read_unlock();
1448 return; 1499 return;
@@ -1452,7 +1503,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1452 1503
1453 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1504 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1454 if (ret) { 1505 if (ret) {
1455 ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d", 1506 ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
1456 bss_conf->bssid, arvif->vdev_id, ret); 1507 bss_conf->bssid, arvif->vdev_id, ret);
1457 return; 1508 return;
1458 } 1509 }
@@ -1473,7 +1524,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1473 1524
1474 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 1525 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
1475 if (ret) { 1526 if (ret) {
1476 ath10k_warn("VDEV: %d up failed: ret %d\n", 1527 ath10k_warn("failed to set vdev %d up: %d\n",
1477 arvif->vdev_id, ret); 1528 arvif->vdev_id, ret);
1478 return; 1529 return;
1479 } 1530 }
@@ -1524,7 +1575,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1524} 1575}
1525 1576
1526static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, 1577static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1527 struct ieee80211_sta *sta) 1578 struct ieee80211_sta *sta, bool reassoc)
1528{ 1579{
1529 struct wmi_peer_assoc_complete_arg peer_arg; 1580 struct wmi_peer_assoc_complete_arg peer_arg;
1530 int ret = 0; 1581 int ret = 0;
@@ -1533,34 +1584,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1533 1584
1534 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); 1585 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
1535 if (ret) { 1586 if (ret) {
1536 ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n", 1587 ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
1537 sta->addr, arvif->vdev_id, ret); 1588 sta->addr, arvif->vdev_id, ret);
1538 return ret; 1589 return ret;
1539 } 1590 }
1540 1591
1592 peer_arg.peer_reassoc = reassoc;
1541 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1593 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1542 if (ret) { 1594 if (ret) {
1543 ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n", 1595 ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
1544 sta->addr, arvif->vdev_id, ret); 1596 sta->addr, arvif->vdev_id, ret);
1545 return ret; 1597 return ret;
1546 } 1598 }
1547 1599
1548 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); 1600 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
1549 if (ret) { 1601 if (ret) {
1550 ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret); 1602 ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
1603 arvif->vdev_id, ret);
1551 return ret; 1604 return ret;
1552 } 1605 }
1553 1606
1607 if (!sta->wme) {
1608 arvif->num_legacy_stations++;
1609 ret = ath10k_recalc_rtscts_prot(arvif);
1610 if (ret) {
1611 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
1612 arvif->vdev_id, ret);
1613 return ret;
1614 }
1615 }
1616
1554 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 1617 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1555 if (ret) { 1618 if (ret) {
1556 ath10k_warn("could not install peer wep keys for vdev %i: %d\n", 1619 ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
1557 arvif->vdev_id, ret); 1620 arvif->vdev_id, ret);
1558 return ret; 1621 return ret;
1559 } 1622 }
1560 1623
1561 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 1624 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
1562 if (ret) { 1625 if (ret) {
1563 ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n", 1626 ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
1564 sta->addr, arvif->vdev_id, ret); 1627 sta->addr, arvif->vdev_id, ret);
1565 return ret; 1628 return ret;
1566 } 1629 }
@@ -1575,9 +1638,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1575 1638
1576 lockdep_assert_held(&ar->conf_mutex); 1639 lockdep_assert_held(&ar->conf_mutex);
1577 1640
1641 if (!sta->wme) {
1642 arvif->num_legacy_stations--;
1643 ret = ath10k_recalc_rtscts_prot(arvif);
1644 if (ret) {
1645 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
1646 arvif->vdev_id, ret);
1647 return ret;
1648 }
1649 }
1650
1578 ret = ath10k_clear_peer_keys(arvif, sta->addr); 1651 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1579 if (ret) { 1652 if (ret) {
1580 ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n", 1653 ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
1581 arvif->vdev_id, ret); 1654 arvif->vdev_id, ret);
1582 return ret; 1655 return ret;
1583 } 1656 }
@@ -1685,19 +1758,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
1685 return ret; 1758 return ret;
1686} 1759}
1687 1760
1761static enum wmi_dfs_region
1762ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
1763{
1764 switch (dfs_region) {
1765 case NL80211_DFS_UNSET:
1766 return WMI_UNINIT_DFS_DOMAIN;
1767 case NL80211_DFS_FCC:
1768 return WMI_FCC_DFS_DOMAIN;
1769 case NL80211_DFS_ETSI:
1770 return WMI_ETSI_DFS_DOMAIN;
1771 case NL80211_DFS_JP:
1772 return WMI_MKK4_DFS_DOMAIN;
1773 }
1774 return WMI_UNINIT_DFS_DOMAIN;
1775}
1776
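ath10k_mac_get_dfs_region() above is a plain enum translation with a safe
default. An equivalent standalone mapping, with locally defined enums
standing in for the nl80211 and WMI ones:

#include <stdio.h>

enum nl_dfs { NL_UNSET, NL_FCC, NL_ETSI, NL_JP };
enum wmi_dfs { WMI_UNINIT, WMI_FCC, WMI_ETSI, WMI_MKK4 };

static enum wmi_dfs get_dfs_region(enum nl_dfs region)
{
        switch (region) {
        case NL_FCC:
                return WMI_FCC;
        case NL_ETSI:
                return WMI_ETSI;
        case NL_JP:
                return WMI_MKK4;
        case NL_UNSET:
        default:
                return WMI_UNINIT;      /* unknown regions stay uninitialised */
        }
}

int main(void)
{
        printf("%d\n", get_dfs_region(NL_JP));  /* WMI_MKK4 */
        return 0;
}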
1688static void ath10k_regd_update(struct ath10k *ar) 1777static void ath10k_regd_update(struct ath10k *ar)
1689{ 1778{
1690 struct reg_dmn_pair_mapping *regpair; 1779 struct reg_dmn_pair_mapping *regpair;
1691 int ret; 1780 int ret;
1781 enum wmi_dfs_region wmi_dfs_reg;
1782 enum nl80211_dfs_regions nl_dfs_reg;
1692 1783
1693 lockdep_assert_held(&ar->conf_mutex); 1784 lockdep_assert_held(&ar->conf_mutex);
1694 1785
1695 ret = ath10k_update_channel_list(ar); 1786 ret = ath10k_update_channel_list(ar);
1696 if (ret) 1787 if (ret)
1697 ath10k_warn("could not update channel list (%d)\n", ret); 1788 ath10k_warn("failed to update channel list: %d\n", ret);
1698 1789
1699 regpair = ar->ath_common.regulatory.regpair; 1790 regpair = ar->ath_common.regulatory.regpair;
1700 1791
1792 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
1793 nl_dfs_reg = ar->dfs_detector->region;
1794 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
1795 } else {
1796 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
1797 }
1798
1701 /* Target allows setting up per-band regdomain but ath_common provides 1799 /* Target allows setting up per-band regdomain but ath_common provides
1702 * a combined one only */ 1800 * a combined one only */
1703 ret = ath10k_wmi_pdev_set_regdomain(ar, 1801 ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1803,10 @@ static void ath10k_regd_update(struct ath10k *ar)
1705 regpair->reg_domain, /* 2ghz */ 1803 regpair->reg_domain, /* 2ghz */
1706 regpair->reg_domain, /* 5ghz */ 1804 regpair->reg_domain, /* 5ghz */
1707 regpair->reg_2ghz_ctl, 1805 regpair->reg_2ghz_ctl,
1708 regpair->reg_5ghz_ctl); 1806 regpair->reg_5ghz_ctl,
1807 wmi_dfs_reg);
1709 if (ret) 1808 if (ret)
1710 ath10k_warn("could not set pdev regdomain (%d)\n", ret); 1809 ath10k_warn("failed to set pdev regdomain: %d\n", ret);
1711} 1810}
1712 1811
1713static void ath10k_reg_notifier(struct wiphy *wiphy, 1812static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1824,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
1725 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 1824 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
1726 request->dfs_region); 1825 request->dfs_region);
1727 if (!result) 1826 if (!result)
1728 ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n", 1827 ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
1729 request->dfs_region); 1828 request->dfs_region);
1730 } 1829 }
1731 1830
@@ -1759,10 +1858,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
1759 if (info->control.vif) 1858 if (info->control.vif)
1760 return ath10k_vif_to_arvif(info->control.vif)->vdev_id; 1859 return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
1761 1860
1762 if (ar->monitor_enabled) 1861 if (ar->monitor_started)
1763 return ar->monitor_vdev_id; 1862 return ar->monitor_vdev_id;
1764 1863
1765 ath10k_warn("could not resolve vdev id\n"); 1864 ath10k_warn("failed to resolve vdev id\n");
1766 return 0; 1865 return 0;
1767} 1866}
1768 1867
@@ -1792,8 +1891,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
1792 wep_key_work); 1891 wep_key_work);
1793 int ret, keyidx = arvif->def_wep_key_newidx; 1892 int ret, keyidx = arvif->def_wep_key_newidx;
1794 1893
1894 mutex_lock(&arvif->ar->conf_mutex);
1895
1896 if (arvif->ar->state != ATH10K_STATE_ON)
1897 goto unlock;
1898
1795 if (arvif->def_wep_key_idx == keyidx) 1899 if (arvif->def_wep_key_idx == keyidx)
1796 return; 1900 goto unlock;
1797 1901
1798 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 1902 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
1799 arvif->vdev_id, keyidx); 1903 arvif->vdev_id, keyidx);
@@ -1803,11 +1907,16 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
1803 arvif->ar->wmi.vdev_param->def_keyid, 1907 arvif->ar->wmi.vdev_param->def_keyid,
1804 keyidx); 1908 keyidx);
1805 if (ret) { 1909 if (ret) {
1806 ath10k_warn("could not update wep keyidx (%d)\n", ret); 1910 ath10k_warn("failed to update wep key index for vdev %d: %d\n",
1807 return; 1911 arvif->vdev_id,
1912 ret);
1913 goto unlock;
1808 } 1914 }
1809 1915
1810 arvif->def_wep_key_idx = keyidx; 1916 arvif->def_wep_key_idx = keyidx;
1917
1918unlock:
1919 mutex_unlock(&arvif->ar->conf_mutex);
1811} 1920}
1812 1921
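The wep-key worker above now takes conf_mutex, so its early returns become
goto unlock to keep the lock balanced on every path. A minimal pthread
sketch of the same shape, with the WMI call stubbed and names chosen for
illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int state_on = 1, cur_idx;

static void set_keyidx(int newidx)
{
        pthread_mutex_lock(&conf_mutex);

        if (!state_on)
                goto unlock;            /* device not running */
        if (cur_idx == newidx)
                goto unlock;            /* nothing to do */

        cur_idx = newidx;               /* the vdev_set_param call goes here */
unlock:
        pthread_mutex_unlock(&conf_mutex);
}

int main(void)
{
        set_keyidx(3);
        printf("idx %d\n", cur_idx);    /* 3 */
        return 0;
}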
1813static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) 1922static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1879,7 +1988,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1879 ar->fw_features)) { 1988 ar->fw_features)) {
1880 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= 1989 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
1881 ATH10K_MAX_NUM_MGMT_PENDING) { 1990 ATH10K_MAX_NUM_MGMT_PENDING) {
1882 ath10k_warn("wmi mgmt_tx queue limit reached\n"); 1991 ath10k_warn("reached WMI management tranmist queue limit\n");
1883 ret = -EBUSY; 1992 ret = -EBUSY;
1884 goto exit; 1993 goto exit;
1885 } 1994 }
@@ -1903,7 +2012,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1903 2012
1904exit: 2013exit:
1905 if (ret) { 2014 if (ret) {
1906 ath10k_warn("tx failed (%d). dropping packet.\n", ret); 2015 ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
1907 ieee80211_free_txskb(ar->hw, skb); 2016 ieee80211_free_txskb(ar->hw, skb);
1908 } 2017 }
1909} 2018}
@@ -1964,7 +2073,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1964 if (!peer) { 2073 if (!peer) {
1965 ret = ath10k_peer_create(ar, vdev_id, peer_addr); 2074 ret = ath10k_peer_create(ar, vdev_id, peer_addr);
1966 if (ret) 2075 if (ret)
1967 ath10k_warn("peer %pM on vdev %d not created (%d)\n", 2076 ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
1968 peer_addr, vdev_id, ret); 2077 peer_addr, vdev_id, ret);
1969 } 2078 }
1970 2079
@@ -1984,7 +2093,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1984 if (!peer) { 2093 if (!peer) {
1985 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 2094 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
1986 if (ret) 2095 if (ret)
1987 ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", 2096 ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
1988 peer_addr, vdev_id, ret); 2097 peer_addr, vdev_id, ret);
1989 } 2098 }
1990 2099
@@ -2018,7 +2127,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
2018 2127
2019 ret = ath10k_wmi_mgmt_tx(ar, skb); 2128 ret = ath10k_wmi_mgmt_tx(ar, skb);
2020 if (ret) { 2129 if (ret) {
2021 ath10k_warn("wmi mgmt_tx failed (%d)\n", ret); 2130 ath10k_warn("failed to transmit management frame via WMI: %d\n",
2131 ret);
2022 ieee80211_free_txskb(ar->hw, skb); 2132 ieee80211_free_txskb(ar->hw, skb);
2023 } 2133 }
2024 } 2134 }
@@ -2043,7 +2153,7 @@ void ath10k_reset_scan(unsigned long ptr)
2043 return; 2153 return;
2044 } 2154 }
2045 2155
2046 ath10k_warn("scan timeout. resetting. fw issue?\n"); 2156 ath10k_warn("scan timed out, firmware problem?\n");
2047 2157
2048 if (ar->scan.is_roc) 2158 if (ar->scan.is_roc)
2049 ieee80211_remain_on_channel_expired(ar->hw); 2159 ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2189,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
2079 2189
2080 ret = ath10k_wmi_stop_scan(ar, &arg); 2190 ret = ath10k_wmi_stop_scan(ar, &arg);
2081 if (ret) { 2191 if (ret) {
2082 ath10k_warn("could not submit wmi stop scan (%d)\n", ret); 2192 ath10k_warn("failed to stop wmi scan: %d\n", ret);
2083 spin_lock_bh(&ar->data_lock); 2193 spin_lock_bh(&ar->data_lock);
2084 ar->scan.in_progress = false; 2194 ar->scan.in_progress = false;
2085 ath10k_offchan_tx_purge(ar); 2195 ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2209,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
2099 2209
2100 spin_lock_bh(&ar->data_lock); 2210 spin_lock_bh(&ar->data_lock);
2101 if (ar->scan.in_progress) { 2211 if (ar->scan.in_progress) {
2102 ath10k_warn("could not stop scan. its still in progress\n"); 2212 ath10k_warn("failed to stop scan, it's still in progress\n");
2103 ar->scan.in_progress = false; 2213 ar->scan.in_progress = false;
2104 ath10k_offchan_tx_purge(ar); 2214 ath10k_offchan_tx_purge(ar);
2105 ret = -ETIMEDOUT; 2215 ret = -ETIMEDOUT;
@@ -2187,72 +2297,171 @@ static void ath10k_tx(struct ieee80211_hw *hw,
2187 ath10k_tx_htt(ar, skb); 2297 ath10k_tx_htt(ar, skb);
2188} 2298}
2189 2299
2190/* 2300/* Must not be called with conf_mutex held as workers can use that also. */
2191 * Initialize various parameters with default vaules. 2301static void ath10k_drain_tx(struct ath10k *ar)
2192 */ 2302{
2303 /* make sure rcu-protected mac80211 tx path itself is drained */
2304 synchronize_net();
2305
2306 ath10k_offchan_tx_purge(ar);
2307 ath10k_mgmt_over_wmi_tx_purge(ar);
2308
2309 cancel_work_sync(&ar->offchan_tx_work);
2310 cancel_work_sync(&ar->wmi_mgmt_tx_work);
2311}
2312
2193void ath10k_halt(struct ath10k *ar) 2313void ath10k_halt(struct ath10k *ar)
2194{ 2314{
2315 struct ath10k_vif *arvif;
2316
2195 lockdep_assert_held(&ar->conf_mutex); 2317 lockdep_assert_held(&ar->conf_mutex);
2196 2318
2197 ath10k_stop_cac(ar); 2319 if (ath10k_monitor_is_enabled(ar)) {
2320 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
2321 ar->promisc = false;
2322 ar->monitor = false;
2323 ath10k_monitor_stop(ar);
2324 }
2325
2198 del_timer_sync(&ar->scan.timeout); 2326 del_timer_sync(&ar->scan.timeout);
2199 ath10k_offchan_tx_purge(ar); 2327 ath10k_reset_scan((unsigned long)ar);
2200 ath10k_mgmt_over_wmi_tx_purge(ar);
2201 ath10k_peer_cleanup_all(ar); 2328 ath10k_peer_cleanup_all(ar);
2202 ath10k_core_stop(ar); 2329 ath10k_core_stop(ar);
2203 ath10k_hif_power_down(ar); 2330 ath10k_hif_power_down(ar);
2204 2331
2205 spin_lock_bh(&ar->data_lock); 2332 spin_lock_bh(&ar->data_lock);
2206 if (ar->scan.in_progress) { 2333 list_for_each_entry(arvif, &ar->arvifs, list) {
2207 del_timer(&ar->scan.timeout); 2334 if (!arvif->beacon)
2208 ar->scan.in_progress = false; 2335 continue;
2209 ieee80211_scan_completed(ar->hw, true); 2336
2337 dma_unmap_single(arvif->ar->dev,
2338 ATH10K_SKB_CB(arvif->beacon)->paddr,
2339 arvif->beacon->len, DMA_TO_DEVICE);
2340 dev_kfree_skb_any(arvif->beacon);
2341 arvif->beacon = NULL;
2210 } 2342 }
2211 spin_unlock_bh(&ar->data_lock); 2343 spin_unlock_bh(&ar->data_lock);
2212} 2344}
2213 2345
2346static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2347{
2348 struct ath10k *ar = hw->priv;
2349
2350 mutex_lock(&ar->conf_mutex);
2351
2352 if (ar->cfg_tx_chainmask) {
2353 *tx_ant = ar->cfg_tx_chainmask;
2354 *rx_ant = ar->cfg_rx_chainmask;
2355 } else {
2356 *tx_ant = ar->supp_tx_chainmask;
2357 *rx_ant = ar->supp_rx_chainmask;
2358 }
2359
2360 mutex_unlock(&ar->conf_mutex);
2361
2362 return 0;
2363}
2364
2365static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
2366{
2367 int ret;
2368
2369 lockdep_assert_held(&ar->conf_mutex);
2370
2371 ar->cfg_tx_chainmask = tx_ant;
2372 ar->cfg_rx_chainmask = rx_ant;
2373
2374 if ((ar->state != ATH10K_STATE_ON) &&
2375 (ar->state != ATH10K_STATE_RESTARTED))
2376 return 0;
2377
2378 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
2379 tx_ant);
2380 if (ret) {
2381 ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
2382 ret, tx_ant);
2383 return ret;
2384 }
2385
2386 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
2387 rx_ant);
2388 if (ret) {
2389 ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
2390 ret, rx_ant);
2391 return ret;
2392 }
2393
2394 return 0;
2395}
2396
2397static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
2398{
2399 struct ath10k *ar = hw->priv;
2400 int ret;
2401
2402 mutex_lock(&ar->conf_mutex);
2403 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
2404 mutex_unlock(&ar->conf_mutex);
2405 return ret;
2406}
2407
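The antenna handlers above cache the requested chainmasks unconditionally
and only push them to firmware while the device is running; ath10k_start()
later replays the cache through __ath10k_set_antenna(). A userspace model of
that deferred-apply pattern, illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct dev_model {
        bool on;
        unsigned int cfg_tx, cfg_rx;    /* cached request (0 = unset) */
        unsigned int hw_tx, hw_rx;      /* what "firmware" has */
};

static int set_antenna(struct dev_model *d, unsigned int tx, unsigned int rx)
{
        d->cfg_tx = tx;
        d->cfg_rx = rx;
        if (!d->on)
                return 0;               /* applied later, on start */
        d->hw_tx = tx;                  /* pdev_set_param would run here */
        d->hw_rx = rx;
        return 0;
}

static void start(struct dev_model *d)
{
        d->on = true;
        if (d->cfg_tx)                  /* replay the cached request */
                set_antenna(d, d->cfg_tx, d->cfg_rx);
}

int main(void)
{
        struct dev_model d = { 0 };

        set_antenna(&d, 0x3, 0x3);      /* device still off */
        start(&d);
        printf("hw tx/rx: 0x%x/0x%x\n", d.hw_tx, d.hw_rx);      /* 0x3/0x3 */
        return 0;
}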
2214static int ath10k_start(struct ieee80211_hw *hw) 2408static int ath10k_start(struct ieee80211_hw *hw)
2215{ 2409{
2216 struct ath10k *ar = hw->priv; 2410 struct ath10k *ar = hw->priv;
2217 int ret = 0; 2411 int ret = 0;
2218 2412
2413 /*
2414 * This makes sense only when restarting hw. It is harmless to call
2415 * unconditionally. This is necessary to make sure no HTT/WMI tx
2416 * commands will be submitted while restarting.
2417 */
2418 ath10k_drain_tx(ar);
2419
2219 mutex_lock(&ar->conf_mutex); 2420 mutex_lock(&ar->conf_mutex);
2220 2421
2221 if (ar->state != ATH10K_STATE_OFF && 2422 switch (ar->state) {
2222 ar->state != ATH10K_STATE_RESTARTING) { 2423 case ATH10K_STATE_OFF:
2424 ar->state = ATH10K_STATE_ON;
2425 break;
2426 case ATH10K_STATE_RESTARTING:
2427 ath10k_halt(ar);
2428 ar->state = ATH10K_STATE_RESTARTED;
2429 break;
2430 case ATH10K_STATE_ON:
2431 case ATH10K_STATE_RESTARTED:
2432 case ATH10K_STATE_WEDGED:
2433 WARN_ON(1);
2223 ret = -EINVAL; 2434 ret = -EINVAL;
2224 goto exit; 2435 goto err;
2225 } 2436 }
2226 2437
2227 ret = ath10k_hif_power_up(ar); 2438 ret = ath10k_hif_power_up(ar);
2228 if (ret) { 2439 if (ret) {
2229 ath10k_err("could not init hif (%d)\n", ret); 2440 ath10k_err("Could not init hif: %d\n", ret);
2230 ar->state = ATH10K_STATE_OFF; 2441 goto err_off;
2231 goto exit;
2232 } 2442 }
2233 2443
2234 ret = ath10k_core_start(ar); 2444 ret = ath10k_core_start(ar);
2235 if (ret) { 2445 if (ret) {
2236 ath10k_err("could not init core (%d)\n", ret); 2446 ath10k_err("Could not init core: %d\n", ret);
2237 ath10k_hif_power_down(ar); 2447 goto err_power_down;
2238 ar->state = ATH10K_STATE_OFF;
2239 goto exit;
2240 } 2448 }
2241 2449
2242 if (ar->state == ATH10K_STATE_OFF)
2243 ar->state = ATH10K_STATE_ON;
2244 else if (ar->state == ATH10K_STATE_RESTARTING)
2245 ar->state = ATH10K_STATE_RESTARTED;
2246
2247 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); 2450 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
2248 if (ret) 2451 if (ret) {
2249 ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", 2452 ath10k_warn("failed to enable PMF QOS: %d\n", ret);
2250 ret); 2453 goto err_core_stop;
2454 }
2251 2455
2252 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); 2456 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
2253 if (ret) 2457 if (ret) {
2254 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", 2458 ath10k_warn("failed to enable dynamic BW: %d\n", ret);
2255 ret); 2459 goto err_core_stop;
2460 }
2461
2462 if (ar->cfg_tx_chainmask)
2463 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
2464 ar->cfg_rx_chainmask);
2256 2465
2257 /* 2466 /*
2258 * By default FW set ARP frames ac to voice (6). In that case ARP 2467 * By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,15 +2475,27 @@ static int ath10k_start(struct ieee80211_hw *hw)
2266 ret = ath10k_wmi_pdev_set_param(ar, 2475 ret = ath10k_wmi_pdev_set_param(ar,
2267 ar->wmi.pdev_param->arp_ac_override, 0); 2476 ar->wmi.pdev_param->arp_ac_override, 0);
2268 if (ret) { 2477 if (ret) {
2269 ath10k_warn("could not set arp ac override parameter: %d\n", 2478 ath10k_warn("failed to set arp ac override parameter: %d\n",
2270 ret); 2479 ret);
2271 goto exit; 2480 goto err_core_stop;
2272 } 2481 }
2273 2482
2483 ar->num_started_vdevs = 0;
2274 ath10k_regd_update(ar); 2484 ath10k_regd_update(ar);
2275 ret = 0;
2276 2485
2277exit: 2486 mutex_unlock(&ar->conf_mutex);
2487 return 0;
2488
2489err_core_stop:
2490 ath10k_core_stop(ar);
2491
2492err_power_down:
2493 ath10k_hif_power_down(ar);
2494
2495err_off:
2496 ar->state = ATH10K_STATE_OFF;
2497
2498err:
2278 mutex_unlock(&ar->conf_mutex); 2499 mutex_unlock(&ar->conf_mutex);
2279 return ret; 2500 return ret;
2280} 2501}
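The rewritten ath10k_start() above replaces the old flag checks with an
explicit state switch and an unwinding error path: OFF becomes ON,
RESTARTING is halted first and becomes RESTARTED, and any other state is a
driver bug. A sketch of just those transitions:

#include <stdio.h>

enum state { S_OFF, S_ON, S_RESTARTING, S_RESTARTED, S_WEDGED };

static int start(enum state *s)
{
        switch (*s) {
        case S_OFF:
                *s = S_ON;
                return 0;
        case S_RESTARTING:
                /* ath10k_halt() would run here */
                *s = S_RESTARTED;
                return 0;
        default:
                return -1;              /* WARN_ON + -EINVAL in the driver */
        }
}

int main(void)
{
        enum state s = S_RESTARTING;
        int ret = start(&s);

        printf("ret %d, state %d\n", ret, s);   /* ret 0, state 3 */
        return 0;
}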
@@ -2283,19 +2504,15 @@ static void ath10k_stop(struct ieee80211_hw *hw)
2283{ 2504{
2284 struct ath10k *ar = hw->priv; 2505 struct ath10k *ar = hw->priv;
2285 2506
2507 ath10k_drain_tx(ar);
2508
2286 mutex_lock(&ar->conf_mutex); 2509 mutex_lock(&ar->conf_mutex);
2287 if (ar->state == ATH10K_STATE_ON || 2510 if (ar->state != ATH10K_STATE_OFF) {
2288 ar->state == ATH10K_STATE_RESTARTED ||
2289 ar->state == ATH10K_STATE_WEDGED)
2290 ath10k_halt(ar); 2511 ath10k_halt(ar);
2291 2512 ar->state = ATH10K_STATE_OFF;
2292 ar->state = ATH10K_STATE_OFF; 2513 }
2293 mutex_unlock(&ar->conf_mutex); 2514 mutex_unlock(&ar->conf_mutex);
2294 2515
2295 ath10k_mgmt_over_wmi_tx_purge(ar);
2296
2297 cancel_work_sync(&ar->offchan_tx_work);
2298 cancel_work_sync(&ar->wmi_mgmt_tx_work);
2299 cancel_work_sync(&ar->restart_work); 2516 cancel_work_sync(&ar->restart_work);
2300} 2517}
2301 2518
@@ -2309,7 +2526,7 @@ static int ath10k_config_ps(struct ath10k *ar)
2309 list_for_each_entry(arvif, &ar->arvifs, list) { 2526 list_for_each_entry(arvif, &ar->arvifs, list) {
2310 ret = ath10k_mac_vif_setup_ps(arvif); 2527 ret = ath10k_mac_vif_setup_ps(arvif);
2311 if (ret) { 2528 if (ret) {
2312 ath10k_warn("could not setup powersave (%d)\n", ret); 2529 ath10k_warn("failed to setup powersave: %d\n", ret);
2313 break; 2530 break;
2314 } 2531 }
2315 } 2532 }
@@ -2343,7 +2560,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
2343static void ath10k_config_chan(struct ath10k *ar) 2560static void ath10k_config_chan(struct ath10k *ar)
2344{ 2561{
2345 struct ath10k_vif *arvif; 2562 struct ath10k_vif *arvif;
2346 bool monitor_was_enabled;
2347 int ret; 2563 int ret;
2348 2564
2349 lockdep_assert_held(&ar->conf_mutex); 2565 lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2573,8 @@ static void ath10k_config_chan(struct ath10k *ar)
2357 2573
2358 /* First stop monitor interface. Some FW versions crash if there's a 2574 /* First stop monitor interface. Some FW versions crash if there's a
2359 * lone monitor interface. */ 2575 * lone monitor interface. */
2360 monitor_was_enabled = ar->monitor_enabled; 2576 if (ar->monitor_started)
2361 2577 ath10k_monitor_vdev_stop(ar);
2362 if (ar->monitor_enabled)
2363 ath10k_monitor_stop(ar);
2364 2578
2365 list_for_each_entry(arvif, &ar->arvifs, list) { 2579 list_for_each_entry(arvif, &ar->arvifs, list) {
2366 if (!arvif->is_started) 2580 if (!arvif->is_started)
@@ -2371,7 +2585,7 @@ static void ath10k_config_chan(struct ath10k *ar)
2371 2585
2372 ret = ath10k_vdev_stop(arvif); 2586 ret = ath10k_vdev_stop(arvif);
2373 if (ret) { 2587 if (ret) {
2374 ath10k_warn("could not stop vdev %d (%d)\n", 2588 ath10k_warn("failed to stop vdev %d: %d\n",
2375 arvif->vdev_id, ret); 2589 arvif->vdev_id, ret);
2376 continue; 2590 continue;
2377 } 2591 }
@@ -2388,7 +2602,7 @@ static void ath10k_config_chan(struct ath10k *ar)
2388 2602
2389 ret = ath10k_vdev_start(arvif); 2603 ret = ath10k_vdev_start(arvif);
2390 if (ret) { 2604 if (ret) {
2391 ath10k_warn("could not start vdev %d (%d)\n", 2605 ath10k_warn("failed to start vdev %d: %d\n",
2392 arvif->vdev_id, ret); 2606 arvif->vdev_id, ret);
2393 continue; 2607 continue;
2394 } 2608 }
@@ -2399,14 +2613,14 @@ static void ath10k_config_chan(struct ath10k *ar)
2399 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 2613 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
2400 arvif->bssid); 2614 arvif->bssid);
2401 if (ret) { 2615 if (ret) {
2402 ath10k_warn("could not bring vdev up %d (%d)\n", 2616 ath10k_warn("failed to bring vdev up %d: %d\n",
2403 arvif->vdev_id, ret); 2617 arvif->vdev_id, ret);
2404 continue; 2618 continue;
2405 } 2619 }
2406 } 2620 }
2407 2621
2408 if (monitor_was_enabled) 2622 if (ath10k_monitor_is_enabled(ar))
2409 ath10k_monitor_start(ar, ar->monitor_vdev_id); 2623 ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
2410} 2624}
2411 2625
2412static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 2626static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2634,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2420 2634
2421 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2635 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2422 ath10k_dbg(ATH10K_DBG_MAC, 2636 ath10k_dbg(ATH10K_DBG_MAC,
2423 "mac config channel %d mhz flags 0x%x\n", 2637 "mac config channel %dMHz flags 0x%x radar %d\n",
2424 conf->chandef.chan->center_freq, 2638 conf->chandef.chan->center_freq,
2425 conf->chandef.chan->flags); 2639 conf->chandef.chan->flags,
2640 conf->radar_enabled);
2426 2641
2427 spin_lock_bh(&ar->data_lock); 2642 spin_lock_bh(&ar->data_lock);
2428 ar->rx_channel = conf->chandef.chan; 2643 ar->rx_channel = conf->chandef.chan;
2429 spin_unlock_bh(&ar->data_lock); 2644 spin_unlock_bh(&ar->data_lock);
2430 2645
2431 ath10k_config_radar_detection(ar); 2646 ar->radar_enabled = conf->radar_enabled;
2647 ath10k_recalc_radar_detection(ar);
2432 2648
2433 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) { 2649 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
2434 ar->chandef = conf->chandef; 2650 ar->chandef = conf->chandef;
@@ -2444,14 +2660,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2444 ret = ath10k_wmi_pdev_set_param(ar, param, 2660 ret = ath10k_wmi_pdev_set_param(ar, param,
2445 hw->conf.power_level * 2); 2661 hw->conf.power_level * 2);
2446 if (ret) 2662 if (ret)
2447 ath10k_warn("mac failed to set 2g txpower %d (%d)\n", 2663 ath10k_warn("failed to set 2g txpower %d: %d\n",
2448 hw->conf.power_level, ret); 2664 hw->conf.power_level, ret);
2449 2665
2450 param = ar->wmi.pdev_param->txpower_limit5g; 2666 param = ar->wmi.pdev_param->txpower_limit5g;
2451 ret = ath10k_wmi_pdev_set_param(ar, param, 2667 ret = ath10k_wmi_pdev_set_param(ar, param,
2452 hw->conf.power_level * 2); 2668 hw->conf.power_level * 2);
2453 if (ret) 2669 if (ret)
2454 ath10k_warn("mac failed to set 5g txpower %d (%d)\n", 2670 ath10k_warn("failed to set 5g txpower %d: %d\n",
2455 hw->conf.power_level, ret); 2671 hw->conf.power_level, ret);
2456 } 2672 }
2457 2673
@@ -2459,10 +2675,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2459 ath10k_config_ps(ar); 2675 ath10k_config_ps(ar);
2460 2676
2461 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 2677 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
2462 if (conf->flags & IEEE80211_CONF_MONITOR) 2678 if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
2463 ret = ath10k_monitor_create(ar); 2679 ar->monitor = true;
2464 else 2680 ret = ath10k_monitor_start(ar);
2465 ret = ath10k_monitor_destroy(ar); 2681 if (ret) {
2682 ath10k_warn("failed to start monitor (config): %d\n",
2683 ret);
2684 ar->monitor = false;
2685 }
2686 } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
2687 ar->monitor) {
2688 ar->monitor = false;
2689 ath10k_monitor_stop(ar);
2690 }
2466 } 2691 }
2467 2692
2468 mutex_unlock(&ar->conf_mutex); 2693 mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2722,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2497 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); 2722 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
2498 INIT_LIST_HEAD(&arvif->list); 2723 INIT_LIST_HEAD(&arvif->list);
2499 2724
2500 if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
2501 ath10k_warn("Only one monitor interface allowed\n");
2502 ret = -EBUSY;
2503 goto err;
2504 }
2505
2506 bit = ffs(ar->free_vdev_map); 2725 bit = ffs(ar->free_vdev_map);
2507 if (bit == 0) { 2726 if (bit == 0) {
2508 ret = -EBUSY; 2727 ret = -EBUSY;
@@ -2545,7 +2764,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2545 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 2764 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
2546 arvif->vdev_subtype, vif->addr); 2765 arvif->vdev_subtype, vif->addr);
2547 if (ret) { 2766 if (ret) {
2548 ath10k_warn("WMI vdev %i create failed: ret %d\n", 2767 ath10k_warn("failed to create WMI vdev %i: %d\n",
2549 arvif->vdev_id, ret); 2768 arvif->vdev_id, ret);
2550 goto err; 2769 goto err;
2551 } 2770 }
@@ -2557,7 +2776,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2557 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, 2776 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
2558 arvif->def_wep_key_idx); 2777 arvif->def_wep_key_idx);
2559 if (ret) { 2778 if (ret) {
2560 ath10k_warn("Failed to set vdev %i default keyid: %d\n", 2779 ath10k_warn("failed to set vdev %i default key id: %d\n",
2561 arvif->vdev_id, ret); 2780 arvif->vdev_id, ret);
2562 goto err_vdev_delete; 2781 goto err_vdev_delete;
2563 } 2782 }
@@ -2567,7 +2786,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2567 ATH10K_HW_TXRX_NATIVE_WIFI); 2786 ATH10K_HW_TXRX_NATIVE_WIFI);
2568 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 2787 /* 10.X firmware does not support this VDEV parameter. Do not warn */
2569 if (ret && ret != -EOPNOTSUPP) { 2788 if (ret && ret != -EOPNOTSUPP) {
2570 ath10k_warn("Failed to set vdev %i TX encap: %d\n", 2789 ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
2571 arvif->vdev_id, ret); 2790 arvif->vdev_id, ret);
2572 goto err_vdev_delete; 2791 goto err_vdev_delete;
2573 } 2792 }
@@ -2575,14 +2794,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2575 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2794 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2576 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); 2795 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2577 if (ret) { 2796 if (ret) {
2578 ath10k_warn("Failed to create vdev %i peer for AP: %d\n", 2797 ath10k_warn("failed to create vdev %i peer for AP: %d\n",
2579 arvif->vdev_id, ret); 2798 arvif->vdev_id, ret);
2580 goto err_vdev_delete; 2799 goto err_vdev_delete;
2581 } 2800 }
2582 2801
2583 ret = ath10k_mac_set_kickout(arvif); 2802 ret = ath10k_mac_set_kickout(arvif);
2584 if (ret) { 2803 if (ret) {
2585 ath10k_warn("Failed to set vdev %i kickout parameters: %d\n", 2804 ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
2586 arvif->vdev_id, ret); 2805 arvif->vdev_id, ret);
2587 goto err_peer_delete; 2806 goto err_peer_delete;
2588 } 2807 }
@@ -2594,7 +2813,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2594 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2813 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2595 param, value); 2814 param, value);
2596 if (ret) { 2815 if (ret) {
2597 ath10k_warn("Failed to set vdev %i RX wake policy: %d\n", 2816 ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
2598 arvif->vdev_id, ret); 2817 arvif->vdev_id, ret);
2599 goto err_peer_delete; 2818 goto err_peer_delete;
2600 } 2819 }
@@ -2604,7 +2823,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2604 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2823 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2605 param, value); 2824 param, value);
2606 if (ret) { 2825 if (ret) {
2607 ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n", 2826 ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
2608 arvif->vdev_id, ret); 2827 arvif->vdev_id, ret);
2609 goto err_peer_delete; 2828 goto err_peer_delete;
2610 } 2829 }
@@ -2614,7 +2833,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2614 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2833 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2615 param, value); 2834 param, value);
2616 if (ret) { 2835 if (ret) {
2617 ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n", 2836 ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
2618 arvif->vdev_id, ret); 2837 arvif->vdev_id, ret);
2619 goto err_peer_delete; 2838 goto err_peer_delete;
2620 } 2839 }
@@ -2622,21 +2841,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2622 2841
2623 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 2842 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
2624 if (ret) { 2843 if (ret) {
2625 ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", 2844 ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
2626 arvif->vdev_id, ret); 2845 arvif->vdev_id, ret);
2627 goto err_peer_delete; 2846 goto err_peer_delete;
2628 } 2847 }
2629 2848
2630 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); 2849 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
2631 if (ret) { 2850 if (ret) {
2632 ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", 2851 ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
2633 arvif->vdev_id, ret); 2852 arvif->vdev_id, ret);
2634 goto err_peer_delete; 2853 goto err_peer_delete;
2635 } 2854 }
2636 2855
2637 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2638 ar->monitor_present = true;
2639
2640 mutex_unlock(&ar->conf_mutex); 2856 mutex_unlock(&ar->conf_mutex);
2641 return 0; 2857 return 0;
2642 2858
@@ -2668,6 +2884,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2668 2884
2669 spin_lock_bh(&ar->data_lock); 2885 spin_lock_bh(&ar->data_lock);
2670 if (arvif->beacon) { 2886 if (arvif->beacon) {
2887 dma_unmap_single(arvif->ar->dev,
2888 ATH10K_SKB_CB(arvif->beacon)->paddr,
2889 arvif->beacon->len, DMA_TO_DEVICE);
2671 dev_kfree_skb_any(arvif->beacon); 2890 dev_kfree_skb_any(arvif->beacon);
2672 arvif->beacon = NULL; 2891 arvif->beacon = NULL;
2673 } 2892 }
@@ -2679,7 +2898,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2679 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2898 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2680 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); 2899 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
2681 if (ret) 2900 if (ret)
2682 ath10k_warn("Failed to remove peer for AP vdev %i: %d\n", 2901 ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
2683 arvif->vdev_id, ret); 2902 arvif->vdev_id, ret);
2684 2903
2685 kfree(arvif->u.ap.noa_data); 2904 kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2909,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2690 2909
2691 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 2910 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2692 if (ret) 2911 if (ret)
2693 ath10k_warn("WMI vdev %i delete failed: %d\n", 2912 ath10k_warn("failed to delete WMI vdev %i: %d\n",
2694 arvif->vdev_id, ret); 2913 arvif->vdev_id, ret);
2695 2914
2696 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2697 ar->monitor_present = false;
2698
2699 ath10k_peer_cleanup(ar, arvif->vdev_id); 2915 ath10k_peer_cleanup(ar, arvif->vdev_id);
2700 2916
2701 mutex_unlock(&ar->conf_mutex); 2917 mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2944,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
2728 *total_flags &= SUPPORTED_FILTERS; 2944 *total_flags &= SUPPORTED_FILTERS;
2729 ar->filter_flags = *total_flags; 2945 ar->filter_flags = *total_flags;
2730 2946
2731 /* Monitor must not be started if it wasn't created first. 2947 if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
2732 * Promiscuous mode may be started on a non-monitor interface - in 2948 ar->promisc = true;
2733 * such case the monitor vdev is not created so starting the 2949 ret = ath10k_monitor_start(ar);
2734 * monitor makes no sense. Since ath10k uses no special RX filters 2950 if (ret) {
2735 * (only BSS filter in STA mode) there's no need for any special 2951 ath10k_warn("failed to start monitor (promisc): %d\n",
2736 * action here. */ 2952 ret);
2737 if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && 2953 ar->promisc = false;
2738 !ar->monitor_enabled && ar->monitor_present) { 2954 }
2739 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n", 2955 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
2740 ar->monitor_vdev_id); 2956 ar->promisc = false;
2741 2957 ath10k_monitor_stop(ar);
2742 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
2743 if (ret)
2744 ath10k_warn("Unable to start monitor mode\n");
2745 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
2746 ar->monitor_enabled && ar->monitor_present) {
2747 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
2748 ar->monitor_vdev_id);
2749
2750 ret = ath10k_monitor_stop(ar);
2751 if (ret)
2752 ath10k_warn("Unable to stop monitor mode\n");
2753 } 2958 }
2754 2959
2755 mutex_unlock(&ar->conf_mutex); 2960 mutex_unlock(&ar->conf_mutex);
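The rewritten filter path above collapses the old monitor_present/monitor_enabled bookkeeping into a single ar->promisc flag that tracks FIF_PROMISC_IN_BSS. A condensed sketch of the resulting toggle, assuming the ath10k_monitor_start(ar)/ath10k_monitor_stop(ar) signatures shown in this hunk:

	bool want_promisc = !!(ar->filter_flags & FIF_PROMISC_IN_BSS);

	if (want_promisc && !ar->promisc) {
		ar->promisc = true;
		if (ath10k_monitor_start(ar))
			ar->promisc = false;	/* roll back on failure */
	} else if (!want_promisc && ar->promisc) {
		ar->promisc = false;
		ath10k_monitor_stop(ar);
	}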
@@ -2780,7 +2985,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2780 arvif->vdev_id, arvif->beacon_interval); 2985 arvif->vdev_id, arvif->beacon_interval);
2781 2986
2782 if (ret) 2987 if (ret)
2783 ath10k_warn("Failed to set beacon interval for vdev %d: %i\n", 2988 ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
2784 arvif->vdev_id, ret); 2989 arvif->vdev_id, ret);
2785 } 2990 }
2786 2991
@@ -2793,7 +2998,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2793 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 2998 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
2794 WMI_BEACON_STAGGERED_MODE); 2999 WMI_BEACON_STAGGERED_MODE);
2795 if (ret) 3000 if (ret)
2796 ath10k_warn("Failed to set beacon mode for vdev %d: %i\n", 3001 ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
2797 arvif->vdev_id, ret); 3002 arvif->vdev_id, ret);
2798 } 3003 }
2799 3004
@@ -2808,7 +3013,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2808 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3013 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2809 arvif->dtim_period); 3014 arvif->dtim_period);
2810 if (ret) 3015 if (ret)
2811 ath10k_warn("Failed to set dtim period for vdev %d: %i\n", 3016 ath10k_warn("failed to set dtim period for vdev %d: %i\n",
2812 arvif->vdev_id, ret); 3017 arvif->vdev_id, ret);
2813 } 3018 }
2814 3019
@@ -2820,7 +3025,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2820 arvif->u.ap.hidden_ssid = info->hidden_ssid; 3025 arvif->u.ap.hidden_ssid = info->hidden_ssid;
2821 } 3026 }
2822 3027
2823 if (changed & BSS_CHANGED_BSSID) { 3028 /*
 3029 * Firmware manages the AP self-peer internally, so make sure not to
 3030 * create it in the driver. Otherwise AP self-peer deletion may time out later.
3031 */
3032 if (changed & BSS_CHANGED_BSSID &&
3033 vif->type != NL80211_IFTYPE_AP) {
2824 if (!is_zero_ether_addr(info->bssid)) { 3034 if (!is_zero_ether_addr(info->bssid)) {
2825 ath10k_dbg(ATH10K_DBG_MAC, 3035 ath10k_dbg(ATH10K_DBG_MAC,
2826 "mac vdev %d create peer %pM\n", 3036 "mac vdev %d create peer %pM\n",
@@ -2829,7 +3039,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2829 ret = ath10k_peer_create(ar, arvif->vdev_id, 3039 ret = ath10k_peer_create(ar, arvif->vdev_id,
2830 info->bssid); 3040 info->bssid);
2831 if (ret) 3041 if (ret)
2832 ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n", 3042 ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
2833 info->bssid, arvif->vdev_id, ret); 3043 info->bssid, arvif->vdev_id, ret);
2834 3044
2835 if (vif->type == NL80211_IFTYPE_STATION) { 3045 if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +3078,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2868 ath10k_control_beaconing(arvif, info); 3078 ath10k_control_beaconing(arvif, info);
2869 3079
2870 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3080 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2871 u32 cts_prot; 3081 arvif->use_cts_prot = info->use_cts_prot;
2872 if (info->use_cts_prot)
2873 cts_prot = 1;
2874 else
2875 cts_prot = 0;
2876
2877 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", 3082 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2878 arvif->vdev_id, cts_prot); 3083 arvif->vdev_id, info->use_cts_prot);
2879 3084
2880 vdev_param = ar->wmi.vdev_param->enable_rtscts; 3085 ret = ath10k_recalc_rtscts_prot(arvif);
2881 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2882 cts_prot);
2883 if (ret) 3086 if (ret)
2884 ath10k_warn("Failed to set CTS prot for vdev %d: %d\n", 3087 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
2885 arvif->vdev_id, ret); 3088 arvif->vdev_id, ret);
2886 } 3089 }
2887 3090
@@ -2900,7 +3103,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2900 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3103 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2901 slottime); 3104 slottime);
2902 if (ret) 3105 if (ret)
2903 ath10k_warn("Failed to set erp slot for vdev %d: %i\n", 3106 ath10k_warn("failed to set erp slot for vdev %d: %i\n",
2904 arvif->vdev_id, ret); 3107 arvif->vdev_id, ret);
2905 } 3108 }
2906 3109
@@ -2919,7 +3122,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2919 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3122 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2920 preamble); 3123 preamble);
2921 if (ret) 3124 if (ret)
2922 ath10k_warn("Failed to set preamble for vdev %d: %i\n", 3125 ath10k_warn("failed to set preamble for vdev %d: %i\n",
2923 arvif->vdev_id, ret); 3126 arvif->vdev_id, ret);
2924 } 3127 }
2925 3128
@@ -2990,7 +3193,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
2990 3193
2991 ret = ath10k_start_scan(ar, &arg); 3194 ret = ath10k_start_scan(ar, &arg);
2992 if (ret) { 3195 if (ret) {
2993 ath10k_warn("could not start hw scan (%d)\n", ret); 3196 ath10k_warn("failed to start hw scan: %d\n", ret);
2994 spin_lock_bh(&ar->data_lock); 3197 spin_lock_bh(&ar->data_lock);
2995 ar->scan.in_progress = false; 3198 ar->scan.in_progress = false;
2996 spin_unlock_bh(&ar->data_lock); 3199 spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3213,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
3010 mutex_lock(&ar->conf_mutex); 3213 mutex_lock(&ar->conf_mutex);
3011 ret = ath10k_abort_scan(ar); 3214 ret = ath10k_abort_scan(ar);
3012 if (ret) { 3215 if (ret) {
3013 ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n", 3216 ath10k_warn("failed to abort scan: %d\n", ret);
3014 ret);
3015 ieee80211_scan_completed(hw, 1 /* aborted */); 3217 ieee80211_scan_completed(hw, 1 /* aborted */);
3016 } 3218 }
3017 mutex_unlock(&ar->conf_mutex); 3219 mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3291,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3089 3291
3090 if (!peer) { 3292 if (!peer) {
3091 if (cmd == SET_KEY) { 3293 if (cmd == SET_KEY) {
3092 ath10k_warn("cannot install key for non-existent peer %pM\n", 3294 ath10k_warn("failed to install key for non-existent peer %pM\n",
3093 peer_addr); 3295 peer_addr);
3094 ret = -EOPNOTSUPP; 3296 ret = -EOPNOTSUPP;
3095 goto exit; 3297 goto exit;
@@ -3112,7 +3314,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3112 3314
3113 ret = ath10k_install_key(arvif, key, cmd, peer_addr); 3315 ret = ath10k_install_key(arvif, key, cmd, peer_addr);
3114 if (ret) { 3316 if (ret) {
3115 ath10k_warn("key installation failed for vdev %i peer %pM: %d\n", 3317 ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
3116 arvif->vdev_id, peer_addr, ret); 3318 arvif->vdev_id, peer_addr, ret);
3117 goto exit; 3319 goto exit;
3118 } 3320 }
@@ -3127,7 +3329,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3127 peer->keys[key->keyidx] = NULL; 3329 peer->keys[key->keyidx] = NULL;
3128 else if (peer == NULL) 3330 else if (peer == NULL)
3129 /* impossible unless FW goes crazy */ 3331 /* impossible unless FW goes crazy */
3130 ath10k_warn("peer %pM disappeared!\n", peer_addr); 3332 ath10k_warn("Peer %pM disappeared!\n", peer_addr);
3131 spin_unlock_bh(&ar->data_lock); 3333 spin_unlock_bh(&ar->data_lock);
3132 3334
3133exit: 3335exit:
@@ -3195,6 +3397,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3195 sta->addr, smps, err); 3397 sta->addr, smps, err);
3196 } 3398 }
3197 3399
3400 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
3401 ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
3402 sta->addr);
3403
3404 err = ath10k_station_assoc(ar, arvif, sta, true);
3405 if (err)
3406 ath10k_warn("failed to reassociate station: %pM\n",
3407 sta->addr);
3408 }
3409
3198 mutex_unlock(&ar->conf_mutex); 3410 mutex_unlock(&ar->conf_mutex);
3199} 3411}
3200 3412
@@ -3236,7 +3448,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3236 max_num_peers = TARGET_NUM_PEERS; 3448 max_num_peers = TARGET_NUM_PEERS;
3237 3449
3238 if (ar->num_peers >= max_num_peers) { 3450 if (ar->num_peers >= max_num_peers) {
3239 ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n", 3451 ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
3240 ar->num_peers, max_num_peers); 3452 ar->num_peers, max_num_peers);
3241 ret = -ENOBUFS; 3453 ret = -ENOBUFS;
3242 goto exit; 3454 goto exit;
@@ -3248,7 +3460,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3248 3460
3249 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); 3461 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
3250 if (ret) 3462 if (ret)
3251 ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n", 3463 ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
3252 sta->addr, arvif->vdev_id, ret); 3464 sta->addr, arvif->vdev_id, ret);
3253 } else if ((old_state == IEEE80211_STA_NONE && 3465 } else if ((old_state == IEEE80211_STA_NONE &&
3254 new_state == IEEE80211_STA_NOTEXIST)) { 3466 new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3472,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3260 arvif->vdev_id, sta->addr); 3472 arvif->vdev_id, sta->addr);
3261 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 3473 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
3262 if (ret) 3474 if (ret)
3263 ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n", 3475 ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
3264 sta->addr, arvif->vdev_id, ret); 3476 sta->addr, arvif->vdev_id, ret);
3265 3477
3266 if (vif->type == NL80211_IFTYPE_STATION) 3478 if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3487,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3275 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", 3487 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
3276 sta->addr); 3488 sta->addr);
3277 3489
3278 ret = ath10k_station_assoc(ar, arvif, sta); 3490 ret = ath10k_station_assoc(ar, arvif, sta, false);
3279 if (ret) 3491 if (ret)
3280 ath10k_warn("Failed to associate station %pM for vdev %i: %i\n", 3492 ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
3281 sta->addr, arvif->vdev_id, ret); 3493 sta->addr, arvif->vdev_id, ret);
3282 } else if (old_state == IEEE80211_STA_ASSOC && 3494 } else if (old_state == IEEE80211_STA_ASSOC &&
3283 new_state == IEEE80211_STA_AUTH && 3495 new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3291 3503
3292 ret = ath10k_station_disassoc(ar, arvif, sta); 3504 ret = ath10k_station_disassoc(ar, arvif, sta);
3293 if (ret) 3505 if (ret)
3294 ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n", 3506 ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
3295 sta->addr, arvif->vdev_id, ret); 3507 sta->addr, arvif->vdev_id, ret);
3296 } 3508 }
3297exit: 3509exit:
@@ -3339,7 +3551,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3339 WMI_STA_PS_PARAM_UAPSD, 3551 WMI_STA_PS_PARAM_UAPSD,
3340 arvif->u.sta.uapsd); 3552 arvif->u.sta.uapsd);
3341 if (ret) { 3553 if (ret) {
3342 ath10k_warn("could not set uapsd params %d\n", ret); 3554 ath10k_warn("failed to set uapsd params: %d\n", ret);
3343 goto exit; 3555 goto exit;
3344 } 3556 }
3345 3557
@@ -3352,7 +3564,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3352 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 3564 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
3353 value); 3565 value);
3354 if (ret) 3566 if (ret)
3355 ath10k_warn("could not set rx wake param %d\n", ret); 3567 ath10k_warn("failed to set rx wake param: %d\n", ret);
3356 3568
3357exit: 3569exit:
3358 return ret; 3570 return ret;
@@ -3402,13 +3614,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
3402 /* FIXME: FW accepts wmm params per hw, not per vif */ 3614 /* FIXME: FW accepts wmm params per hw, not per vif */
3403 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); 3615 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
3404 if (ret) { 3616 if (ret) {
3405 ath10k_warn("could not set wmm params %d\n", ret); 3617 ath10k_warn("failed to set wmm params: %d\n", ret);
3406 goto exit; 3618 goto exit;
3407 } 3619 }
3408 3620
3409 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 3621 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
3410 if (ret) 3622 if (ret)
3411 ath10k_warn("could not set sta uapsd %d\n", ret); 3623 ath10k_warn("failed to set sta uapsd: %d\n", ret);
3412 3624
3413exit: 3625exit:
3414 mutex_unlock(&ar->conf_mutex); 3626 mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3673,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
3461 3673
3462 ret = ath10k_start_scan(ar, &arg); 3674 ret = ath10k_start_scan(ar, &arg);
3463 if (ret) { 3675 if (ret) {
3464 ath10k_warn("could not start roc scan (%d)\n", ret); 3676 ath10k_warn("failed to start roc scan: %d\n", ret);
3465 spin_lock_bh(&ar->data_lock); 3677 spin_lock_bh(&ar->data_lock);
3466 ar->scan.in_progress = false; 3678 ar->scan.in_progress = false;
3467 spin_unlock_bh(&ar->data_lock); 3679 spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3682,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
3470 3682
3471 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); 3683 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
3472 if (ret == 0) { 3684 if (ret == 0) {
3473 ath10k_warn("could not switch to channel for roc scan\n"); 3685 ath10k_warn("failed to switch to channel for roc scan\n");
3474 ath10k_abort_scan(ar); 3686 ath10k_abort_scan(ar);
3475 ret = -ETIMEDOUT; 3687 ret = -ETIMEDOUT;
3476 goto exit; 3688 goto exit;
@@ -3511,7 +3723,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3511 3723
3512 ret = ath10k_mac_set_rts(arvif, value); 3724 ret = ath10k_mac_set_rts(arvif, value);
3513 if (ret) { 3725 if (ret) {
3514 ath10k_warn("could not set rts threshold for vdev %d (%d)\n", 3726 ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
3515 arvif->vdev_id, ret); 3727 arvif->vdev_id, ret);
3516 break; 3728 break;
3517 } 3729 }
@@ -3534,7 +3746,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3534 3746
3535 ret = ath10k_mac_set_rts(arvif, value); 3747 ret = ath10k_mac_set_rts(arvif, value);
3536 if (ret) { 3748 if (ret) {
3537 ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n", 3749 ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
3538 arvif->vdev_id, ret); 3750 arvif->vdev_id, ret);
3539 break; 3751 break;
3540 } 3752 }
@@ -3544,7 +3756,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3544 return ret; 3756 return ret;
3545} 3757}
3546 3758
3547static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 3759static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3760 u32 queues, bool drop)
3548{ 3761{
3549 struct ath10k *ar = hw->priv; 3762 struct ath10k *ar = hw->priv;
3550 bool skip; 3763 bool skip;
@@ -3573,7 +3786,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
3573 }), ATH10K_FLUSH_TIMEOUT_HZ); 3786 }), ATH10K_FLUSH_TIMEOUT_HZ);
3574 3787
3575 if (ret <= 0 || skip) 3788 if (ret <= 0 || skip)
3576 ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n", 3789 ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
3577 skip, ar->state, ret); 3790 skip, ar->state, ret);
3578 3791
3579skip: 3792skip:
@@ -3608,7 +3821,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3608 3821
3609 ret = ath10k_hif_suspend(ar); 3822 ret = ath10k_hif_suspend(ar);
3610 if (ret) { 3823 if (ret) {
3611 ath10k_warn("could not suspend hif (%d)\n", ret); 3824 ath10k_warn("failed to suspend hif: %d\n", ret);
3612 goto resume; 3825 goto resume;
3613 } 3826 }
3614 3827
@@ -3617,7 +3830,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3617resume: 3830resume:
3618 ret = ath10k_wmi_pdev_resume_target(ar); 3831 ret = ath10k_wmi_pdev_resume_target(ar);
3619 if (ret) 3832 if (ret)
3620 ath10k_warn("could not resume target (%d)\n", ret); 3833 ath10k_warn("failed to resume target: %d\n", ret);
3621 3834
3622 ret = 1; 3835 ret = 1;
3623exit: 3836exit:
@@ -3634,14 +3847,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
3634 3847
3635 ret = ath10k_hif_resume(ar); 3848 ret = ath10k_hif_resume(ar);
3636 if (ret) { 3849 if (ret) {
3637 ath10k_warn("could not resume hif (%d)\n", ret); 3850 ath10k_warn("failed to resume hif: %d\n", ret);
3638 ret = 1; 3851 ret = 1;
3639 goto exit; 3852 goto exit;
3640 } 3853 }
3641 3854
3642 ret = ath10k_wmi_pdev_resume_target(ar); 3855 ret = ath10k_wmi_pdev_resume_target(ar);
3643 if (ret) { 3856 if (ret) {
3644 ath10k_warn("could not resume target (%d)\n", ret); 3857 ath10k_warn("failed to resume target: %d\n", ret);
3645 ret = 1; 3858 ret = 1;
3646 goto exit; 3859 goto exit;
3647 } 3860 }
@@ -3964,7 +4177,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3964 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4177 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
3965 vdev_param, fixed_rate); 4178 vdev_param, fixed_rate);
3966 if (ret) { 4179 if (ret) {
3967 ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n", 4180 ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
3968 fixed_rate, ret); 4181 fixed_rate, ret);
3969 ret = -EINVAL; 4182 ret = -EINVAL;
3970 goto exit; 4183 goto exit;
@@ -3977,7 +4190,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3977 vdev_param, fixed_nss); 4190 vdev_param, fixed_nss);
3978 4191
3979 if (ret) { 4192 if (ret) {
3980 ath10k_warn("Could not set fixed_nss param %d: %d\n", 4193 ath10k_warn("failed to set fixed nss param %d: %d\n",
3981 fixed_nss, ret); 4194 fixed_nss, ret);
3982 ret = -EINVAL; 4195 ret = -EINVAL;
3983 goto exit; 4196 goto exit;
@@ -3990,7 +4203,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3990 force_sgi); 4203 force_sgi);
3991 4204
3992 if (ret) { 4205 if (ret) {
3993 ath10k_warn("Could not set sgi param %d: %d\n", 4206 ath10k_warn("failed to set sgi param %d: %d\n",
3994 force_sgi, ret); 4207 force_sgi, ret);
3995 ret = -EINVAL; 4208 ret = -EINVAL;
3996 goto exit; 4209 goto exit;
@@ -4026,7 +4239,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
4026 } 4239 }
4027 4240
4028 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) { 4241 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
4029 ath10k_warn("Could not force SGI usage for default rate settings\n"); 4242 ath10k_warn("failed to force SGI usage for default rate settings\n");
4030 return -EINVAL; 4243 return -EINVAL;
4031 } 4244 }
4032 4245
@@ -4034,14 +4247,6 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
4034 fixed_nss, force_sgi); 4247 fixed_nss, force_sgi);
4035} 4248}
4036 4249
4037static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
4038 struct ieee80211_vif *vif,
4039 struct cfg80211_chan_def *chandef)
4040{
4041 /* there's no need to do anything here. vif->csa_active is enough */
4042 return;
4043}
4044
4045static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 4250static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4046 struct ieee80211_vif *vif, 4251 struct ieee80211_vif *vif,
4047 struct ieee80211_sta *sta, 4252 struct ieee80211_sta *sta,
@@ -4072,8 +4277,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4072 bw = WMI_PEER_CHWIDTH_80MHZ; 4277 bw = WMI_PEER_CHWIDTH_80MHZ;
4073 break; 4278 break;
4074 case IEEE80211_STA_RX_BW_160: 4279 case IEEE80211_STA_RX_BW_160:
 4075 ath10k_warn("mac sta rc update for %pM: invalid bw %d\n", 4280 ath10k_warn("Invalid bandwidth %d in rc update for %pM\n",
4076 sta->addr, sta->bandwidth); 4281 sta->bandwidth, sta->addr);
4077 bw = WMI_PEER_CHWIDTH_20MHZ; 4282 bw = WMI_PEER_CHWIDTH_20MHZ;
4078 break; 4283 break;
4079 } 4284 }
@@ -4099,8 +4304,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4099 smps = WMI_PEER_SMPS_DYNAMIC; 4304 smps = WMI_PEER_SMPS_DYNAMIC;
4100 break; 4305 break;
4101 case IEEE80211_SMPS_NUM_MODES: 4306 case IEEE80211_SMPS_NUM_MODES:
4102 ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n", 4307 ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
4103 sta->addr, sta->smps_mode); 4308 sta->smps_mode, sta->addr);
4104 smps = WMI_PEER_SMPS_PS_NONE; 4309 smps = WMI_PEER_SMPS_PS_NONE;
4105 break; 4310 break;
4106 } 4311 }
@@ -4108,15 +4313,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4108 arsta->smps = smps; 4313 arsta->smps = smps;
4109 } 4314 }
4110 4315
4111 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
4112 /* FIXME: Not implemented. Probably the only way to do it would
4113 * be to re-assoc the peer. */
4114 changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
4115 ath10k_dbg(ATH10K_DBG_MAC,
4116 "mac sta rc update for %pM: changing supported rates not implemented\n",
4117 sta->addr);
4118 }
4119
4120 arsta->changed |= changed; 4316 arsta->changed |= changed;
4121 4317
4122 spin_unlock_bh(&ar->data_lock); 4318 spin_unlock_bh(&ar->data_lock);
@@ -4154,10 +4350,11 @@ static const struct ieee80211_ops ath10k_ops = {
4154 .set_frag_threshold = ath10k_set_frag_threshold, 4350 .set_frag_threshold = ath10k_set_frag_threshold,
4155 .flush = ath10k_flush, 4351 .flush = ath10k_flush,
4156 .tx_last_beacon = ath10k_tx_last_beacon, 4352 .tx_last_beacon = ath10k_tx_last_beacon,
4353 .set_antenna = ath10k_set_antenna,
4354 .get_antenna = ath10k_get_antenna,
4157 .restart_complete = ath10k_restart_complete, 4355 .restart_complete = ath10k_restart_complete,
4158 .get_survey = ath10k_get_survey, 4356 .get_survey = ath10k_get_survey,
4159 .set_bitrate_mask = ath10k_set_bitrate_mask, 4357 .set_bitrate_mask = ath10k_set_bitrate_mask,
4160 .channel_switch_beacon = ath10k_channel_switch_beacon,
4161 .sta_rc_update = ath10k_sta_rc_update, 4358 .sta_rc_update = ath10k_sta_rc_update,
4162 .get_tsf = ath10k_get_tsf, 4359 .get_tsf = ath10k_get_tsf,
4163#ifdef CONFIG_PM 4360#ifdef CONFIG_PM
@@ -4503,6 +4700,18 @@ int ath10k_mac_register(struct ath10k *ar)
4503 BIT(NL80211_IFTYPE_ADHOC) | 4700 BIT(NL80211_IFTYPE_ADHOC) |
4504 BIT(NL80211_IFTYPE_AP); 4701 BIT(NL80211_IFTYPE_AP);
4505 4702
4703 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
 4704 /* TODO: Have to deal with 2x2 chips if/when they come out. */
4705 ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
4706 ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
4707 } else {
4708 ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
4709 ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
4710 }
4711
4712 ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
4713 ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
4714
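The chainmask selection above also feeds wiphy->available_antennas_rx/tx, which is what lets mac80211 accept the .set_antenna/.get_antenna callbacks added earlier in this patch. The setter's body is not part of this diff; a hypothetical validation helper might look like:

	/* illustrative only: reject masks outside the supported chainmasks */
	static int ath10k_check_antenna_mask(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
	{
		if ((tx_ant & ~ar->supp_tx_chainmask) ||
		    (rx_ant & ~ar->supp_rx_chainmask))
			return -EINVAL;

		return 0;
	}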
4506 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) 4715 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
4507 ar->hw->wiphy->interface_modes |= 4716 ar->hw->wiphy->interface_modes |=
4508 BIT(NL80211_IFTYPE_P2P_CLIENT) | 4717 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4516,7 +4725,6 @@ int ath10k_mac_register(struct ath10k *ar)
4516 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 4725 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4517 IEEE80211_HW_HAS_RATE_CONTROL | 4726 IEEE80211_HW_HAS_RATE_CONTROL |
4518 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 4727 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
4519 IEEE80211_HW_WANT_MONITOR_VIF |
4520 IEEE80211_HW_AP_LINK_PS | 4728 IEEE80211_HW_AP_LINK_PS |
4521 IEEE80211_HW_SPECTRUM_MGMT; 4729 IEEE80211_HW_SPECTRUM_MGMT;
4522 4730
@@ -4570,19 +4778,19 @@ int ath10k_mac_register(struct ath10k *ar)
4570 NL80211_DFS_UNSET); 4778 NL80211_DFS_UNSET);
4571 4779
4572 if (!ar->dfs_detector) 4780 if (!ar->dfs_detector)
4573 ath10k_warn("dfs pattern detector init failed\n"); 4781 ath10k_warn("failed to initialise DFS pattern detector\n");
4574 } 4782 }
4575 4783
4576 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 4784 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
4577 ath10k_reg_notifier); 4785 ath10k_reg_notifier);
4578 if (ret) { 4786 if (ret) {
4579 ath10k_err("Regulatory initialization failed: %i\n", ret); 4787 ath10k_err("failed to initialise regulatory: %i\n", ret);
4580 goto err_free; 4788 goto err_free;
4581 } 4789 }
4582 4790
4583 ret = ieee80211_register_hw(ar->hw); 4791 ret = ieee80211_register_hw(ar->hw);
4584 if (ret) { 4792 if (ret) {
4585 ath10k_err("ieee80211 registration failed: %d\n", ret); 4793 ath10k_err("failed to register ieee80211: %d\n", ret);
4586 goto err_free; 4794 goto err_free;
4587 } 4795 }
4588 4796
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9d242d801d9d..d0004d59c97e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -39,15 +39,28 @@ enum ath10k_pci_irq_mode {
39 ATH10K_PCI_IRQ_MSI = 2, 39 ATH10K_PCI_IRQ_MSI = 2,
40}; 40};
41 41
42static unsigned int ath10k_target_ps; 42enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
45};
46
47static unsigned int ath10k_pci_target_ps;
43static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; 48static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
44 50
45module_param(ath10k_target_ps, uint, 0644); 51module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
46MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); 52MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
47 53
48module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); 54module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
49MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); 55MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
50 56
57module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
59
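With module_param_named() the user-visible names drop the ath10k_pci_ prefix, so (assuming the module loads as ath10k_pci) the new knobs are set as, for example:

	modprobe ath10k_pci irq_mode=2 reset_mode=1

where reset_mode=1 limits recovery to warm resets (ATH10K_PCI_RESET_WARM_ONLY) and never falls back to a cold reset.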
 60/* how long to wait for the target to initialise, in ms */
61#define ATH10K_PCI_TARGET_WAIT 3000
62#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
63
51#define QCA988X_2_0_DEVICE_ID (0x003c) 64#define QCA988X_2_0_DEVICE_ID (0x003c)
52 65
53static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { 66static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
346 * 2) Buffer in DMA-able space 359 * 2) Buffer in DMA-able space
347 */ 360 */
348 orig_nbytes = nbytes; 361 orig_nbytes = nbytes;
349 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, 362 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
350 orig_nbytes, 363 orig_nbytes,
351 &ce_data_base); 364 &ce_data_base,
365 GFP_ATOMIC);
352 366
353 if (!data_buf) { 367 if (!data_buf) {
354 ret = -ENOMEM; 368 ret = -ENOMEM;
@@ -442,12 +456,12 @@ done:
442 __le32_to_cpu(((__le32 *)data_buf)[i]); 456 __le32_to_cpu(((__le32 *)data_buf)[i]);
443 } 457 }
444 } else 458 } else
445 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", 459 ath10k_warn("failed to read diag value at 0x%x: %d\n",
446 __func__, address); 460 address, ret);
447 461
448 if (data_buf) 462 if (data_buf)
449 pci_free_consistent(ar_pci->pdev, orig_nbytes, 463 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
450 data_buf, ce_data_base); 464 ce_data_base);
451 465
452 return ret; 466 return ret;
453} 467}
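pci_alloc_consistent() is a legacy wrapper that hardcodes GFP_ATOMIC and takes a pci_dev; the replacement spells out both the struct device and the allocation flags. The modern pairing used above, as a sketch:

	void *buf;
	dma_addr_t ce_data_base;

	buf = dma_alloc_coherent(ar->dev, orig_nbytes, &ce_data_base, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	/* ... diagnostic transfer: the CPU uses buf, the device uses ce_data_base ... */

	dma_free_coherent(ar->dev, orig_nbytes, buf, ce_data_base);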
@@ -490,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
490 * 2) Buffer in DMA-able space 504 * 2) Buffer in DMA-able space
491 */ 505 */
492 orig_nbytes = nbytes; 506 orig_nbytes = nbytes;
493 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, 507 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
494 orig_nbytes, 508 orig_nbytes,
495 &ce_data_base); 509 &ce_data_base,
510 GFP_ATOMIC);
496 if (!data_buf) { 511 if (!data_buf) {
497 ret = -ENOMEM; 512 ret = -ENOMEM;
498 goto done; 513 goto done;
@@ -588,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
588 603
589done: 604done:
590 if (data_buf) { 605 if (data_buf) {
591 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, 606 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
592 ce_data_base); 607 ce_data_base);
593 } 608 }
594 609
595 if (ret != 0) 610 if (ret != 0)
596 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, 611 ath10k_warn("failed to write diag value at 0x%x: %d\n",
597 address); 612 address, ret);
598 613
599 return ret; 614 return ret;
600} 615}
@@ -747,17 +762,21 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
747 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 762 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
748 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 763 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
749 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 764 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
750 unsigned int nentries_mask = src_ring->nentries_mask; 765 unsigned int nentries_mask;
751 unsigned int sw_index = src_ring->sw_index; 766 unsigned int sw_index;
752 unsigned int write_index = src_ring->write_index; 767 unsigned int write_index;
753 int err, i; 768 int err, i = 0;
754 769
755 spin_lock_bh(&ar_pci->ce_lock); 770 spin_lock_bh(&ar_pci->ce_lock);
756 771
772 nentries_mask = src_ring->nentries_mask;
773 sw_index = src_ring->sw_index;
774 write_index = src_ring->write_index;
775
757 if (unlikely(CE_RING_DELTA(nentries_mask, 776 if (unlikely(CE_RING_DELTA(nentries_mask,
758 write_index, sw_index - 1) < n_items)) { 777 write_index, sw_index - 1) < n_items)) {
759 err = -ENOBUFS; 778 err = -ENOBUFS;
760 goto unlock; 779 goto err;
761 } 780 }
762 781
763 for (i = 0; i < n_items - 1; i++) { 782 for (i = 0; i < n_items - 1; i++) {
@@ -774,7 +793,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
774 items[i].transfer_id, 793 items[i].transfer_id,
775 CE_SEND_FLAG_GATHER); 794 CE_SEND_FLAG_GATHER);
776 if (err) 795 if (err)
777 goto unlock; 796 goto err;
778 } 797 }
779 798
 780 /* `i` is equal to `n_items - 1` after for() */ 799
@@ -792,10 +811,15 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
792 items[i].transfer_id, 811 items[i].transfer_id,
793 0); 812 0);
794 if (err) 813 if (err)
795 goto unlock; 814 goto err;
815
816 spin_unlock_bh(&ar_pci->ce_lock);
817 return 0;
818
819err:
820 for (; i > 0; i--)
821 __ath10k_ce_send_revert(ce_pipe);
796 822
797 err = 0;
798unlock:
799 spin_unlock_bh(&ar_pci->ce_lock); 823 spin_unlock_bh(&ar_pci->ce_lock);
800 return err; 824 return err;
801} 825}
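Two fixes land in this hunk: the ring indices are now sampled under ce_lock rather than before taking it, and a partially posted gather list is unwound instead of leaking source-ring entries. The unwind works because i counts the items already posted when the failure happens; stripped to the idiom (post_one()/revert_one() are hypothetical stand-ins for the CE send/revert helpers):

	int i, err = 0;

	spin_lock_bh(&lock);

	for (i = 0; i < n_items; i++) {
		err = post_one(&items[i]);	/* hypothetical helper */
		if (err)
			goto err_revert;
	}

	spin_unlock_bh(&lock);
	return 0;

err_revert:
	/* items 0..i-1 were posted; revert exactly that many */
	for (; i > 0; i--)
		revert_one();			/* hypothetical helper */

	spin_unlock_bh(&lock);
	return err;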
@@ -803,6 +827,9 @@ unlock:
803static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 827static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
804{ 828{
805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 829 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
830
831 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
832
806 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 833 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
807} 834}
808 835
@@ -854,6 +881,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
854static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 881static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
855 int force) 882 int force)
856{ 883{
884 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
885
857 if (!force) { 886 if (!force) {
858 int resources; 887 int resources;
859 /* 888 /*
@@ -880,7 +909,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
880{ 909{
881 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 910 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
882 911
883 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 912 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
884 913
885 memcpy(&ar_pci->msg_callbacks_current, callbacks, 914 memcpy(&ar_pci->msg_callbacks_current, callbacks,
886 sizeof(ar_pci->msg_callbacks_current)); 915 sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
938{ 967{
939 int ret = 0; 968 int ret = 0;
940 969
970 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
971
941 /* polling for received messages not supported */ 972 /* polling for received messages not supported */
942 *dl_is_polled = 0; 973 *dl_is_polled = 0;
943 974
@@ -997,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
997{ 1028{
998 int ul_is_polled, dl_is_polled; 1029 int ul_is_polled, dl_is_polled;
999 1030
1031 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1032
1000 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1033 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1001 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1034 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1002 ul_pipe, 1035 ul_pipe,
@@ -1098,6 +1131,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
1098 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1131 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1099 int ret, ret_early; 1132 int ret, ret_early;
1100 1133
1134 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1135
1101 ath10k_pci_free_early_irq(ar); 1136 ath10k_pci_free_early_irq(ar);
1102 ath10k_pci_kill_tasklet(ar); 1137 ath10k_pci_kill_tasklet(ar);
1103 1138
@@ -1233,18 +1268,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1233 1268
1234static void ath10k_pci_ce_deinit(struct ath10k *ar) 1269static void ath10k_pci_ce_deinit(struct ath10k *ar)
1235{ 1270{
1236 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1271 int i;
1237 struct ath10k_pci_pipe *pipe_info;
1238 int pipe_num;
1239 1272
1240 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 1273 for (i = 0; i < CE_COUNT; i++)
1241 pipe_info = &ar_pci->pipe_info[pipe_num]; 1274 ath10k_ce_deinit_pipe(ar, i);
1242 if (pipe_info->ce_hdl) {
1243 ath10k_ce_deinit(pipe_info->ce_hdl);
1244 pipe_info->ce_hdl = NULL;
1245 pipe_info->buf_sz = 0;
1246 }
1247 }
1248} 1275}
1249 1276
1250static void ath10k_pci_hif_stop(struct ath10k *ar) 1277static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1279,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1252 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1279 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1253 int ret; 1280 int ret;
1254 1281
1255 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 1282 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1283
1284 if (WARN_ON(!ar_pci->started))
1285 return;
1256 1286
1257 ret = ath10k_ce_disable_interrupts(ar); 1287 ret = ath10k_ce_disable_interrupts(ar);
1258 if (ret) 1288 if (ret)
@@ -1697,30 +1727,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
1697 return 0; 1727 return 0;
1698} 1728}
1699 1729
1730static int ath10k_pci_alloc_ce(struct ath10k *ar)
1731{
1732 int i, ret;
1733
1734 for (i = 0; i < CE_COUNT; i++) {
1735 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1736 if (ret) {
1737 ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1738 i, ret);
1739 return ret;
1740 }
1741 }
1700 1742
1743 return 0;
1744}
1745
1746static void ath10k_pci_free_ce(struct ath10k *ar)
1747{
1748 int i;
1749
1750 for (i = 0; i < CE_COUNT; i++)
1751 ath10k_ce_free_pipe(ar, i);
1752}
1701 1753
1702static int ath10k_pci_ce_init(struct ath10k *ar) 1754static int ath10k_pci_ce_init(struct ath10k *ar)
1703{ 1755{
1704 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1756 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1705 struct ath10k_pci_pipe *pipe_info; 1757 struct ath10k_pci_pipe *pipe_info;
1706 const struct ce_attr *attr; 1758 const struct ce_attr *attr;
1707 int pipe_num; 1759 int pipe_num, ret;
1708 1760
1709 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 1761 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1710 pipe_info = &ar_pci->pipe_info[pipe_num]; 1762 pipe_info = &ar_pci->pipe_info[pipe_num];
1763 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1711 pipe_info->pipe_num = pipe_num; 1764 pipe_info->pipe_num = pipe_num;
1712 pipe_info->hif_ce_state = ar; 1765 pipe_info->hif_ce_state = ar;
1713 attr = &host_ce_config_wlan[pipe_num]; 1766 attr = &host_ce_config_wlan[pipe_num];
1714 1767
1715 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); 1768 ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1716 if (pipe_info->ce_hdl == NULL) { 1769 if (ret) {
1717 ath10k_err("failed to initialize CE for pipe: %d\n", 1770 ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1718 pipe_num); 1771 pipe_num, ret);
1719 1772 return ret;
1720 /* It is safe to call it here. It checks if ce_hdl is
1721 * valid for each pipe */
1722 ath10k_pci_ce_deinit(ar);
1723 return -1;
1724 } 1773 }
1725 1774
1726 if (pipe_num == CE_COUNT - 1) { 1775 if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1790,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
1741static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) 1790static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1742{ 1791{
1743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1792 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1744 u32 fw_indicator_address, fw_indicator; 1793 u32 fw_indicator;
1745 1794
1746 ath10k_pci_wake(ar); 1795 ath10k_pci_wake(ar);
1747 1796
1748 fw_indicator_address = ar_pci->fw_indicator_address; 1797 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1749 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1750 1798
1751 if (fw_indicator & FW_IND_EVENT_PENDING) { 1799 if (fw_indicator & FW_IND_EVENT_PENDING) {
1752 /* ACK: clear Target-side pending event */ 1800 /* ACK: clear Target-side pending event */
1753 ath10k_pci_write32(ar, fw_indicator_address, 1801 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1754 fw_indicator & ~FW_IND_EVENT_PENDING); 1802 fw_indicator & ~FW_IND_EVENT_PENDING);
1755 1803
1756 if (ar_pci->started) { 1804 if (ar_pci->started) {
@@ -1767,13 +1815,32 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1767 ath10k_pci_sleep(ar); 1815 ath10k_pci_sleep(ar);
1768} 1816}
1769 1817
1818/* this function effectively clears target memory controller assert line */
1819static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1820{
1821 u32 val;
1822
1823 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1824 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1825 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1826 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1827
1828 msleep(10);
1829
1830 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1831 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1832 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1833 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1834
1835 msleep(10);
1836}
1837
1770static int ath10k_pci_warm_reset(struct ath10k *ar) 1838static int ath10k_pci_warm_reset(struct ath10k *ar)
1771{ 1839{
1772 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 int ret = 0; 1840 int ret = 0;
1774 u32 val; 1841 u32 val;
1775 1842
1776 ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n"); 1843 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1777 1844
1778 ret = ath10k_do_pci_wake(ar); 1845 ret = ath10k_do_pci_wake(ar);
1779 if (ret) { 1846 if (ret) {
@@ -1801,7 +1868,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
1801 msleep(100); 1868 msleep(100);
1802 1869
1803 /* clear fw indicator */ 1870 /* clear fw indicator */
1804 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0); 1871 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1805 1872
1806 /* clear target LF timer interrupts */ 1873 /* clear target LF timer interrupts */
1807 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 1874 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1826,6 +1893,8 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
1826 SOC_RESET_CONTROL_ADDRESS); 1893 SOC_RESET_CONTROL_ADDRESS);
1827 msleep(10); 1894 msleep(10);
1828 1895
1896 ath10k_pci_warm_reset_si0(ar);
1897
1829 /* debug */ 1898 /* debug */
1830 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1899 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1831 PCIE_INTR_CAUSE_ADDRESS); 1900 PCIE_INTR_CAUSE_ADDRESS);
@@ -1934,7 +2003,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1934 irq_mode = "legacy"; 2003 irq_mode = "legacy";
1935 2004
1936 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 2005 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1937 ath10k_info("pci irq %s\n", irq_mode); 2006 ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2007 irq_mode, ath10k_pci_irq_mode,
2008 ath10k_pci_reset_mode);
1938 2009
1939 return 0; 2010 return 0;
1940 2011
@@ -1952,23 +2023,52 @@ err:
1952 return ret; 2023 return ret;
1953} 2024}
1954 2025
2026static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2027{
2028 int i, ret;
2029
2030 /*
 2031 * Sometimes warm reset succeeds after retries.
 2032 *
 2033 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
 2034 * on the first try.
2035 */
2036 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2037 ret = __ath10k_pci_hif_power_up(ar, false);
2038 if (ret == 0)
2039 break;
2040
2041 ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2042 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
2043 }
2044
2045 return ret;
2046}
2047
1955static int ath10k_pci_hif_power_up(struct ath10k *ar) 2048static int ath10k_pci_hif_power_up(struct ath10k *ar)
1956{ 2049{
1957 int ret; 2050 int ret;
1958 2051
2052 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2053
1959 /* 2054 /*
1960 * Hardware CUS232 version 2 has some issues with cold reset and the 2055 * Hardware CUS232 version 2 has some issues with cold reset and the
1961 * preferred (and safer) way to perform a device reset is through a 2056 * preferred (and safer) way to perform a device reset is through a
1962 * warm reset. 2057 * warm reset.
1963 * 2058 *
 1964 * Warm reset doesn't always work though (notably after a firmware 2059 * Warm reset doesn't always work though, so falling back to cold
 1965 * crash) so fall back to cold reset if necessary. 2060 * reset may be necessary.
1966 */ 2061 */
1967 ret = __ath10k_pci_hif_power_up(ar, false); 2062 ret = ath10k_pci_hif_power_up_warm(ar);
1968 if (ret) { 2063 if (ret) {
1969 ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n", 2064 ath10k_warn("failed to power up target using warm reset: %d\n",
1970 ret); 2065 ret);
1971 2066
2067 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2068 return ret;
2069
2070 ath10k_warn("trying cold reset\n");
2071
1972 ret = __ath10k_pci_hif_power_up(ar, true); 2072 ret = __ath10k_pci_hif_power_up(ar, true);
1973 if (ret) { 2073 if (ret) {
1974 ath10k_err("failed to power up target using cold reset too (%d)\n", 2074 ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2084,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
1984{ 2084{
1985 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2085 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986 2086
2087 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2088
1987 ath10k_pci_free_early_irq(ar); 2089 ath10k_pci_free_early_irq(ar);
1988 ath10k_pci_kill_tasklet(ar); 2090 ath10k_pci_kill_tasklet(ar);
1989 ath10k_pci_deinit_irq(ar); 2091 ath10k_pci_deinit_irq(ar);
2092 ath10k_pci_ce_deinit(ar);
1990 ath10k_pci_warm_reset(ar); 2093 ath10k_pci_warm_reset(ar);
1991 2094
1992 ath10k_pci_ce_deinit(ar);
1993 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) 2095 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1994 ath10k_do_pci_sleep(ar); 2096 ath10k_do_pci_sleep(ar);
1995} 2097}
@@ -2137,7 +2239,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2137static void ath10k_pci_early_irq_tasklet(unsigned long data) 2239static void ath10k_pci_early_irq_tasklet(unsigned long data)
2138{ 2240{
2139 struct ath10k *ar = (struct ath10k *)data; 2241 struct ath10k *ar = (struct ath10k *)data;
2140 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2141 u32 fw_ind; 2242 u32 fw_ind;
2142 int ret; 2243 int ret;
2143 2244
@@ -2148,14 +2249,11 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
2148 return; 2249 return;
2149 } 2250 }
2150 2251
2151 fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address); 2252 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2152 if (fw_ind & FW_IND_EVENT_PENDING) { 2253 if (fw_ind & FW_IND_EVENT_PENDING) {
2153 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 2254 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2154 fw_ind & ~FW_IND_EVENT_PENDING); 2255 fw_ind & ~FW_IND_EVENT_PENDING);
2155 2256 ath10k_pci_hif_dump_area(ar);
2156 /* Some structures are unavailable during early boot or at
2157 * driver teardown so just print that the device has crashed. */
2158 ath10k_warn("device crashed - no diagnostics available\n");
2159 } 2257 }
2160 2258
2161 ath10k_pci_sleep(ar); 2259 ath10k_pci_sleep(ar);
@@ -2385,33 +2483,69 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
2385static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2483static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2386{ 2484{
2387 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2485 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 int wait_limit = 300; /* 3 sec */ 2486 unsigned long timeout;
2389 int ret; 2487 int ret;
2488 u32 val;
2489
2490 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2390 2491
2391 ret = ath10k_pci_wake(ar); 2492 ret = ath10k_pci_wake(ar);
2392 if (ret) { 2493 if (ret) {
2393 ath10k_err("failed to wake up target: %d\n", ret); 2494 ath10k_err("failed to wake up target for init: %d\n", ret);
2394 return ret; 2495 return ret;
2395 } 2496 }
2396 2497
2397 while (wait_limit-- && 2498 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2398 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & 2499
2399 FW_IND_INITIALIZED)) { 2500 do {
2501 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2502
2503 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2504
2505 /* target should never return this */
2506 if (val == 0xffffffff)
2507 continue;
2508
2509 /* the device has crashed so don't bother trying anymore */
2510 if (val & FW_IND_EVENT_PENDING)
2511 break;
2512
2513 if (val & FW_IND_INITIALIZED)
2514 break;
2515
2400 if (ar_pci->num_msi_intrs == 0) 2516 if (ar_pci->num_msi_intrs == 0)
2401 /* Fix potential race by repeating CORE_BASE writes */ 2517 /* Fix potential race by repeating CORE_BASE writes */
2402 iowrite32(PCIE_INTR_FIRMWARE_MASK | 2518 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2403 PCIE_INTR_CE_MASK_ALL, 2519 PCIE_INTR_FIRMWARE_MASK |
2404 ar_pci->mem + (SOC_CORE_BASE_ADDRESS | 2520 PCIE_INTR_CE_MASK_ALL);
2405 PCIE_INTR_ENABLE_ADDRESS)); 2521
2406 mdelay(10); 2522 mdelay(10);
2407 } 2523 } while (time_before(jiffies, timeout));
2408 2524
2409 if (wait_limit < 0) { 2525 if (val == 0xffffffff) {
2410 ath10k_err("target stalled\n"); 2526 ath10k_err("failed to read device register, device is gone\n");
2411 ret = -EIO; 2527 ret = -EIO;
2412 goto out; 2528 goto out;
2413 } 2529 }
2414 2530
2531 if (val & FW_IND_EVENT_PENDING) {
2532 ath10k_warn("device has crashed during init\n");
2533 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2534 val & ~FW_IND_EVENT_PENDING);
2535 ath10k_pci_hif_dump_area(ar);
2536 ret = -ECOMM;
2537 goto out;
2538 }
2539
2540 if (!(val & FW_IND_INITIALIZED)) {
2541 ath10k_err("failed to receive initialized event from target: %08x\n",
2542 val);
2543 ret = -ETIMEDOUT;
2544 goto out;
2545 }
2546
2547 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2548
2415out: 2549out:
2416 ath10k_pci_sleep(ar); 2550 ath10k_pci_sleep(ar);
2417 return ret; 2551 return ret;
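The rewritten wait loop above distinguishes three outcomes where the old counter-based loop had one: an all-ones readback (-EIO, the device fell off the bus), FW_IND_EVENT_PENDING (-ECOMM, firmware crashed during init, with a crash dump), and expiry of the 3 s window (-ETIMEDOUT). Compressed to the jiffies polling idiom it is built on (the MSI-less interrupt re-arm is elided here):

	unsigned long timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
	u32 val;

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
		if (val == 0xffffffff)
			continue;	/* bus returning all-ones; keep polling */
		if (val & (FW_IND_EVENT_PENDING | FW_IND_INITIALIZED))
			break;		/* crashed or initialised; sorted out after the loop */
		mdelay(10);
	} while (time_before(jiffies, timeout));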
@@ -2422,6 +2556,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
2422 int i, ret; 2556 int i, ret;
2423 u32 val; 2557 u32 val;
2424 2558
2559 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2560
2425 ret = ath10k_do_pci_wake(ar); 2561 ret = ath10k_do_pci_wake(ar);
2426 if (ret) { 2562 if (ret) {
2427 ath10k_err("failed to wake up target: %d\n", 2563 ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2589,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
2453 } 2589 }
2454 2590
2455 ath10k_do_pci_sleep(ar); 2591 ath10k_do_pci_sleep(ar);
2592
2593 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2594
2456 return 0; 2595 return 0;
2457} 2596}
2458 2597
@@ -2484,7 +2623,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2484 struct ath10k_pci *ar_pci; 2623 struct ath10k_pci *ar_pci;
2485 u32 lcr_val, chip_id; 2624 u32 lcr_val, chip_id;
2486 2625
2487 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 2626 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2488 2627
2489 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); 2628 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2490 if (ar_pci == NULL) 2629 if (ar_pci == NULL)
@@ -2503,7 +2642,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2503 goto err_ar_pci; 2642 goto err_ar_pci;
2504 } 2643 }
2505 2644
2506 if (ath10k_target_ps) 2645 if (ath10k_pci_target_ps)
2507 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); 2646 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2508 2647
2509 ath10k_pci_dump_features(ar_pci); 2648 ath10k_pci_dump_features(ar_pci);
@@ -2516,23 +2655,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2516 } 2655 }
2517 2656
2518 ar_pci->ar = ar; 2657 ar_pci->ar = ar;
2519 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2520 atomic_set(&ar_pci->keep_awake_count, 0); 2658 atomic_set(&ar_pci->keep_awake_count, 0);
2521 2659
2522 pci_set_drvdata(pdev, ar); 2660 pci_set_drvdata(pdev, ar);
2523 2661
2524 /*
2525 * Without any knowledge of the Host, the Target may have been reset or
2526 * power cycled and its Config Space may no longer reflect the PCI
2527 * address space that was assigned earlier by the PCI infrastructure.
2528 * Refresh it now.
2529 */
2530 ret = pci_assign_resource(pdev, BAR_NUM);
2531 if (ret) {
2532 ath10k_err("failed to assign PCI space: %d\n", ret);
2533 goto err_ar;
2534 }
2535
2536 ret = pci_enable_device(pdev); 2662 ret = pci_enable_device(pdev);
2537 if (ret) { 2663 if (ret) {
2538 ath10k_err("failed to enable PCI device: %d\n", ret); 2664 ath10k_err("failed to enable PCI device: %d\n", ret);
@@ -2594,16 +2720,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2594 2720
2595 ath10k_do_pci_sleep(ar); 2721 ath10k_do_pci_sleep(ar);
2596 2722
2723 ret = ath10k_pci_alloc_ce(ar);
2724 if (ret) {
2725 ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2726 goto err_iomap;
2727 }
2728
2597 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 2729 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2598 2730
2599 ret = ath10k_core_register(ar, chip_id); 2731 ret = ath10k_core_register(ar, chip_id);
2600 if (ret) { 2732 if (ret) {
2601 ath10k_err("failed to register driver core: %d\n", ret); 2733 ath10k_err("failed to register driver core: %d\n", ret);
2602 goto err_iomap; 2734 goto err_free_ce;
2603 } 2735 }
2604 2736
2605 return 0; 2737 return 0;
2606 2738
2739err_free_ce:
2740 ath10k_pci_free_ce(ar);
2607err_iomap: 2741err_iomap:
2608 pci_iounmap(pdev, mem); 2742 pci_iounmap(pdev, mem);
2609err_master: 2743err_master:
@@ -2626,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2626 struct ath10k *ar = pci_get_drvdata(pdev); 2760 struct ath10k *ar = pci_get_drvdata(pdev);
2627 struct ath10k_pci *ar_pci; 2761 struct ath10k_pci *ar_pci;
2628 2762
2629 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 2763 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2630 2764
2631 if (!ar) 2765 if (!ar)
2632 return; 2766 return;
@@ -2636,9 +2770,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2636 if (!ar_pci) 2770 if (!ar_pci)
2637 return; 2771 return;
2638 2772
2639 tasklet_kill(&ar_pci->msi_fw_err);
2640
2641 ath10k_core_unregister(ar); 2773 ath10k_core_unregister(ar);
2774 ath10k_pci_free_ce(ar);
2642 2775
2643 pci_iounmap(pdev, ar_pci->mem); 2776 pci_iounmap(pdev, ar_pci->mem);
2644 pci_release_region(pdev, BAR_NUM); 2777 pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2813,5 @@ module_exit(ath10k_pci_exit);
2680MODULE_AUTHOR("Qualcomm Atheros"); 2813MODULE_AUTHOR("Qualcomm Atheros");
2681MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 2814MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2682MODULE_LICENSE("Dual BSD/GPL"); 2815MODULE_LICENSE("Dual BSD/GPL");
2683MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 2816MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2684MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2685MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 2817MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index b43fdb4f7319..dfdebb4157aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -189,9 +189,6 @@ struct ath10k_pci {
189 189
190 struct ath10k_hif_cb msg_callbacks_current; 190 struct ath10k_hif_cb msg_callbacks_current;
191 191
192 /* Target address used to signal a pending firmware event */
193 u32 fw_indicator_address;
194
195 /* Copy Engine used for Diagnostic Accesses */ 192 /* Copy Engine used for Diagnostic Accesses */
196 struct ath10k_ce_pipe *ce_diag; 193 struct ath10k_ce_pipe *ce_diag;
197 194
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 0541dd939ce9..82669a77e553 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,189 +100,6 @@ exit:
100 wake_up(&htt->empty_tx_wq); 100 wake_up(&htt->empty_tx_wq);
101} 101}
102 102
103static const u8 rx_legacy_rate_idx[] = {
104 3, /* 0x00 - 11Mbps */
105 2, /* 0x01 - 5.5Mbps */
106 1, /* 0x02 - 2Mbps */
107 0, /* 0x03 - 1Mbps */
108 3, /* 0x04 - 11Mbps */
109 2, /* 0x05 - 5.5Mbps */
110 1, /* 0x06 - 2Mbps */
111 0, /* 0x07 - 1Mbps */
112 10, /* 0x08 - 48Mbps */
113 8, /* 0x09 - 24Mbps */
114 6, /* 0x0A - 12Mbps */
115 4, /* 0x0B - 6Mbps */
116 11, /* 0x0C - 54Mbps */
117 9, /* 0x0D - 36Mbps */
118 7, /* 0x0E - 18Mbps */
119 5, /* 0x0F - 9Mbps */
120};
121
122static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
123 enum ieee80211_band band,
124 struct ieee80211_rx_status *status)
125{
126 u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
127 u8 info0 = info->rate.info0;
128 u32 info1 = info->rate.info1;
129 u32 info2 = info->rate.info2;
130 u8 preamble = 0;
131
132 /* Check if valid fields */
133 if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
134 return;
135
136 preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
137
138 switch (preamble) {
139 case HTT_RX_LEGACY:
140 cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
141 rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
142 rate_idx = 0;
143
144 if (rate < 0x08 || rate > 0x0F)
145 break;
146
147 switch (band) {
148 case IEEE80211_BAND_2GHZ:
149 if (cck)
150 rate &= ~BIT(3);
151 rate_idx = rx_legacy_rate_idx[rate];
152 break;
153 case IEEE80211_BAND_5GHZ:
154 rate_idx = rx_legacy_rate_idx[rate];
155 /* We are using same rate table registering
156 HW - ath10k_rates[]. In case of 5GHz skip
157 CCK rates, so -4 here */
158 rate_idx -= 4;
159 break;
160 default:
161 break;
162 }
163
164 status->rate_idx = rate_idx;
165 break;
166 case HTT_RX_HT:
167 case HTT_RX_HT_WITH_TXBF:
168 /* HT-SIG - Table 20-11 in info1 and info2 */
169 mcs = info1 & 0x1F;
170 nss = mcs >> 3;
171 bw = (info1 >> 7) & 1;
172 sgi = (info2 >> 7) & 1;
173
174 status->rate_idx = mcs;
175 status->flag |= RX_FLAG_HT;
176 if (sgi)
177 status->flag |= RX_FLAG_SHORT_GI;
178 if (bw)
179 status->flag |= RX_FLAG_40MHZ;
180 break;
181 case HTT_RX_VHT:
182 case HTT_RX_VHT_WITH_TXBF:
183 /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
184 TODO check this */
185 mcs = (info2 >> 4) & 0x0F;
186 nss = ((info1 >> 10) & 0x07) + 1;
187 bw = info1 & 3;
188 sgi = info2 & 1;
189
190 status->rate_idx = mcs;
191 status->vht_nss = nss;
192
193 if (sgi)
194 status->flag |= RX_FLAG_SHORT_GI;
195
196 switch (bw) {
197 /* 20MHZ */
198 case 0:
199 break;
200 /* 40MHZ */
201 case 1:
202 status->flag |= RX_FLAG_40MHZ;
203 break;
204 /* 80MHZ */
205 case 2:
206 status->vht_flag |= RX_VHT_FLAG_80MHZ;
207 }
208
209 status->flag |= RX_FLAG_VHT;
210 break;
211 default:
212 break;
213 }
214}
215
216void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
217{
218 struct ieee80211_rx_status *status;
219 struct ieee80211_channel *ch;
220 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
221
222 status = IEEE80211_SKB_RXCB(info->skb);
223 memset(status, 0, sizeof(*status));
224
225 if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
226 status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
227 RX_FLAG_MMIC_STRIPPED;
228 hdr->frame_control = __cpu_to_le16(
229 __le16_to_cpu(hdr->frame_control) &
230 ~IEEE80211_FCTL_PROTECTED);
231 }
232
233 if (info->mic_err)
234 status->flag |= RX_FLAG_MMIC_ERROR;
235
236 if (info->fcs_err)
237 status->flag |= RX_FLAG_FAILED_FCS_CRC;
238
239 if (info->amsdu_more)
240 status->flag |= RX_FLAG_AMSDU_MORE;
241
242 status->signal = info->signal;
243
244 spin_lock_bh(&ar->data_lock);
245 ch = ar->scan_channel;
246 if (!ch)
247 ch = ar->rx_channel;
248 spin_unlock_bh(&ar->data_lock);
249
250 if (!ch) {
251 ath10k_warn("no channel configured; ignoring frame!\n");
252 dev_kfree_skb_any(info->skb);
253 return;
254 }
255
256 process_rx_rates(ar, info, ch->band, status);
257 status->band = ch->band;
258 status->freq = ch->center_freq;
259
260 if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
261 /* TSF available only in 32-bit */
262 status->mactime = info->tsf & 0xffffffff;
263 status->flag |= RX_FLAG_MACTIME_END;
264 }
265
266 ath10k_dbg(ATH10K_DBG_DATA,
267 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
268 info->skb,
269 info->skb->len,
270 status->flag == 0 ? "legacy" : "",
271 status->flag & RX_FLAG_HT ? "ht" : "",
272 status->flag & RX_FLAG_VHT ? "vht" : "",
273 status->flag & RX_FLAG_40MHZ ? "40" : "",
274 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
275 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
276 status->rate_idx,
277 status->vht_nss,
278 status->freq,
279 status->band, status->flag, info->fcs_err);
280 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
281 info->skb->data, info->skb->len);
282
283 ieee80211_rx(ar->hw, info->skb);
284}
285
286struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 103struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
287 const u8 *addr) 104 const u8 *addr)
288{ 105{
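The large block removed above is the legacy/HT/VHT rx-rate decode, which this
commit relocates out of txrx.c. The only subtle piece is the legacy lookup:
the hardware rate code indexes a table laid out like the registered rate
array, whose first four entries are the 2 GHz CCK rates, hence the "-4" on
5 GHz. A simplified, self-contained model of that lookup (not the driver's
actual helper):

#include <stdio.h>

/* Same layout as the rx_legacy_rate_idx[] table removed above. */
static const unsigned char rx_legacy_rate_idx[] = {
	3, 2, 1, 0, 3, 2, 1, 0,		/* CCK codes 0x00..0x07 */
	10, 8, 6, 4, 11, 9, 7, 5,	/* OFDM codes 0x08..0x0F */
};

static int legacy_rate_to_idx(unsigned int rate, int is_5ghz)
{
	int idx = rx_legacy_rate_idx[rate & 0x0f];

	/* The 5 GHz band registers no CCK entries, so skip the four
	 * leading CCK slots of the combined table. */
	if (is_5ghz)
		idx -= 4;
	return idx;
}

int main(void)
{
	/* OFDM 6 Mbps (code 0x0b): index 4 on 2 GHz, index 0 on 5 GHz. */
	printf("%d %d\n", legacy_rate_to_idx(0x0b, 0),
	       legacy_rate_to_idx(0x0b, 1));
	return 0;
}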
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 356dc9c04c9e..aee3e20058f8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -21,7 +21,6 @@
21 21
22void ath10k_txrx_tx_unref(struct ath10k_htt *htt, 22void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
23 const struct htt_tx_done *tx_done); 23 const struct htt_tx_done *tx_done);
24void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
25 24
26struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 25struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
27 const u8 *addr); 26 const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb1f7b5bcf4c..4b7782a529ac 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -639,6 +639,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
639 struct sk_buff *wmi_skb; 639 struct sk_buff *wmi_skb;
640 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 640 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
641 int len; 641 int len;
642 u32 buf_len = skb->len;
642 u16 fc; 643 u16 fc;
643 644
644 hdr = (struct ieee80211_hdr *)skb->data; 645 hdr = (struct ieee80211_hdr *)skb->data;
@@ -648,6 +649,15 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
648 return -EINVAL; 649 return -EINVAL;
649 650
650 len = sizeof(cmd->hdr) + skb->len; 651 len = sizeof(cmd->hdr) + skb->len;
652
653 if ((ieee80211_is_action(hdr->frame_control) ||
654 ieee80211_is_deauth(hdr->frame_control) ||
655 ieee80211_is_disassoc(hdr->frame_control)) &&
656 ieee80211_has_protected(hdr->frame_control)) {
657 len += IEEE80211_CCMP_MIC_LEN;
658 buf_len += IEEE80211_CCMP_MIC_LEN;
659 }
660
651 len = round_up(len, 4); 661 len = round_up(len, 4);
652 662
653 wmi_skb = ath10k_wmi_alloc_skb(len); 663 wmi_skb = ath10k_wmi_alloc_skb(len);
@@ -659,7 +669,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
659 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id); 669 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
660 cmd->hdr.tx_rate = 0; 670 cmd->hdr.tx_rate = 0;
661 cmd->hdr.tx_power = 0; 671 cmd->hdr.tx_power = 0;
662 cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len)); 672 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
663 673
664 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); 674 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
665 memcpy(cmd->buf, skb->data, skb->len); 675 memcpy(cmd->buf, skb->data, skb->len);
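The hunk above reserves room for a CCMP MIC when a protected robust management
frame (action/deauth/disassoc) is handed to firmware, since the firmware
appends the 8-byte MIC itself. A hedged sketch of the length accounting, with
CCMP_MIC_LEN standing in for IEEE80211_CCMP_MIC_LEN:

#include <stdio.h>

#define CCMP_MIC_LEN 8	/* bytes; mirrors IEEE80211_CCMP_MIC_LEN */

static unsigned int mgmt_tx_len(unsigned int hdr_len, unsigned int frame_len,
				int protected_robust_mgmt)
{
	unsigned int len = hdr_len + frame_len;

	if (protected_robust_mgmt)
		len += CCMP_MIC_LEN;	/* firmware appends the MIC */

	return (len + 3) & ~3u;		/* round_up(len, 4) */
}

int main(void)
{
	printf("%u\n", mgmt_tx_len(16, 30, 1));	/* prints 56 */
	return 0;
}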
@@ -957,10 +967,16 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
957 * frames with Protected Bit set. */ 967 * frames with Protected Bit set. */
958 if (ieee80211_has_protected(hdr->frame_control) && 968 if (ieee80211_has_protected(hdr->frame_control) &&
959 !ieee80211_is_auth(hdr->frame_control)) { 969 !ieee80211_is_auth(hdr->frame_control)) {
960 status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | 970 status->flag |= RX_FLAG_DECRYPTED;
961 RX_FLAG_MMIC_STRIPPED; 971
962 hdr->frame_control = __cpu_to_le16(fc & 972 if (!ieee80211_is_action(hdr->frame_control) &&
973 !ieee80211_is_deauth(hdr->frame_control) &&
974 !ieee80211_is_disassoc(hdr->frame_control)) {
975 status->flag |= RX_FLAG_IV_STRIPPED |
976 RX_FLAG_MMIC_STRIPPED;
977 hdr->frame_control = __cpu_to_le16(fc &
963 ~IEEE80211_FCTL_PROTECTED); 978 ~IEEE80211_FCTL_PROTECTED);
979 }
964 } 980 }
965 981
966 ath10k_dbg(ATH10K_DBG_MGMT, 982 ath10k_dbg(ATH10K_DBG_MGMT,
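This is the RX counterpart of the MIC change: protected robust management
frames are reported as decrypted but keep their IV and MIC so mac80211 can
validate them, while other protected frames are reported fully stripped. A
small model of the resulting flag logic (flag values are illustrative, not
the mac80211 constants):

#include <stdio.h>

#define FLAG_DECRYPTED		0x1
#define FLAG_IV_STRIPPED	0x2
#define FLAG_MMIC_STRIPPED	0x4

static unsigned int rx_flags(int is_protected, int is_robust_mgmt)
{
	unsigned int flags = 0;

	if (!is_protected)
		return flags;

	flags |= FLAG_DECRYPTED;
	if (!is_robust_mgmt)	/* robust mgmt keeps IV/MIC intact */
		flags |= FLAG_IV_STRIPPED | FLAG_MMIC_STRIPPED;
	return flags;
}

int main(void)
{
	printf("0x%x 0x%x\n", rx_flags(1, 1), rx_flags(1, 0));
	return 0;
}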
@@ -1362,13 +1378,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1362 struct sk_buff *bcn; 1378 struct sk_buff *bcn;
1363 int ret, vdev_id = 0; 1379 int ret, vdev_id = 0;
1364 1380
1365 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
1366
1367 ev = (struct wmi_host_swba_event *)skb->data; 1381 ev = (struct wmi_host_swba_event *)skb->data;
1368 map = __le32_to_cpu(ev->vdev_map); 1382 map = __le32_to_cpu(ev->vdev_map);
1369 1383
1370 ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" 1384 ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1371 "-vdev map 0x%x\n",
1372 ev->vdev_map); 1385 ev->vdev_map);
1373 1386
1374 for (; map; map >>= 1, vdev_id++) { 1387 for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1398,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1385 bcn_info = &ev->bcn_info[i]; 1398 bcn_info = &ev->bcn_info[i];
1386 1399
1387 ath10k_dbg(ATH10K_DBG_MGMT, 1400 ath10k_dbg(ATH10K_DBG_MGMT,
1388 "-bcn_info[%d]:\n" 1401 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
1389 "--tim_len %d\n"
1390 "--tim_mcast %d\n"
1391 "--tim_changed %d\n"
1392 "--tim_num_ps_pending %d\n"
1393 "--tim_bitmap 0x%08x%08x%08x%08x\n",
1394 i, 1402 i,
1395 __le32_to_cpu(bcn_info->tim_info.tim_len), 1403 __le32_to_cpu(bcn_info->tim_info.tim_len),
1396 __le32_to_cpu(bcn_info->tim_info.tim_mcast), 1404 __le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -1439,6 +1447,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1439 ATH10K_SKB_CB(arvif->beacon)->paddr, 1447 ATH10K_SKB_CB(arvif->beacon)->paddr,
1440 arvif->beacon->len, DMA_TO_DEVICE); 1448 arvif->beacon->len, DMA_TO_DEVICE);
1441 dev_kfree_skb_any(arvif->beacon); 1449 dev_kfree_skb_any(arvif->beacon);
1450 arvif->beacon = NULL;
1442 } 1451 }
1443 1452
1444 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, 1453 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
@@ -1448,6 +1457,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1448 ATH10K_SKB_CB(bcn)->paddr); 1457 ATH10K_SKB_CB(bcn)->paddr);
1449 if (ret) { 1458 if (ret) {
1450 ath10k_warn("failed to map beacon: %d\n", ret); 1459 ath10k_warn("failed to map beacon: %d\n", ret);
1460 dev_kfree_skb_any(bcn);
1451 goto skip; 1461 goto skip;
1452 } 1462 }
1453 1463
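Two leak fixes land in the swba handler above: the old beacon pointer is
cleared after being freed, and a freshly built beacon is freed when DMA
mapping fails instead of being dropped. The ownership pattern, reduced to
plain C (map_buf() is a stand-in for dma_map_single() plus its error check):

#include <stdlib.h>

struct vif { void *beacon; };

static int map_buf(void *buf)
{
	return buf ? 0 : -1;	/* stand-in for the real mapping step */
}

static int swap_beacon(struct vif *vif, void *bcn)
{
	if (vif->beacon) {
		free(vif->beacon);
		vif->beacon = NULL;	/* never leave a dangling pointer */
	}

	if (map_buf(bcn)) {
		free(bcn);		/* error path owns the new buffer */
		return -1;
	}

	vif->beacon = bcn;
	return 0;
}

int main(void)
{
	struct vif v = { 0 };
	int ret = swap_beacon(&v, malloc(16));

	free(v.beacon);
	return ret;
}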
@@ -2365,7 +2375,7 @@ void ath10k_wmi_detach(struct ath10k *ar)
2365 ar->wmi.num_mem_chunks = 0; 2375 ar->wmi.num_mem_chunks = 0;
2366} 2376}
2367 2377
2368int ath10k_wmi_connect_htc_service(struct ath10k *ar) 2378int ath10k_wmi_connect(struct ath10k *ar)
2369{ 2379{
2370 int status; 2380 int status;
2371 struct ath10k_htc_svc_conn_req conn_req; 2381 struct ath10k_htc_svc_conn_req conn_req;
@@ -2393,8 +2403,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
2393 return 0; 2403 return 0;
2394} 2404}
2395 2405
2396int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 2406static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2397 u16 rd5g, u16 ctl2g, u16 ctl5g) 2407 u16 rd2g, u16 rd5g, u16 ctl2g,
2408 u16 ctl5g)
2398{ 2409{
2399 struct wmi_pdev_set_regdomain_cmd *cmd; 2410 struct wmi_pdev_set_regdomain_cmd *cmd;
2400 struct sk_buff *skb; 2411 struct sk_buff *skb;
@@ -2418,6 +2429,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
2418 ar->wmi.cmd->pdev_set_regdomain_cmdid); 2429 ar->wmi.cmd->pdev_set_regdomain_cmdid);
2419} 2430}
2420 2431
2432static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2433 u16 rd2g, u16 rd5g,
2434 u16 ctl2g, u16 ctl5g,
2435 enum wmi_dfs_region dfs_reg)
2436{
2437 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
2438 struct sk_buff *skb;
2439
2440 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2441 if (!skb)
2442 return -ENOMEM;
2443
2444 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
2445 cmd->reg_domain = __cpu_to_le32(rd);
2446 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
2447 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
2448 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
2449 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2450 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
2451
2452 ath10k_dbg(ATH10K_DBG_WMI,
2453 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
2454 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
2455
2456 return ath10k_wmi_cmd_send(ar, skb,
2457 ar->wmi.cmd->pdev_set_regdomain_cmdid);
2458}
2459
2460int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
2461 u16 rd5g, u16 ctl2g, u16 ctl5g,
2462 enum wmi_dfs_region dfs_reg)
2463{
2464 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
2465 return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
2466 ctl2g, ctl5g, dfs_reg);
2467 else
2468 return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
2469 ctl2g, ctl5g);
2470}
2471
2421int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 2472int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2422 const struct wmi_channel_arg *arg) 2473 const struct wmi_channel_arg *arg)
2423{ 2474{
@@ -3456,8 +3507,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
3456 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 3507 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
3457 3508
3458 ath10k_dbg(ATH10K_DBG_WMI, 3509 ath10k_dbg(ATH10K_DBG_WMI,
3459 "wmi peer assoc vdev %d addr %pM\n", 3510 "wmi peer assoc vdev %d addr %pM (%s)\n",
3460 arg->vdev_id, arg->addr); 3511 arg->vdev_id, arg->addr,
3512 arg->peer_reassoc ? "reassociate" : "new");
3461 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 3513 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
3462} 3514}
3463 3515
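The regdomain change above splits the command into main and 10.x firmware
layouts (the 10.x struct grows a dfs_domain word) and hides the choice behind
the original entry point, keyed off ATH10K_FW_FEATURE_WMI_10X. The dispatch
shape, as a standalone sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static int send_main_cmd(int rd)
{
	printf("main layout: rd=%#x\n", rd);
	return 0;
}

static int send_10x_cmd(int rd, int dfs_region)
{
	printf("10.x layout: rd=%#x dfs=%d\n", rd, dfs_region);
	return 0;
}

/* Callers never see which WMI ABI the firmware speaks. */
static int set_regdomain(bool fw_is_10x, int rd, int dfs_region)
{
	if (fw_is_10x)
		return send_10x_cmd(rd, dfs_region);
	return send_main_cmd(rd);
}

int main(void)
{
	set_regdomain(true, 0x37, 1);
	set_regdomain(false, 0x37, 1);
	return 0;
}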
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f51d5ca0141f..e93df2c10413 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
198 } __packed; 198 } __packed;
199} __packed; 199} __packed;
200 200
201/* macro to convert MAC address from WMI word format to char array */
202#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
203 (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
204 (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
205 (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
206 (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
207 (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
208 (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
209 } while (0)
210
211struct wmi_cmd_map { 201struct wmi_cmd_map {
212 u32 init_cmdid; 202 u32 init_cmdid;
213 u32 start_scan_cmdid; 203 u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
2185 __le32 conformance_test_limit_5G; 2175 __le32 conformance_test_limit_5G;
2186} __packed; 2176} __packed;
2187 2177
2178enum wmi_dfs_region {
2179 /* Uninitialized dfs domain */
2180 WMI_UNINIT_DFS_DOMAIN = 0,
2181
2182 /* FCC3 dfs domain */
2183 WMI_FCC_DFS_DOMAIN = 1,
2184
2185 /* ETSI dfs domain */
2186 WMI_ETSI_DFS_DOMAIN = 2,
2187
2188 /*Japan dfs domain */
2189 WMI_MKK4_DFS_DOMAIN = 3,
2190};
2191
2192struct wmi_pdev_set_regdomain_cmd_10x {
2193 __le32 reg_domain;
2194 __le32 reg_domain_2G;
2195 __le32 reg_domain_5G;
2196 __le32 conformance_test_limit_2G;
2197 __le32 conformance_test_limit_5G;
2198
2199 /* dfs domain from wmi_dfs_region */
2200 __le32 dfs_domain;
2201} __packed;
2202
2188/* Command to set/unset chip in quiet mode */ 2203/* Command to set/unset chip in quiet mode */
2189struct wmi_pdev_set_quiet_cmd { 2204struct wmi_pdev_set_quiet_cmd {
2190 /* period in TUs */ 2205 /* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
2210 ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */ 2225 ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
2211}; 2226};
2212 2227
2228enum wmi_rtscts_profile {
2229 WMI_RTSCTS_FOR_NO_RATESERIES = 0,
2230 WMI_RTSCTS_FOR_SECOND_RATESERIES,
2231 WMI_RTSCTS_ACROSS_SW_RETRIES
2232};
2233
2234#define WMI_RTSCTS_ENABLED 1
2235#define WMI_RTSCTS_SET_MASK 0x0f
2236#define WMI_RTSCTS_SET_LSB 0
2237
2238#define WMI_RTSCTS_PROFILE_MASK 0xf0
2239#define WMI_RTSCTS_PROFILE_LSB 4
2240
2213enum wmi_beacon_gen_mode { 2241enum wmi_beacon_gen_mode {
2214 WMI_BEACON_STAGGERED_MODE = 0, 2242 WMI_BEACON_STAGGERED_MODE = 0,
2215 WMI_BEACON_BURST_MODE = 1 2243 WMI_BEACON_BURST_MODE = 1
@@ -2295,9 +2323,9 @@ struct wmi_pdev_param_map {
2295#define WMI_PDEV_PARAM_UNSUPPORTED 0 2323#define WMI_PDEV_PARAM_UNSUPPORTED 0
2296 2324
2297enum wmi_pdev_param { 2325enum wmi_pdev_param {
2298 /* TX chian mask */ 2326 /* TX chain mask */
2299 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, 2327 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
2300 /* RX chian mask */ 2328 /* RX chain mask */
2301 WMI_PDEV_PARAM_RX_CHAIN_MASK, 2329 WMI_PDEV_PARAM_RX_CHAIN_MASK,
2302 /* TX power limit for 2G Radio */ 2330 /* TX power limit for 2G Radio */
2303 WMI_PDEV_PARAM_TXPOWER_LIMIT2G, 2331 WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
2682 /* wal pdev resets */ 2710 /* wal pdev resets */
2683 __le32 pdev_resets; 2711 __le32 pdev_resets;
2684 2712
2713 /* frames dropped due to non-availability of stateless TIDs */
2714 __le32 stateless_tid_alloc_failure;
2715
2685 __le32 phy_underrun; 2716 __le32 phy_underrun;
2686 2717
2687 /* MPDU is more than txop limit */ 2718 /* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
2738 WMI_REQUEST_AP_STAT = 0x02 2769 WMI_REQUEST_AP_STAT = 0x02
2739}; 2770};
2740 2771
2772struct wlan_inst_rssi_args {
2773 __le16 cfg_retry_count;
2774 __le16 retry_count;
2775};
2776
2741struct wmi_request_stats_cmd { 2777struct wmi_request_stats_cmd {
2742 __le32 stats_id; 2778 __le32 stats_id;
2743 2779
2744 /* 2780 __le32 vdev_id;
2745 * Space to add parameters like 2781
2746 * peer mac addr 2782 /* peer MAC address */
2747 */ 2783 struct wmi_mac_addr peer_macaddr;
2784
2785 /* Instantaneous RSSI arguments */
2786 struct wlan_inst_rssi_args inst_rssi_args;
2748} __packed; 2787} __packed;
2749 2788
2750/* Suspend option */ 2789/* Suspend option */
@@ -2795,7 +2834,18 @@ struct wmi_stats_event {
2795 * PDEV statistics 2834 * PDEV statistics
2796 * TODO: add all PDEV stats here 2835 * TODO: add all PDEV stats here
2797 */ 2836 */
2798struct wmi_pdev_stats { 2837struct wmi_pdev_stats_old {
2838 __le32 chan_nf; /* Channel noise floor */
2839 __le32 tx_frame_count; /* TX frame count */
2840 __le32 rx_frame_count; /* RX frame count */
2841 __le32 rx_clear_count; /* rx clear count */
2842 __le32 cycle_count; /* cycle count */
2843 __le32 phy_err_count; /* Phy error count */
2844 __le32 chan_tx_pwr; /* channel tx power */
2845 struct wal_dbg_stats wal; /* WAL dbg stats */
2846} __packed;
2847
2848struct wmi_pdev_stats_10x {
2799 __le32 chan_nf; /* Channel noise floor */ 2849 __le32 chan_nf; /* Channel noise floor */
2800 __le32 tx_frame_count; /* TX frame count */ 2850 __le32 tx_frame_count; /* TX frame count */
2801 __le32 rx_frame_count; /* RX frame count */ 2851 __le32 rx_frame_count; /* RX frame count */
@@ -2804,6 +2854,12 @@ struct wmi_pdev_stats {
2804 __le32 phy_err_count; /* Phy error count */ 2854 __le32 phy_err_count; /* Phy error count */
2805 __le32 chan_tx_pwr; /* channel tx power */ 2855 __le32 chan_tx_pwr; /* channel tx power */
2806 struct wal_dbg_stats wal; /* WAL dbg stats */ 2856 struct wal_dbg_stats wal; /* WAL dbg stats */
2857 __le32 ack_rx_bad;
2858 __le32 rts_bad;
2859 __le32 rts_good;
2860 __le32 fcs_bad;
2861 __le32 no_beacons;
2862 __le32 mib_int_count;
2807} __packed; 2863} __packed;
2808 2864
2809/* 2865/*
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
2818 * peer statistics. 2874 * peer statistics.
2819 * TODO: add more stats 2875 * TODO: add more stats
2820 */ 2876 */
2821struct wmi_peer_stats { 2877struct wmi_peer_stats_old {
2878 struct wmi_mac_addr peer_macaddr;
2879 __le32 peer_rssi;
2880 __le32 peer_tx_rate;
2881} __packed;
2882
2883struct wmi_peer_stats_10x {
2822 struct wmi_mac_addr peer_macaddr; 2884 struct wmi_mac_addr peer_macaddr;
2823 __le32 peer_rssi; 2885 __le32 peer_rssi;
2824 __le32 peer_tx_rate; 2886 __le32 peer_tx_rate;
2887 __le32 peer_rx_rate;
2825} __packed; 2888} __packed;
2826 2889
2827struct wmi_vdev_create_cmd { 2890struct wmi_vdev_create_cmd {
@@ -4196,13 +4259,14 @@ void ath10k_wmi_detach(struct ath10k *ar);
4196int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); 4259int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
4197int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); 4260int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
4198 4261
4199int ath10k_wmi_connect_htc_service(struct ath10k *ar); 4262int ath10k_wmi_connect(struct ath10k *ar);
4200int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 4263int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
4201 const struct wmi_channel_arg *); 4264 const struct wmi_channel_arg *);
4202int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); 4265int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
4203int ath10k_wmi_pdev_resume_target(struct ath10k *ar); 4266int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
4204int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 4267int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
4205 u16 rd5g, u16 ctl2g, u16 ctl5g); 4268 u16 rd5g, u16 ctl2g, u16 ctl5g,
4269 enum wmi_dfs_region dfs_reg);
4206int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value); 4270int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
4207int ath10k_wmi_cmd_init(struct ath10k *ar); 4271int ath10k_wmi_cmd_init(struct ath10k *ar);
4208int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); 4272int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
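The new WMI_RTSCTS_* mask/LSB pairs above pack an enable bit and a profile
selector into one parameter word. A sketch of how such pairs are typically
used (the pack helper is illustrative; the header only declares the
constants):

#include <stdio.h>

#define RTSCTS_SET_MASK		0x0f
#define RTSCTS_SET_LSB		0
#define RTSCTS_PROFILE_MASK	0xf0
#define RTSCTS_PROFILE_LSB	4

static unsigned int pack_rtscts(unsigned int enabled, unsigned int profile)
{
	return ((enabled << RTSCTS_SET_LSB) & RTSCTS_SET_MASK) |
	       ((profile << RTSCTS_PROFILE_LSB) & RTSCTS_PROFILE_MASK);
}

int main(void)
{
	/* enabled=1, profile=2 -> 0x21 */
	printf("%#04x\n", pack_rtscts(1, 2));
	return 0;
}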
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a2973b7acf2..0fce1c76638e 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3709 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP), 3709 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
3710 AR5K_TPC); 3710 AR5K_TPC);
3711 } else { 3711 } else {
3712 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | 3712 ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
3713 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 3713 AR5K_PHY_TXPOWER_RATE_MAX);
3714 } 3714 }
3715 3715
3716 return 0; 3716 return 0;
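The one-line ath5k fix above is an argument-order bug: the register offset had
been OR'ed into the value being written. With write(value, reg) semantics the
value must carry only the field being programmed. Reduced to a sketch (the
offset and cap below are made-up numbers, not the AR5K definitions):

#include <stdio.h>

#define TXPOWER_RATE_MAX_REG	0xa0e8	/* illustrative register offset */
#define TUNE_MAX_TXPOWER	63	/* illustrative power cap */

static void reg_write(unsigned int value, unsigned int reg)
{
	printf("reg %#06x <- %#010x\n", reg, value);
}

int main(void)
{
	/* Before: the offset leaked into the written value. */
	reg_write(TXPOWER_RATE_MAX_REG | TUNE_MAX_TXPOWER,
		  TXPOWER_RATE_MAX_REG);
	/* After: only the intended cap is written. */
	reg_write(TUNE_MAX_TXPOWER, TXPOWER_RATE_MAX_REG);
	return 0;
}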
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index e39e5860a2e9..9c125ff083f7 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,11 +1,19 @@
1config ATH6KL 1config ATH6KL
2 tristate "Atheros mobile chipsets support" 2 tristate "Atheros mobile chipsets support"
3 depends on CFG80211
4 ---help---
5 This module adds core support for wireless adapters based on
6 Atheros AR6003 and AR6004 chipsets. You still need separate
7 bus drivers for USB and SDIO to be able to use real devices.
8
9 If you choose to build it as a module, it will be called
10 ath6kl_core. Please note that AR6002 and AR6001 are not
11 supported by this driver.
3 12
4config ATH6KL_SDIO 13config ATH6KL_SDIO
5 tristate "Atheros ath6kl SDIO support" 14 tristate "Atheros ath6kl SDIO support"
6 depends on ATH6KL 15 depends on ATH6KL
7 depends on MMC 16 depends on MMC
8 depends on CFG80211
9 ---help--- 17 ---help---
10 This module adds support for wireless adapters based on 18 This module adds support for wireless adapters based on
11 Atheros AR6003 and AR6004 chipsets running over SDIO. If you 19 Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
17 tristate "Atheros ath6kl USB support" 25 tristate "Atheros ath6kl USB support"
18 depends on ATH6KL 26 depends on ATH6KL
19 depends on USB 27 depends on USB
20 depends on CFG80211
21 ---help--- 28 ---help---
22 This module adds support for wireless adapters based on 29 This module adds support for wireless adapters based on
23 Atheros AR6004 chipset running over USB. This is still under 30 Atheros AR6004 chipset and chipsets based on it running over
24 implementation and it isn't functional. If you choose to 31 USB. If you choose to build it as a module, it will be
25 build it as a module, it will be called ath6kl_usb. 32 called ath6kl_usb.
26 33
27config ATH6KL_DEBUG 34config ATH6KL_DEBUG
28 bool "Atheros ath6kl debugging" 35 bool "Atheros ath6kl debugging"
29 depends on ATH6KL 36 depends on ATH6KL
30 ---help--- 37 ---help---
31 Enables debug support 38 Enables ath6kl debug support, including debug messages
39 enabled with the debug_mask module parameter and a
40 debugfs interface.
41
42 If unsure, say Y to make it easier to debug problems.
32 43
33config ATH6KL_TRACING 44config ATH6KL_TRACING
34 bool "Atheros ath6kl tracing support" 45 bool "Atheros ath6kl tracing support"
35 depends on ATH6KL 46 depends on ATH6KL
36 depends on EVENT_TRACING 47 depends on EVENT_TRACING
37 ---help--- 48 ---help---
38 Select this to ath6kl use tracing infrastructure. 49 Select this to let ath6kl use the tracing infrastructure
50 which can, for example, be enabled with trace-cmd. All
51 debug messages and commands are delivered using
52 individually enablable trace points.
39 53
40 If unsure, say Y to make it easier to debug problems. 54 If unsure, say Y to make it easier to debug problems.
41 55
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
47 Enabling this makes it possible to change the regdomain in 61 Enabling this makes it possible to change the regdomain in
48 the firmware. This can be only enabled if regulatory requirements 62 the firmware. This can be only enabled if regulatory requirements
49 are taken into account. 63 are taken into account.
64
65 If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index c2c6f4604958..0e26f4a34fda 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
724 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 724 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
725 "added bss %pM to cfg80211\n", bssid); 725 "added bss %pM to cfg80211\n", bssid);
726 kfree(ie); 726 kfree(ie);
727 } else 727 } else {
728 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n"); 728 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
729 }
729 730
730 return bss; 731 return bss;
731} 732}
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
970 ssid_list[i].flag, 971 ssid_list[i].flag,
971 ssid_list[i].ssid.ssid_len, 972 ssid_list[i].ssid.ssid_len,
972 ssid_list[i].ssid.ssid); 973 ssid_list[i].ssid.ssid);
973
974 } 974 }
975 975
976 /* Make sure no old entries are left behind */ 976 /* Make sure no old entries are left behind */
@@ -1759,7 +1759,7 @@ static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
1759} 1759}
1760 1760
1761static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, 1761static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1762 u8 *mac, struct station_info *sinfo) 1762 const u8 *mac, struct station_info *sinfo)
1763{ 1763{
1764 struct ath6kl *ar = ath6kl_priv(dev); 1764 struct ath6kl *ar = ath6kl_priv(dev);
1765 struct ath6kl_vif *vif = netdev_priv(dev); 1765 struct ath6kl_vif *vif = netdev_priv(dev);
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
1897 1897
1898 /* Configure the patterns that we received from the user. */ 1898 /* Configure the patterns that we received from the user. */
1899 for (i = 0; i < wow->n_patterns; i++) { 1899 for (i = 0; i < wow->n_patterns; i++) {
1900
1901 /* 1900 /*
1902 * Convert given nl80211 specific mask value to equivalent 1901 * Convert given nl80211 specific mask value to equivalent
1903 * driver specific mask value and send it to the chip along 1902 * driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2850 if (p.prwise_crypto_type == 0) { 2849 if (p.prwise_crypto_type == 0) {
2851 p.prwise_crypto_type = NONE_CRYPT; 2850 p.prwise_crypto_type = NONE_CRYPT;
2852 ath6kl_set_cipher(vif, 0, true); 2851 ath6kl_set_cipher(vif, 0, true);
2853 } else if (info->crypto.n_ciphers_pairwise == 1) 2852 } else if (info->crypto.n_ciphers_pairwise == 1) {
2854 ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); 2853 ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
2854 }
2855 2855
2856 switch (info->crypto.cipher_group) { 2856 switch (info->crypto.cipher_group) {
2857 case WLAN_CIPHER_SUITE_WEP40: 2857 case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2897 } 2897 }
2898 2898
2899 if (info->inactivity_timeout) { 2899 if (info->inactivity_timeout) {
2900
2901 inactivity_timeout = info->inactivity_timeout; 2900 inactivity_timeout = info->inactivity_timeout;
2902 2901
2903 if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS) 2902 if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
@@ -2975,7 +2974,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
2975static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2974static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2976 2975
2977static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev, 2976static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
2978 u8 *mac) 2977 const u8 *mac)
2979{ 2978{
2980 struct ath6kl *ar = ath6kl_priv(dev); 2979 struct ath6kl *ar = ath6kl_priv(dev);
2981 struct ath6kl_vif *vif = netdev_priv(dev); 2980 struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2986,7 +2985,8 @@ static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
2986} 2985}
2987 2986
2988static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, 2987static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
2989 u8 *mac, struct station_parameters *params) 2988 const u8 *mac,
2989 struct station_parameters *params)
2990{ 2990{
2991 struct ath6kl *ar = ath6kl_priv(dev); 2991 struct ath6kl *ar = ath6kl_priv(dev);
2992 struct ath6kl_vif *vif = netdev_priv(dev); 2992 struct ath6kl_vif *vif = netdev_priv(dev);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4b46adbe8c92..b0b652042760 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
45module_param(recovery_enable, uint, 0644); 45module_param(recovery_enable, uint, 0644);
46module_param(heart_beat_poll, uint, 0644); 46module_param(heart_beat_poll, uint, 0644);
47MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error"); 47MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
48MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \ 48MODULE_PARM_DESC(heart_beat_poll,
49 "polling. This also specifies the polling interval in" \ 49 "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
50 "msecs. Set reocvery_enable for this to be effective"); 50
51 51
52void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) 52void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
53{ 53{
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index dbfd17d0a5fa..55c4064dd506 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
172 struct ath6kl_irq_proc_registers *irq_proc_reg, 172 struct ath6kl_irq_proc_registers *irq_proc_reg,
173 struct ath6kl_irq_enable_reg *irq_enable_reg) 173 struct ath6kl_irq_enable_reg *irq_enable_reg)
174{ 174{
175
176 ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n")); 175 ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
177 176
178 if (irq_proc_reg != NULL) { 177 if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
219 "GMBOX lookahead alias 1: 0x%x\n", 218 "GMBOX lookahead alias 1: 0x%x\n",
220 irq_proc_reg->rx_gmbox_lkahd_alias[1]); 219 irq_proc_reg->rx_gmbox_lkahd_alias[1]);
221 } 220 }
222
223 } 221 }
224 222
225 if (irq_enable_reg != NULL) { 223 if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
1396 const char __user *user_buf, 1394 const char __user *user_buf,
1397 size_t count, loff_t *ppos) 1395 size_t count, loff_t *ppos)
1398{ 1396{
1399
1400 struct ath6kl *ar = file->private_data; 1397 struct ath6kl *ar = file->private_data;
1401 struct ath6kl_vif *vif; 1398 struct ath6kl_vif *vif;
1402 char buf[200]; 1399 char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
1575 const char __user *user_buf, 1572 const char __user *user_buf,
1576 size_t count, loff_t *ppos) 1573 size_t count, loff_t *ppos)
1577{ 1574{
1578
1579 struct ath6kl *ar = file->private_data; 1575 struct ath6kl *ar = file->private_data;
1580 struct ath6kl_vif *vif; 1576 struct ath6kl_vif *vif;
1581 char buf[100]; 1577 char buf[100];
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index ca9ba005f287..e194c10d9f00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
97 struct ath6kl_irq_proc_registers *irq_proc_reg, 97 struct ath6kl_irq_proc_registers *irq_proc_reg,
98 struct ath6kl_irq_enable_reg *irq_en_reg) 98 struct ath6kl_irq_enable_reg *irq_en_reg)
99{ 99{
100
101} 100}
101
102static inline void dump_cred_dist_stats(struct htc_target *target) 102static inline void dump_cred_dist_stats(struct htc_target *target)
103{ 103{
104} 104}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index fea7709b5dda..18c070850a09 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
37 buf = req->virt_dma_buf; 37 buf = req->virt_dma_buf;
38 38
39 for (i = 0; i < req->scat_entries; i++) { 39 for (i = 0; i < req->scat_entries; i++) {
40
41 if (from_dma) 40 if (from_dma)
42 memcpy(req->scat_list[i].buf, buf, 41 memcpy(req->scat_list[i].buf, buf,
43 req->scat_list[i].len); 42 req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
116 le32_to_cpu(regdump_val[i + 2]), 115 le32_to_cpu(regdump_val[i + 2]),
117 le32_to_cpu(regdump_val[i + 3])); 116 le32_to_cpu(regdump_val[i + 3]));
118 } 117 }
119
120} 118}
121 119
122static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) 120static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
701 699
702fail_setup: 700fail_setup:
703 return status; 701 return status;
704
705} 702}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 61f6b21fb0ae..dc6bd8cd9b83 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -197,9 +197,9 @@ struct hif_scatter_req {
197 /* bounce buffer for upper layers to copy to/from */ 197 /* bounce buffer for upper layers to copy to/from */
198 u8 *virt_dma_buf; 198 u8 *virt_dma_buf;
199 199
200 struct hif_scatter_item scat_list[1];
201
202 u32 scat_q_depth; 200 u32 scat_q_depth;
201
202 struct hif_scatter_item scat_list[0];
203}; 203};
204 204
205struct ath6kl_irq_proc_registers { 205struct ath6kl_irq_proc_registers {
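The hif.h hunk above turns scat_list[1] into a zero-length trailing array and
moves it to the end of the struct; the matching allocation fix in sdio.c
further down drops the old "(n_scat_entry - 1)" arithmetic, since the struct
size no longer includes a built-in first element. The allocation pattern in
miniature (a GNU-style [0] array as in the patch; standard C99 would spell it
[]):

#include <stdio.h>
#include <stdlib.h>

struct item { void *buf; unsigned int len; };

struct scat_req {
	unsigned int depth;
	struct item list[0];	/* flexible array member */
};

static struct scat_req *alloc_req(unsigned int n)
{
	/* Reserve exactly n trailing items; no "-1" correction needed. */
	struct scat_req *req = calloc(1, sizeof(*req) +
				      n * sizeof(struct item));

	if (req)
		req->depth = n;
	return req;
}

int main(void)
{
	struct scat_req *req = alloc_req(8);

	printf("%zu bytes\n", sizeof(*req) + 8 * sizeof(struct item));
	free(req);
	return 0;
}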
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 65e5b719093d..e481f14b9878 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
112 if (cur_ep_dist->endpoint == ENDPOINT_0) 112 if (cur_ep_dist->endpoint == ENDPOINT_0)
113 continue; 113 continue;
114 114
115 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) 115 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
116 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; 116 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
117 else { 117 } else {
118 /* 118 /*
119 * For the remaining data endpoints, we assume that 119 * For the remaining data endpoints, we assume that
120 * each cred_per_msg are the same. We use a simple 120 * each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
129 count = (count * 3) >> 2; 129 count = (count * 3) >> 2;
130 count = max(count, cur_ep_dist->cred_per_msg); 130 count = max(count, cur_ep_dist->cred_per_msg);
131 cur_ep_dist->cred_norm = count; 131 cur_ep_dist->cred_norm = count;
132
133 } 132 }
134 133
135 ath6kl_dbg(ATH6KL_DBG_CREDIT, 134 ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
549 enum htc_endpoint_id eid, unsigned int len, 548 enum htc_endpoint_id eid, unsigned int len,
550 int *req_cred) 549 int *req_cred)
551{ 550{
552
553 *req_cred = (len > target->tgt_cred_sz) ? 551 *req_cred = (len > target->tgt_cred_sz) ?
554 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; 552 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
555 553
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
608 unsigned int len; 606 unsigned int len;
609 607
610 while (true) { 608 while (true) {
611
612 flags = 0; 609 flags = 0;
613 610
614 if (list_empty(&endpoint->txq)) 611 if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
889 ac = target->dev->ar->ep2ac_map[endpoint->eid]; 886 ac = target->dev->ar->ep2ac_map[endpoint->eid];
890 887
891 while (true) { 888 while (true) {
892
893 if (list_empty(&endpoint->txq)) 889 if (list_empty(&endpoint->txq))
894 break; 890 break;
895 891
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
1190 list_add_tail(&packet->list, &container); 1186 list_add_tail(&packet->list, &container);
1191 htc_tx_complete(endpoint, &container); 1187 htc_tx_complete(endpoint, &container);
1192 } 1188 }
1193
1194} 1189}
1195 1190
1196static void ath6kl_htc_flush_txep_all(struct htc_target *target) 1191static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1394 1389
1395 ep_cb = ep->ep_cb; 1390 ep_cb = ep->ep_cb;
1396 for (j = 0; j < n_msg; j++) { 1391 for (j = 0; j < n_msg; j++) {
1397
1398 /* 1392 /*
1399 * Reset flag, any packets allocated using the 1393 * Reset flag, any packets allocated using the
1400 * rx_alloc() API cannot be recycled on 1394 * rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1424 } 1418 }
1425 } 1419 }
1426 1420
1427 if (list_empty(&ep->rx_bufq)) 1421 if (list_empty(&ep->rx_bufq)) {
1428 packet = NULL; 1422 packet = NULL;
1429 else { 1423 } else {
1430 packet = list_first_entry(&ep->rx_bufq, 1424 packet = list_first_entry(&ep->rx_bufq,
1431 struct htc_packet, list); 1425 struct htc_packet, list);
1432 list_del(&packet->list); 1426 list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
1487 spin_lock_bh(&target->rx_lock); 1481 spin_lock_bh(&target->rx_lock);
1488 1482
1489 for (i = 0; i < msg; i++) { 1483 for (i = 0; i < msg; i++) {
1490
1491 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i]; 1484 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1492 1485
1493 if (htc_hdr->eid >= ENDPOINT_MAX) { 1486 if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
1708 lk_ahd = (struct htc_lookahead_report *) record_buf; 1701 lk_ahd = (struct htc_lookahead_report *) record_buf;
1709 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && 1702 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
1710 next_lk_ahds) { 1703 next_lk_ahds) {
1711
1712 ath6kl_dbg(ATH6KL_DBG_HTC, 1704 ath6kl_dbg(ATH6KL_DBG_HTC,
1713 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", 1705 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1714 lk_ahd->pre_valid, lk_ahd->post_valid); 1706 lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
1755 } 1747 }
1756 1748
1757 return 0; 1749 return 0;
1758
1759} 1750}
1760 1751
1761static int htc_proc_trailer(struct htc_target *target, 1752static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
1776 status = 0; 1767 status = 0;
1777 1768
1778 while (len > 0) { 1769 while (len > 0) {
1779
1780 if (len < sizeof(struct htc_record_hdr)) { 1770 if (len < sizeof(struct htc_record_hdr)) {
1781 status = -ENOMEM; 1771 status = -ENOMEM;
1782 break; 1772 break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
2098 } 2088 }
2099 2089
2100 if (!fetched_pkts) { 2090 if (!fetched_pkts) {
2101
2102 packet = list_first_entry(rx_pktq, struct htc_packet, 2091 packet = list_first_entry(rx_pktq, struct htc_packet,
2103 list); 2092 list);
2104 2093
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2173 look_aheads[0] = msg_look_ahead; 2162 look_aheads[0] = msg_look_ahead;
2174 2163
2175 while (true) { 2164 while (true) {
2176
2177 /* 2165 /*
2178 * First lookahead sets the expected endpoint IDs for all 2166 * First lookahead sets the expected endpoint IDs for all
2179 * packets in a bundle. 2167 * packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
2825 packet->buf = packet->buf_start; 2813 packet->buf = packet->buf_start;
2826 packet->endpoint = ENDPOINT_0; 2814 packet->endpoint = ENDPOINT_0;
2827 list_add_tail(&packet->list, &target->free_ctrl_rxbuf); 2815 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2828 } else 2816 } else {
2829 list_add_tail(&packet->list, &target->free_ctrl_txbuf); 2817 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2818 }
2830 } 2819 }
2831 2820
2832 return 0; 2821 return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 67aa924ed8b3..756fe52a12c8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
137 credits_required = 0; 137 credits_required = 0;
138 138
139 } else { 139 } else {
140
141 if (ep->cred_dist.credits < credits_required) 140 if (ep->cred_dist.credits < credits_required)
142 break; 141 break;
143 142
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
169 /* queue this packet into the caller's queue */ 168 /* queue this packet into the caller's queue */
170 list_add_tail(&packet->list, queue); 169 list_add_tail(&packet->list, queue);
171 } 170 }
172
173} 171}
174 172
175static void get_htc_packet(struct htc_target *target, 173static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
279 list_add(&packet->list, pkt_queue); 277 list_add(&packet->list, pkt_queue);
280 break; 278 break;
281 } 279 }
282
283 } 280 }
284 281
285 if (status != 0) { 282 if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
385 */ 382 */
386 list_for_each_entry_safe(packet, tmp_pkt, 383 list_for_each_entry_safe(packet, tmp_pkt,
387 txq, list) { 384 txq, list) {
388
389 ath6kl_dbg(ATH6KL_DBG_HTC, 385 ath6kl_dbg(ATH6KL_DBG_HTC,
390 "%s: Indicat overflowed TX pkts: %p\n", 386 "%s: Indicat overflowed TX pkts: %p\n",
391 __func__, packet); 387 __func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
403 list_move_tail(&packet->list, 399 list_move_tail(&packet->list,
404 &send_queue); 400 &send_queue);
405 } 401 }
406
407 } 402 }
408 403
409 if (list_empty(&send_queue)) { 404 if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
454 * enough transmit resources. 449 * enough transmit resources.
455 */ 450 */
456 while (true) { 451 while (true) {
457
458 if (get_queue_depth(&ep->txq) == 0) 452 if (get_queue_depth(&ep->txq) == 0)
459 break; 453 break;
460 454
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
495 } 489 }
496 490
497 spin_lock_bh(&target->tx_lock); 491 spin_lock_bh(&target->tx_lock);
498
499 } 492 }
493
500 /* done with this endpoint, we can clear the count */ 494 /* done with this endpoint, we can clear the count */
501 ep->tx_proc_cnt = 0; 495 ep->tx_proc_cnt = 0;
502 spin_unlock_bh(&target->tx_lock); 496 spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
1106 dev_kfree_skb(skb); 1100 dev_kfree_skb(skb);
1107 1101
1108 return status; 1102 return status;
1109
1110} 1103}
1111 1104
1112static void htc_flush_rx_queue(struct htc_target *target, 1105static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
1258 tx_alloc = 0; 1251 tx_alloc = 0;
1259 1252
1260 } else { 1253 } else {
1261
1262 tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); 1254 tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
1263 if (tx_alloc == 0) { 1255 if (tx_alloc == 0) {
1264 status = -ENOMEM; 1256 status = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 4f316bdcbab5..d5ef211f261c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
1192 1192
1193 if (board_ext_address && 1193 if (board_ext_address &&
1194 ar->fw_board_len == (board_data_size + board_ext_data_size)) { 1194 ar->fw_board_len == (board_data_size + board_ext_data_size)) {
1195
1196 /* write extended board data */ 1195 /* write extended board data */
1197 ath6kl_dbg(ATH6KL_DBG_BOOT, 1196 ath6kl_dbg(ATH6KL_DBG_BOOT,
1198 "writing extended board data to 0x%x (%d B)\n", 1197 "writing extended board data to 0x%x (%d B)\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5839fc23bdc7..d56554674da4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
571 571
572static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel) 572static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
573{ 573{
574
575 struct ath6kl *ar = vif->ar; 574 struct ath6kl *ar = vif->ar;
576 575
577 vif->profile.ch = cpu_to_le16(channel); 576 vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
600 599
601static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel) 600static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
602{ 601{
603
604 struct ath6kl_vif *vif; 602 struct ath6kl_vif *vif;
605 int res = 0; 603 int res = 0;
606 604
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
692 cfg80211_michael_mic_failure(vif->ndev, sta->mac, 690 cfg80211_michael_mic_failure(vif->ndev, sta->mac,
693 NL80211_KEYTYPE_PAIRWISE, keyid, 691 NL80211_KEYTYPE_PAIRWISE, keyid,
694 tsc, GFP_KERNEL); 692 tsc, GFP_KERNEL);
695 } else 693 } else {
696 ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); 694 ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
697 695 }
698} 696}
699 697
700static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) 698static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
1093 if (test_bit(CONNECTED, &vif->flags)) { 1091 if (test_bit(CONNECTED, &vif->flags)) {
1094 netif_carrier_on(dev); 1092 netif_carrier_on(dev);
1095 netif_wake_queue(dev); 1093 netif_wake_queue(dev);
1096 } else 1094 } else {
1097 netif_carrier_off(dev); 1095 netif_carrier_off(dev);
1096 }
1098 1097
1099 return 0; 1098 return 0;
1100} 1099}
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
1146 dev->features = features | NETIF_F_RXCSUM; 1145 dev->features = features | NETIF_F_RXCSUM;
1147 return err; 1146 return err;
1148 } 1147 }
1149
1150 } 1148 }
1151 1149
1152 return err; 1150 return err;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 7126bdd4236c..339d89f14d32 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
348 int i, scat_req_sz, scat_list_sz, size; 348 int i, scat_req_sz, scat_list_sz, size;
349 u8 *virt_buf; 349 u8 *virt_buf;
350 350
351 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 351 scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
352 scat_req_sz = sizeof(*s_req) + scat_list_sz; 352 scat_req_sz = sizeof(*s_req) + scat_list_sz;
353 353
354 if (!virt_scat) 354 if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
425 memcpy(tbuf, buf, len); 425 memcpy(tbuf, buf, len);
426 426
427 bounced = true; 427 bounced = true;
428 } else 428 } else {
429 tbuf = buf; 429 tbuf = buf;
430 }
430 431
431 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 432 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
432 if ((request & HIF_READ) && bounced) 433 if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
441static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, 442static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
442 struct bus_request *req) 443 struct bus_request *req)
443{ 444{
444 if (req->scat_req) 445 if (req->scat_req) {
445 ath6kl_sdio_scat_rw(ar_sdio, req); 446 ath6kl_sdio_scat_rw(ar_sdio, req);
446 else { 447 } else {
447 void *context; 448 void *context;
448 int status; 449 int status;
449 450
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
656 list_add_tail(&s_req->list, &ar_sdio->scat_req); 657 list_add_tail(&s_req->list, &ar_sdio->scat_req);
657 658
658 spin_unlock_bh(&ar_sdio->scat_lock); 659 spin_unlock_bh(&ar_sdio->scat_lock);
659
660} 660}
661 661
662/* scatter gather read write request */ 662/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
674 "hif-scatter: total len: %d scatter entries: %d\n", 674 "hif-scatter: total len: %d scatter entries: %d\n",
675 scat_req->len, scat_req->scat_entries); 675 scat_req->len, scat_req->scat_entries);
676 676
677 if (request & HIF_SYNCHRONOUS) 677 if (request & HIF_SYNCHRONOUS) {
678 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 678 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
679 else { 679 } else {
680 spin_lock_bh(&ar_sdio->wr_async_lock); 680 spin_lock_bh(&ar_sdio->wr_async_lock);
681 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 681 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
682 spin_unlock_bh(&ar_sdio->wr_async_lock); 682 spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
856 856
857 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || 857 if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
858 (!ar->suspend_mode && wow)) { 858 (!ar->suspend_mode && wow)) {
859
860 ret = ath6kl_set_sdio_pm_caps(ar); 859 ret = ath6kl_set_sdio_pm_caps(ar);
861 if (ret) 860 if (ret)
862 goto cut_pwr; 861 goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
878 877
879 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || 878 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
880 !ar->suspend_mode || try_deepsleep) { 879 !ar->suspend_mode || try_deepsleep) {
881
882 flags = sdio_get_host_pm_caps(func); 880 flags = sdio_get_host_pm_caps(func);
883 if (!(flags & MMC_PM_KEEP_POWER)) 881 if (!(flags & MMC_PM_KEEP_POWER))
884 goto cut_pwr; 882 goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
1061 1059
1062 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1060 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
1063 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { 1061 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
1064
1065 /* 1062 /*
1066 * Hit the credit counter with a 4-byte access, the first byte 1063 * Hit the credit counter with a 4-byte access, the first byte
1067 * read will hit the counter and cause a decrement, while the 1064 * read will hit the counter and cause a decrement, while the
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a580a629a0da..d5eeeae7711b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -289,7 +289,7 @@ struct host_interest {
289 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */ 289 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
290 290
291 /* test applications flags */ 291 /* test applications flags */
292 u32 hi_test_apps_related ; /* 0xdc */ 292 u32 hi_test_apps_related; /* 0xdc */
293 /* location of test script */ 293 /* location of test script */
294 u32 hi_ota_testscript; /* 0xe0 */ 294 u32 hi_ota_testscript; /* 0xe0 */
295 /* location of CAL data */ 295 /* location of CAL data */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index ebb24045a8ae..40432fe7a5d2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
125 *flags |= WMI_DATA_HDR_FLAGS_UAPSD; 125 *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
126 spin_unlock_bh(&conn->psq_lock); 126 spin_unlock_bh(&conn->psq_lock);
127 return false; 127 return false;
128 } else if (!conn->apsd_info) 128 } else if (!conn->apsd_info) {
129 return false; 129 return false;
130 }
130 131
131 if (test_bit(WMM_ENABLED, &vif->flags)) { 132 if (test_bit(WMM_ENABLED, &vif->flags)) {
132 ether_type = be16_to_cpu(datap->h_proto); 133 ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
316 cookie = NULL; 317 cookie = NULL;
317 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n", 318 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
318 skb, skb->len); 319 skb, skb->len);
319 } else 320 } else {
320 cookie = ath6kl_alloc_cookie(ar); 321 cookie = ath6kl_alloc_cookie(ar);
322 }
321 323
322 if (cookie == NULL) { 324 if (cookie == NULL) {
323 spin_unlock_bh(&ar->lock); 325 spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
359 struct ath6kl_vif *vif = netdev_priv(dev); 361 struct ath6kl_vif *vif = netdev_priv(dev);
360 u32 map_no = 0; 362 u32 map_no = 0;
361 u16 htc_tag = ATH6KL_DATA_PKT_TAG; 363 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
362 u8 ac = 99 ; /* initialize to unmapped ac */ 364 u8 ac = 99; /* initialize to unmapped ac */
363 bool chk_adhoc_ps_mapping = false; 365 bool chk_adhoc_ps_mapping = false;
364 int ret; 366 int ret;
365 struct wmi_tx_meta_v2 meta_v2; 367 struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
449 if (ret) 451 if (ret)
450 goto fail_tx; 452 goto fail_tx;
451 } 453 }
452 } else 454 } else {
453 goto fail_tx; 455 goto fail_tx;
456 }
454 457
455 spin_lock_bh(&ar->lock); 458 spin_lock_bh(&ar->lock);
456 459
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
702 705
703 /* reap completed packets */ 706 /* reap completed packets */
704 while (!list_empty(packet_queue)) { 707 while (!list_empty(packet_queue)) {
705
706 packet = list_first_entry(packet_queue, struct htc_packet, 708 packet = list_first_entry(packet_queue, struct htc_packet,
707 list); 709 list);
708 list_del(&packet->list); 710 list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1089 else 1091 else
1090 skb_queue_tail(&rxtid->q, node->skb); 1092 skb_queue_tail(&rxtid->q, node->skb);
1091 node->skb = NULL; 1093 node->skb = NULL;
1092 } else 1094 } else {
1093 stats->num_hole++; 1095 stats->num_hole++;
1096 }
1094 1097
1095 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next); 1098 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
1096 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); 1099 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1211 return is_queued; 1214 return is_queued;
1212 1215
1213 spin_lock_bh(&rxtid->lock); 1216 spin_lock_bh(&rxtid->lock);
1214 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) { 1217 for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
1215 if (rxtid->hold_q[idx].skb) { 1218 if (rxtid->hold_q[idx].skb) {
1216 /* 1219 /*
1217 * There is a frame in the queue and no 1220 * There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1265 is_apsdq_empty_at_start = is_apsdq_empty; 1268 is_apsdq_empty_at_start = is_apsdq_empty;
1266 1269
1267 while ((!is_apsdq_empty) && (num_frames_to_deliver)) { 1270 while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1268
1269 spin_lock_bh(&conn->psq_lock); 1271 spin_lock_bh(&conn->psq_lock);
1270 skb = skb_dequeue(&conn->apsdq); 1272 skb = skb_dequeue(&conn->apsdq);
1271 is_apsdq_empty = skb_queue_empty(&conn->apsdq); 1273 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1606 if (!conn) 1608 if (!conn)
1607 return; 1609 return;
1608 aggr_conn = conn->aggr_conn; 1610 aggr_conn = conn->aggr_conn;
1609 } else 1611 } else {
1610 aggr_conn = vif->aggr_cntxt->aggr_conn; 1612 aggr_conn = vif->aggr_cntxt->aggr_conn;
1613 }
1611 1614
1612 if (aggr_process_recv_frm(aggr_conn, tid, seq_no, 1615 if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1613 is_amsdu, skb)) { 1616 is_amsdu, skb)) {
1614 /* aggregation code will handle the skb */ 1617 /* aggregation code will handle the skb */
1615 return; 1618 return;
1616 } 1619 }
1617 } else if (!is_broadcast_ether_addr(datap->h_dest)) 1620 } else if (!is_broadcast_ether_addr(datap->h_dest)) {
1618 vif->net_stats.multicast++; 1621 vif->net_stats.multicast++;
1622 }
1619 1623
1620 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); 1624 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1621} 1625}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1710 sta = ath6kl_find_sta_by_aid(vif->ar, aid); 1714 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1711 if (sta) 1715 if (sta)
1712 aggr_conn = sta->aggr_conn; 1716 aggr_conn = sta->aggr_conn;
1713 } else 1717 } else {
1714 aggr_conn = vif->aggr_cntxt->aggr_conn; 1718 aggr_conn = vif->aggr_cntxt->aggr_conn;
1719 }
1715 1720
1716 if (!aggr_conn) 1721 if (!aggr_conn)
1717 return; 1722 return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1766 skb_queue_head_init(&rxtid->q); 1771 skb_queue_head_init(&rxtid->q);
1767 spin_lock_init(&rxtid->lock); 1772 spin_lock_init(&rxtid->lock);
1768 } 1773 }
1769
1770} 1774}
1771 1775
1772struct aggr_info *aggr_init(struct ath6kl_vif *vif) 1776struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1806 sta = ath6kl_find_sta_by_aid(vif->ar, aid); 1810 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1807 if (sta) 1811 if (sta)
1808 aggr_conn = sta->aggr_conn; 1812 aggr_conn = sta->aggr_conn;
1809 } else 1813 } else {
1810 aggr_conn = vif->aggr_cntxt->aggr_conn; 1814 aggr_conn = vif->aggr_cntxt->aggr_conn;
1815 }
1811 1816
1812 if (!aggr_conn) 1817 if (!aggr_conn)
1813 return; 1818 return;
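
Every txrx.c hunk enforces the same rule from Documentation/CodingStyle: braces may be dropped only when every branch of a conditional is a single statement; once one branch needs braces, all branches get them. Distilled from the ath6kl_control_tx hunk, with ep_full()/alloc_cookie() as illustrative stand-ins for the real helpers:

    if (ep_full(ar)) {
            cookie = NULL;
            pr_err("wmi ctrl ep full, dropping pkt\n");
    } else {
            cookie = alloc_cookie(ar);      /* braced to match the if-branch */
    }
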
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 56c3fd5cef65..3afc5a463d06 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
236 break; 236 break;
237 kfree(urb_context); 237 kfree(urb_context);
238 } 238 }
239
240} 239}
241 240
242static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb) 241static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
245 244
246 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) 245 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
247 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]); 246 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
248
249} 247}
250 248
251static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb, 249static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 8b4ce28e3ce8..4d7f9e4712e9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
289 ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) + 289 ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
290 sizeof(struct ath6kl_llc_snap_hdr), 290 sizeof(struct ath6kl_llc_snap_hdr),
291 layer2_priority); 291 layer2_priority);
292 } else 292 } else {
293 usr_pri = layer2_priority & 0x7; 293 usr_pri = layer2_priority & 0x7;
294 }
294 295
295 /* 296 /*
296 * Queue the EAPOL frames in the same WMM_AC_VO queue 297 * Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
359 hdr_size = roundup(sizeof(struct ieee80211_qos_hdr), 360 hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
360 sizeof(u32)); 361 sizeof(u32));
361 skb_pull(skb, hdr_size); 362 skb_pull(skb, hdr_size);
362 } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) 363 } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
363 skb_pull(skb, sizeof(struct ieee80211_hdr_3addr)); 364 skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
365 }
364 366
365 datap = skb->data; 367 datap = skb->data;
366 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap); 368 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
936 938
937static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len) 939static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
938{ 940{
939
940 struct ath6kl_wmi_regdomain *ev; 941 struct ath6kl_wmi_regdomain *ev;
941 struct country_code_to_enum_rd *country = NULL; 942 struct country_code_to_enum_rd *country = NULL;
942 struct reg_dmn_pair_mapping *regpair = NULL; 943 struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
946 ev = (struct ath6kl_wmi_regdomain *) datap; 947 ev = (struct ath6kl_wmi_regdomain *) datap;
947 reg_code = le32_to_cpu(ev->reg_code); 948 reg_code = le32_to_cpu(ev->reg_code);
948 949
949 if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) 950 if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
950 country = ath6kl_regd_find_country((u16) reg_code); 951 country = ath6kl_regd_find_country((u16) reg_code);
951 else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) { 952 } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
952
953 regpair = ath6kl_get_regpair((u16) reg_code); 953 regpair = ath6kl_get_regpair((u16) reg_code);
954 country = ath6kl_regd_find_country_by_rd((u16) reg_code); 954 country = ath6kl_regd_find_country_by_rd((u16) reg_code);
955 if (regpair) 955 if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1499 1499
1500 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) && 1500 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
1501 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) { 1501 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
1502
1503 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); 1502 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1504 tsinfo = le16_to_cpu(ts->tsinfo); 1503 tsinfo = le16_to_cpu(ts->tsinfo);
1505 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & 1504 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1530 * for delete qos stream from AP 1529 * for delete qos stream from AP
1531 */ 1530 */
1532 else if (reply->cac_indication == CAC_INDICATION_DELETE) { 1531 else if (reply->cac_indication == CAC_INDICATION_DELETE) {
1533
1534 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); 1532 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1535 tsinfo = le16_to_cpu(ts->tsinfo); 1533 tsinfo = le16_to_cpu(ts->tsinfo);
1536 ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & 1534 ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2322,7 +2320,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
2322 return ret; 2320 return ret;
2323} 2321}
2324 2322
2325int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk) 2323int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
2326{ 2324{
2327 struct sk_buff *skb; 2325 struct sk_buff *skb;
2328 struct wmi_add_krk_cmd *cmd; 2326 struct wmi_add_krk_cmd *cmd;
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2479 goto free_data_skb; 2477 goto free_data_skb;
2480 2478
2481 for (index = 0; index < num_pri_streams; index++) { 2479 for (index = 0; index < num_pri_streams; index++) {
2482
2483 if (WARN_ON(!data_sync_bufs[index].skb)) 2480 if (WARN_ON(!data_sync_bufs[index].skb))
2484 goto free_data_skb; 2481 goto free_data_skb;
2485 2482
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
2704 2701
2705 for (i = 0; i < WMM_NUM_AC; i++) { 2702 for (i = 0; i < WMM_NUM_AC; i++) {
2706 if (stream_exist & (1 << i)) { 2703 if (stream_exist & (1 << i)) {
2707
2708 /* 2704 /*
2709 * FIXME: Is this lock & unlock inside 2705 * FIXME: Is this lock & unlock inside
2710 * for loop correct? may need rework. 2706 * for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2870 if (host_mode == ATH6KL_HOST_MODE_ASLEEP) { 2866 if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
2871 ath6kl_wmi_relinquish_implicit_pstream_credits(wmi); 2867 ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
2872 cmd->asleep = cpu_to_le32(1); 2868 cmd->asleep = cpu_to_le32(1);
2873 } else 2869 } else {
2874 cmd->awake = cpu_to_le32(1); 2870 cmd->awake = cpu_to_le32(1);
2871 }
2875 2872
2876 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, 2873 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2877 WMI_SET_HOST_SLEEP_MODE_CMDID, 2874 WMI_SET_HOST_SLEEP_MODE_CMDID,
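
Beyond the brace and blank-line cleanups, the wmi.c hunks show the driver's wire-format discipline: WMI event fields arrive little-endian, are declared __le32/__le16, and must pass through le32_to_cpu()/le16_to_cpu() before use, so sparse can flag any direct access. A sketch with an illustrative event layout, not a real WMI structure:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct my_event {
            __le32 reg_code;
            __le16 tsinfo;
    } __packed;

    static void parse_my_event(const u8 *datap)
    {
            const struct my_event *ev = (const void *)datap;
            u32 reg_code = le32_to_cpu(ev->reg_code);  /* host-endian from here */
            u16 tsinfo = le16_to_cpu(ev->tsinfo);

            (void)reg_code;
            (void)tsinfo;
    }
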
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 5c702ae4d9f8..bb23fc00111d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
898 * flags here 898 * flags here
899 */ 899 */
900enum wmi_scan_ctrl_flags_bits { 900enum wmi_scan_ctrl_flags_bits {
901
902 /* set if can scan in the connect cmd */ 901 /* set if can scan in the connect cmd */
903 CONNECT_SCAN_CTRL_FLAGS = 0x01, 902 CONNECT_SCAN_CTRL_FLAGS = 0x01,
904 903
@@ -2617,7 +2616,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
2617 u8 *key_material, 2616 u8 *key_material,
2618 u8 key_op_ctrl, u8 *mac_addr, 2617 u8 key_op_ctrl, u8 *mac_addr,
2619 enum wmi_sync_flag sync_flag); 2618 enum wmi_sync_flag sync_flag);
2620int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk); 2619int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
2621int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index); 2620int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
2622int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, 2621int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
2623 const u8 *pmkid, bool set); 2622 const u8 *pmkid, bool set);
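
The one substantive wmi.h change is the const-qualification of the krk argument, matching the new definition in wmi.c: the command path only copies the key into the skb, so the prototype can promise read-only access and callers may pass key material held in const storage. The essence, with fill_krk_cmd() as a hypothetical helper:

    #include <linux/string.h>

    static void fill_krk_cmd(u8 *cmd_buf, const u8 *krk, size_t len)
    {
            memcpy(cmd_buf, krk, len);      /* never writes through krk */
    }
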
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 8e1c7b0fe76c..8fcd586d1c39 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -53,7 +53,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
53obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o 53obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
54ath9k_common-y:= common.o \ 54ath9k_common-y:= common.o \
55 common-init.o \ 55 common-init.o \
56 common-beacon.o 56 common-beacon.o \
57 common-debug.o
57 58
58ath9k_htc-y += htc_hst.o \ 59ath9k_htc-y += htc_hst.o \
59 hif_usb.o \ 60 hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 0a6163e9248c..c38399bc9aa9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
410 {0x00009e30, 0x06336f77}, 410 {0x00009e30, 0x06336f77},
411 {0x00009e34, 0x6af6532f}, 411 {0x00009e34, 0x6af6532f},
412 {0x00009e38, 0x0cc80c00}, 412 {0x00009e38, 0x0cc80c00},
413 {0x00009e40, 0x0d261820}, 413 {0x00009e40, 0x0d261800},
414 {0x00009e4c, 0x00001004}, 414 {0x00009e4c, 0x00001004},
415 {0x00009e50, 0x00ff03f1}, 415 {0x00009e50, 0x00ff03f1},
416 {0x00009e54, 0x00000000}, 416 {0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f76139bbb74f..2c42ff05efa3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
592 {0x00009e30, 0x06336f77}, 592 {0x00009e30, 0x06336f77},
593 {0x00009e34, 0x6af6532f}, 593 {0x00009e34, 0x6af6532f},
594 {0x00009e38, 0x0cc80c00}, 594 {0x00009e38, 0x0cc80c00},
595 {0x00009e40, 0x0d261820}, 595 {0x00009e40, 0x0d261800},
596 {0x00009e4c, 0x00001004}, 596 {0x00009e4c, 0x00001004},
597 {0x00009e50, 0x00ff03f1}, 597 {0x00009e50, 0x00ff03f1},
598 {0x00009fc0, 0x803e4788}, 598 {0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0ac8be96097f..2154efcd3900 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
231 {0x00009e30, 0x06336f77}, 231 {0x00009e30, 0x06336f77},
232 {0x00009e34, 0x6af6532f}, 232 {0x00009e34, 0x6af6532f},
233 {0x00009e38, 0x0cc80c00}, 233 {0x00009e38, 0x0cc80c00},
234 {0x00009e40, 0x0d261820}, 234 {0x00009e40, 0x0d261800},
235 {0x00009e4c, 0x00001004}, 235 {0x00009e4c, 0x00001004},
236 {0x00009e50, 0x00ff03f1}, 236 {0x00009e50, 0x00ff03f1},
237 {0x00009fc0, 0x803e4788}, 237 {0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index a01f0edb6518..b995ffe88b33 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
318 {0x00009e30, 0x06336f77}, 318 {0x00009e30, 0x06336f77},
319 {0x00009e34, 0x6af6532f}, 319 {0x00009e34, 0x6af6532f},
320 {0x00009e38, 0x0cc80c00}, 320 {0x00009e38, 0x0cc80c00},
321 {0x00009e40, 0x0d261820}, 321 {0x00009e40, 0x0d261800},
322 {0x00009e4c, 0x00001004}, 322 {0x00009e4c, 0x00001004},
323 {0x00009e50, 0x00ff03f1}, 323 {0x00009e50, 0x00ff03f1},
324 {0x00009e54, 0x00000000}, 324 {0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
348 {0x0000a370, 0x00000000}, 348 {0x0000a370, 0x00000000},
349 {0x0000a390, 0x00000001}, 349 {0x0000a390, 0x00000001},
350 {0x0000a394, 0x00000444}, 350 {0x0000a394, 0x00000444},
351 {0x0000a398, 0x00000000}, 351 {0x0000a398, 0x001f0e0f},
352 {0x0000a39c, 0x210d0401}, 352 {0x0000a39c, 0x0075393f},
353 {0x0000a3a0, 0xab9a7144}, 353 {0x0000a3a0, 0xb79f6427},
354 {0x0000a3a4, 0x00000000}, 354 {0x0000a3a4, 0x00000000},
355 {0x0000a3a8, 0xaaaaaaaa}, 355 {0x0000a3a8, 0xaaaaaaaa},
356 {0x0000a3ac, 0x3c466478}, 356 {0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 3c9113d9b1bc..8e5c3b9786e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
257 {0x0000a370, 0x00000000}, 257 {0x0000a370, 0x00000000},
258 {0x0000a390, 0x00000001}, 258 {0x0000a390, 0x00000001},
259 {0x0000a394, 0x00000444}, 259 {0x0000a394, 0x00000444},
260 {0x0000a398, 0x1f020503}, 260 {0x0000a398, 0x001f0e0f},
261 {0x0000a39c, 0x29180c03}, 261 {0x0000a39c, 0x0075393f},
262 {0x0000a3a0, 0x9a8b6844}, 262 {0x0000a3a0, 0xb79f6427},
263 {0x0000a3a4, 0x000000ff}, 263 {0x0000a3a4, 0x000000ff},
264 {0x0000a3a8, 0x6a6a6a6a}, 264 {0x0000a3a8, 0x6a6a6a6a},
265 {0x0000a3ac, 0x6a6a6a6a}, 265 {0x0000a3ac, 0x6a6a6a6a},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index e6aec2c0207f..a5ca65240af3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
90 {0x00009e30, 0x06336f77}, 90 {0x00009e30, 0x06336f77},
91 {0x00009e34, 0x6af6532f}, 91 {0x00009e34, 0x6af6532f},
92 {0x00009e38, 0x0cc80c00}, 92 {0x00009e38, 0x0cc80c00},
93 {0x00009e40, 0x0d261820}, 93 {0x00009e40, 0x0d261800},
94 {0x00009e4c, 0x00001004}, 94 {0x00009e4c, 0x00001004},
95 {0x00009e50, 0x00ff03f1}, 95 {0x00009e50, 0x00ff03f1},
96 {0x00009e54, 0x00000000}, 96 {0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ba03dde4215..2ca8f7e06174 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -23,8 +23,8 @@
23#include <linux/leds.h> 23#include <linux/leds.h>
24#include <linux/completion.h> 24#include <linux/completion.h>
25 25
26#include "debug.h"
27#include "common.h" 26#include "common.h"
27#include "debug.h"
28#include "mci.h" 28#include "mci.h"
29#include "dfs.h" 29#include "dfs.h"
30#include "spectral.h" 30#include "spectral.h"
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
114#define ATH_TXFIFO_DEPTH 8 114#define ATH_TXFIFO_DEPTH 8
115#define ATH_TX_ERROR 0x01 115#define ATH_TX_ERROR 0x01
116 116
117/* Stop tx traffic 1ms before the GO goes away */
118#define ATH_P2P_PS_STOP_TIME 1000
119
117#define IEEE80211_SEQ_SEQ_SHIFT 4 120#define IEEE80211_SEQ_SEQ_SHIFT 4
118#define IEEE80211_SEQ_MAX 4096 121#define IEEE80211_SEQ_MAX 4096
119#define IEEE80211_WEP_IVLEN 3 122#define IEEE80211_WEP_IVLEN 3
@@ -271,6 +274,7 @@ struct ath_node {
271#ifdef CONFIG_ATH9K_STATION_STATISTICS 274#ifdef CONFIG_ATH9K_STATION_STATISTICS
272 struct ath_rx_rate_stats rx_rate_stats; 275 struct ath_rx_rate_stats rx_rate_stats;
273#endif 276#endif
277 u8 key_idx[4];
274}; 278};
275 279
276struct ath_tx_control { 280struct ath_tx_control {
@@ -366,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
366/********/ 370/********/
367 371
368struct ath_vif { 372struct ath_vif {
373 struct ieee80211_vif *vif;
369 struct ath_node mcast_node; 374 struct ath_node mcast_node;
370 int av_bslot; 375 int av_bslot;
371 bool primary_sta_vif; 376 bool primary_sta_vif;
372 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 377 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
373 struct ath_buf *av_bcbuf; 378 struct ath_buf *av_bcbuf;
379
380 /* P2P Client */
381 struct ieee80211_noa_data noa;
374}; 382};
375 383
376struct ath9k_vif_iter_data { 384struct ath9k_vif_iter_data {
@@ -463,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
463void ath_update_survey_nf(struct ath_softc *sc, int channel); 471void ath_update_survey_nf(struct ath_softc *sc, int channel);
464void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type); 472void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
465void ath_ps_full_sleep(unsigned long data); 473void ath_ps_full_sleep(unsigned long data);
474void ath9k_p2p_ps_timer(void *priv);
475void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
466 476
467/**********/ 477/**********/
468/* BTCOEX */ 478/* BTCOEX */
@@ -713,6 +723,9 @@ struct ath_softc {
713 struct completion paprd_complete; 723 struct completion paprd_complete;
714 wait_queue_head_t tx_wait; 724 wait_queue_head_t tx_wait;
715 725
726 struct ath_gen_timer *p2p_ps_timer;
727 struct ath_vif *p2p_ps_vif;
728
716 unsigned long driver_data; 729 unsigned long driver_data;
717 730
718 u8 gtt_cnt; 731 u8 gtt_cnt;
@@ -757,6 +770,7 @@ struct ath_softc {
757 struct ath_ant_comb ant_comb; 770 struct ath_ant_comb ant_comb;
758 u8 ant_tx, ant_rx; 771 u8 ant_tx, ant_rx;
759 struct dfs_pattern_detector *dfs_detector; 772 struct dfs_pattern_detector *dfs_detector;
773 u64 dfs_prev_pulse_ts;
760 u32 wow_enabled; 774 u32 wow_enabled;
761 /* relay(fs) channel for spectral scan */ 775 /* relay(fs) channel for spectral scan */
762 struct rchan *rfs_chan_spec_scan; 776 struct rchan *rfs_chan_spec_scan;
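
The ath9k.h hunks stage P2P client powersave support: ATH_P2P_PS_STOP_TIME stops tx traffic 1 ms before the GO's absence window, ath_vif gains a back-pointer to its ieee80211_vif plus NoA bookkeeping, and the softc tracks a generic timer together with the vif it is armed for (dfs_prev_pulse_ts is unrelated; see the dfs.c hunk below). The reason ath_vif can simply grow members is mac80211's per-vif private area, sketched here with a hypothetical my_vif standing in for ath_vif:

    #include <net/mac80211.h>

    /* mac80211 reserves vif->drv_priv for the driver,
     * sized via hw->vif_data_size */
    struct my_vif {
            struct ieee80211_vif *vif;      /* back-pointer, as added above */
    };

    static inline struct my_vif *to_my_vif(struct ieee80211_vif *vif)
    {
            return (void *)vif->drv_priv;
    }
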
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index bd9e634879e6..e387f0b2954a 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -537,8 +537,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
537 cur_conf->dtim_period = bss_conf->dtim_period; 537 cur_conf->dtim_period = bss_conf->dtim_period;
538 cur_conf->dtim_count = 1; 538 cur_conf->dtim_count = 1;
539 cur_conf->ibss_creator = bss_conf->ibss_creator; 539 cur_conf->ibss_creator = bss_conf->ibss_creator;
540 cur_conf->bmiss_timeout =
541 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
542 540
543 /* 541 /*
544 * It looks like mac80211 may end up using beacon interval of zero in 542 * It looks like mac80211 may end up using beacon interval of zero in
@@ -549,6 +547,9 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
549 if (cur_conf->beacon_interval == 0) 547 if (cur_conf->beacon_interval == 0)
550 cur_conf->beacon_interval = 100; 548 cur_conf->beacon_interval = 100;
551 549
550 cur_conf->bmiss_timeout =
551 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
552
552 /* 553 /*
553 * We don't parse dtim period from mac80211 during the driver 554 * We don't parse dtim period from mac80211 during the driver
554 * initialization as it breaks association with hidden-ssid 555 * initialization as it breaks association with hidden-ssid
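
The beacon.c pair is a genuine bug fix rather than cleanup: bmiss_timeout was derived from beacon_interval before the zero-interval fallback ran, so an interval of 0 produced a zero beacon-miss timeout; the computation now follows the clamp. The distilled shape, sanitize-then-derive (the function below is a sketch, not the driver code):

    #define ATH_DEFAULT_BMISS_LIMIT 10      /* value as defined in ath9k.h */

    static void cache_beacon_config(u16 *beacon_interval, u32 *bmiss_timeout)
    {
            if (*beacon_interval == 0)
                    *beacon_interval = 100;         /* sanitize the input first... */

            /* ...then derive anything that depends on it */
            *bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * *beacon_interval;
    }
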
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644
index 000000000000..3b289f933405
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "common.h"
18
19static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
20 size_t count, loff_t *ppos)
21{
22 struct ath_hw *ah = file->private_data;
23 u32 len = 0, size = 6000;
24 char *buf;
25 size_t retval;
26
27 buf = kzalloc(size, GFP_KERNEL);
28 if (buf == NULL)
29 return -ENOMEM;
30
31 len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
32
33 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
34 kfree(buf);
35
36 return retval;
37}
38
39static const struct file_operations fops_modal_eeprom = {
40 .read = read_file_modal_eeprom,
41 .open = simple_open,
42 .owner = THIS_MODULE,
43 .llseek = default_llseek,
44};
45
46
47void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
48 struct ath_hw *ah)
49{
50 debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
51 &fops_modal_eeprom);
52}
53EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
54
55static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 struct ath_hw *ah = file->private_data;
59 u32 len = 0, size = 1500;
60 ssize_t retval = 0;
61 char *buf;
62
63 buf = kzalloc(size, GFP_KERNEL);
64 if (!buf)
65 return -ENOMEM;
66
67 len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
68
69 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
70 kfree(buf);
71
72 return retval;
73}
74
75static const struct file_operations fops_base_eeprom = {
76 .read = read_file_base_eeprom,
77 .open = simple_open,
78 .owner = THIS_MODULE,
79 .llseek = default_llseek,
80};
81
82void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
83 struct ath_hw *ah)
84{
85 debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
86 &fops_base_eeprom);
87}
88EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
89
90void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
91 struct ath_rx_status *rs)
92{
93#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
94#define RX_CMN_STAT_INC(c) (rxstats->c++)
95
96 RX_CMN_STAT_INC(rx_pkts_all);
97 rxstats->rx_bytes_all += rs->rs_datalen;
98
99 if (rs->rs_status & ATH9K_RXERR_CRC)
100 RX_CMN_STAT_INC(crc_err);
101 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
102 RX_CMN_STAT_INC(decrypt_crc_err);
103 if (rs->rs_status & ATH9K_RXERR_MIC)
104 RX_CMN_STAT_INC(mic_err);
105 if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
106 RX_CMN_STAT_INC(pre_delim_crc_err);
107 if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
108 RX_CMN_STAT_INC(post_delim_crc_err);
109 if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
110 RX_CMN_STAT_INC(decrypt_busy_err);
111
112 if (rs->rs_status & ATH9K_RXERR_PHY) {
113 RX_CMN_STAT_INC(phy_err);
114 if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
115 RX_PHY_ERR_INC(rs->rs_phyerr);
116 }
117
118#undef RX_CMN_STAT_INC
119#undef RX_PHY_ERR_INC
120}
121EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
122
123static ssize_t read_file_recv(struct file *file, char __user *user_buf,
124 size_t count, loff_t *ppos)
125{
126#define RXS_ERR(s, e) \
127 do { \
128 len += scnprintf(buf + len, size - len, \
129 "%18s : %10u\n", s, \
130 rxstats->e); \
131 } while (0)
132
133 struct ath_rx_stats *rxstats = file->private_data;
134 char *buf;
135 unsigned int len = 0, size = 1600;
136 ssize_t retval = 0;
137
138 buf = kzalloc(size, GFP_KERNEL);
139 if (buf == NULL)
140 return -ENOMEM;
141
142 RXS_ERR("PKTS-ALL", rx_pkts_all);
143 RXS_ERR("BYTES-ALL", rx_bytes_all);
144 RXS_ERR("BEACONS", rx_beacons);
145 RXS_ERR("FRAGS", rx_frags);
146 RXS_ERR("SPECTRAL", rx_spectral);
147
148 RXS_ERR("CRC ERR", crc_err);
149 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
150 RXS_ERR("PHY ERR", phy_err);
151 RXS_ERR("MIC ERR", mic_err);
152 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
153 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
154 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
155 RXS_ERR("LENGTH-ERR", rx_len_err);
156 RXS_ERR("OOM-ERR", rx_oom_err);
157 RXS_ERR("RATE-ERR", rx_rate_err);
158 RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
159
160 if (len > size)
161 len = size;
162
163 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
164 kfree(buf);
165
166 return retval;
167
168#undef RXS_ERR
169}
170
171static const struct file_operations fops_recv = {
172 .read = read_file_recv,
173 .open = simple_open,
174 .owner = THIS_MODULE,
175 .llseek = default_llseek,
176};
177
178void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
179 struct ath_rx_stats *rxstats)
180{
181 debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
182 &fops_recv);
183}
184EXPORT_SYMBOL(ath9k_cmn_debug_recv);
185
186static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
187 size_t count, loff_t *ppos)
188{
189#define PHY_ERR(s, p) \
190 len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
191 rxstats->phy_err_stats[p]);
192
193 struct ath_rx_stats *rxstats = file->private_data;
194 char *buf;
195 unsigned int len = 0, size = 1600;
196 ssize_t retval = 0;
197
198 buf = kzalloc(size, GFP_KERNEL);
199 if (buf == NULL)
200 return -ENOMEM;
201
202 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
203 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
204 PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
205 PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
206 PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
207 PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
208 PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
209 PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
210 PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
211 PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
212 PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
213 PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
214 PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
215 PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
216 PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
217 PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
218 PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
219 PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
220 PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
221 PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
222 PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
223 PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
224 PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
225 PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
226 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
227 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
228
229 if (len > size)
230 len = size;
231
232 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
233 kfree(buf);
234
235 return retval;
236
237#undef PHY_ERR
238}
239
240static const struct file_operations fops_phy_err = {
241 .read = read_file_phy_err,
242 .open = simple_open,
243 .owner = THIS_MODULE,
244 .llseek = default_llseek,
245};
246
247void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
248 struct ath_rx_stats *rxstats)
249{
250 debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
251 &fops_phy_err);
252}
253EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
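
The new common-debug.c repeats one debugfs pattern four times (base/modal EEPROM, recv, phy_err): allocate a buffer, format it with scnprintf(), hand it to userspace via simple_read_from_buffer(), free it; simple_open() stores the i_private pointer passed to debugfs_create_file() in file->private_data. A minimal read-only file in the same style, with "example" as a placeholder name:

    #include <linux/debugfs.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    static ssize_t example_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
    {
            char *buf;
            unsigned int len = 0, size = 128;
            ssize_t ret;

            buf = kzalloc(size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            len += scnprintf(buf + len, size - len, "%18s : %10u\n",
                             "EXAMPLE", 42U);

            ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
            kfree(buf);
            return ret;
    }

    static const struct file_operations fops_example = {
            .read   = example_read,
            .open   = simple_open,  /* file->private_data = inode->i_private */
            .owner  = THIS_MODULE,
            .llseek = default_llseek,
    };

    /* registration:
     * debugfs_create_file("example", S_IRUSR, parent, priv, &fops_example);
     */
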
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644
index 000000000000..7c9788490f7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17
18
19/**
20 * struct ath_rx_stats - RX Statistics
21 * @rx_pkts_all: No. of total frames received, including ones that
22 may have had errors.
23 * @rx_bytes_all: No. of total bytes received, including ones that
24 may have had errors.
25 * @crc_err: No. of frames with incorrect CRC value
26 * @decrypt_crc_err: No. of frames whose CRC check failed after
27 decryption process completed
28 * @phy_err: No. of frames whose reception failed because the PHY
29 encountered an error
30 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
31 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
32 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
33 * @decrypt_busy_err: Decryption interruptions counter
34 * @phy_err_stats: Individual PHY error statistics
35 * @rx_len_err: No. of frames discarded due to bad length.
36 * @rx_oom_err: No. of frames dropped due to OOM issues.
37 * @rx_rate_err: No. of frames dropped due to rate errors.
38 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
39 * @rx_beacons: No. of beacons received.
40 * @rx_frags: No. of rx-fragements received.
41 * @rx_spectral: No of spectral packets received.
42 */
43struct ath_rx_stats {
44 u32 rx_pkts_all;
45 u32 rx_bytes_all;
46 u32 crc_err;
47 u32 decrypt_crc_err;
48 u32 phy_err;
49 u32 mic_err;
50 u32 pre_delim_crc_err;
51 u32 post_delim_crc_err;
52 u32 decrypt_busy_err;
53 u32 phy_err_stats[ATH9K_PHYERR_MAX];
54 u32 rx_len_err;
55 u32 rx_oom_err;
56 u32 rx_rate_err;
57 u32 rx_too_many_frags_err;
58 u32 rx_beacons;
59 u32 rx_frags;
60 u32 rx_spectral;
61};
62
63void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
64 struct ath_hw *ah);
65void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
66 struct ath_hw *ah);
67void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
68 struct ath_rx_status *rs);
69void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
70 struct ath_rx_stats *rxstats);
71void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
72 struct ath_rx_stats *rxstats);
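
The struct comment carried over into common-debug.h is kernel-doc, though it inherits a nit from the old debug.h: continuation lines drop the leading " * ". The canonical layout, shown on an illustrative struct:

    /**
     * struct foo_stats - one-line summary of the structure
     * @good:       what this member counts
     * @wrapped:    descriptions that wrap keep the leading " * " on
     *              every continuation line
     */
    struct foo_stats {
            u32 good;
            u32 wrapped;
    };
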
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ca38116838f0..ffc454b18637 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,6 +23,7 @@
23 23
24#include "common-init.h" 24#include "common-init.h"
25#include "common-beacon.h" 25#include "common-beacon.h"
26#include "common-debug.h"
26 27
27/* Common header for Atheros 802.11n base driver cores */ 28/* Common header for Atheros 802.11n base driver cores */
28 29
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 780ff1bee6f6..6cc42be48d4e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -948,151 +948,11 @@ static const struct file_operations fops_reset = {
948 .llseek = default_llseek, 948 .llseek = default_llseek,
949}; 949};
950 950
951static ssize_t read_file_recv(struct file *file, char __user *user_buf,
952 size_t count, loff_t *ppos)
953{
954#define RXS_ERR(s, e) \
955 do { \
956 len += scnprintf(buf + len, size - len, \
957 "%18s : %10u\n", s, \
958 sc->debug.stats.rxstats.e);\
959 } while (0)
960
961 struct ath_softc *sc = file->private_data;
962 char *buf;
963 unsigned int len = 0, size = 1600;
964 ssize_t retval = 0;
965
966 buf = kzalloc(size, GFP_KERNEL);
967 if (buf == NULL)
968 return -ENOMEM;
969
970 RXS_ERR("PKTS-ALL", rx_pkts_all);
971 RXS_ERR("BYTES-ALL", rx_bytes_all);
972 RXS_ERR("BEACONS", rx_beacons);
973 RXS_ERR("FRAGS", rx_frags);
974 RXS_ERR("SPECTRAL", rx_spectral);
975
976 RXS_ERR("CRC ERR", crc_err);
977 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
978 RXS_ERR("PHY ERR", phy_err);
979 RXS_ERR("MIC ERR", mic_err);
980 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
981 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
982 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
983 RXS_ERR("LENGTH-ERR", rx_len_err);
984 RXS_ERR("OOM-ERR", rx_oom_err);
985 RXS_ERR("RATE-ERR", rx_rate_err);
986 RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
987
988 if (len > size)
989 len = size;
990
991 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
992 kfree(buf);
993
994 return retval;
995
996#undef RXS_ERR
997}
998
999void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 951void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
1000{ 952{
1001#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 953 ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
1002
1003 RX_STAT_INC(rx_pkts_all);
1004 sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
1005
1006 if (rs->rs_status & ATH9K_RXERR_CRC)
1007 RX_STAT_INC(crc_err);
1008 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
1009 RX_STAT_INC(decrypt_crc_err);
1010 if (rs->rs_status & ATH9K_RXERR_MIC)
1011 RX_STAT_INC(mic_err);
1012 if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
1013 RX_STAT_INC(pre_delim_crc_err);
1014 if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
1015 RX_STAT_INC(post_delim_crc_err);
1016 if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
1017 RX_STAT_INC(decrypt_busy_err);
1018
1019 if (rs->rs_status & ATH9K_RXERR_PHY) {
1020 RX_STAT_INC(phy_err);
1021 if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
1022 RX_PHY_ERR_INC(rs->rs_phyerr);
1023 }
1024
1025#undef RX_PHY_ERR_INC
1026} 954}
1027 955
1028static const struct file_operations fops_recv = {
1029 .read = read_file_recv,
1030 .open = simple_open,
1031 .owner = THIS_MODULE,
1032 .llseek = default_llseek,
1033};
1034
1035static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
1036 size_t count, loff_t *ppos)
1037{
1038#define PHY_ERR(s, p) \
1039 len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
1040 sc->debug.stats.rxstats.phy_err_stats[p]);
1041
1042 struct ath_softc *sc = file->private_data;
1043 char *buf;
1044 unsigned int len = 0, size = 1600;
1045 ssize_t retval = 0;
1046
1047 buf = kzalloc(size, GFP_KERNEL);
1048 if (buf == NULL)
1049 return -ENOMEM;
1050
1051 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
1052 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
1053 PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
1054 PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
1055 PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
1056 PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
1057 PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
1058 PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
1059 PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
1060 PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
1061 PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
1062 PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
1063 PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
1064 PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
1065 PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
1066 PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
1067 PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
1068 PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
1069 PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
1070 PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
1071 PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
1072 PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
1073 PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
1074 PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
1075 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
1076 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
1077
1078 if (len > size)
1079 len = size;
1080
1081 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1082 kfree(buf);
1083
1084 return retval;
1085
1086#undef PHY_ERR
1087}
1088
1089static const struct file_operations fops_phy_err = {
1090 .read = read_file_phy_err,
1091 .open = simple_open,
1092 .owner = THIS_MODULE,
1093 .llseek = default_llseek,
1094};
1095
1096static ssize_t read_file_regidx(struct file *file, char __user *user_buf, 956static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
1097 size_t count, loff_t *ppos) 957 size_t count, loff_t *ppos)
1098{ 958{
@@ -1268,62 +1128,6 @@ static const struct file_operations fops_dump_nfcal = {
1268 .llseek = default_llseek, 1128 .llseek = default_llseek,
1269}; 1129};
1270 1130
1271static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
1272 size_t count, loff_t *ppos)
1273{
1274 struct ath_softc *sc = file->private_data;
1275 struct ath_hw *ah = sc->sc_ah;
1276 u32 len = 0, size = 1500;
1277 ssize_t retval = 0;
1278 char *buf;
1279
1280 buf = kzalloc(size, GFP_KERNEL);
1281 if (!buf)
1282 return -ENOMEM;
1283
1284 len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
1285
1286 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1287 kfree(buf);
1288
1289 return retval;
1290}
1291
1292static const struct file_operations fops_base_eeprom = {
1293 .read = read_file_base_eeprom,
1294 .open = simple_open,
1295 .owner = THIS_MODULE,
1296 .llseek = default_llseek,
1297};
1298
1299static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
1300 size_t count, loff_t *ppos)
1301{
1302 struct ath_softc *sc = file->private_data;
1303 struct ath_hw *ah = sc->sc_ah;
1304 u32 len = 0, size = 6000;
1305 char *buf;
1306 size_t retval;
1307
1308 buf = kzalloc(size, GFP_KERNEL);
1309 if (buf == NULL)
1310 return -ENOMEM;
1311
1312 len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
1313
1314 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1315 kfree(buf);
1316
1317 return retval;
1318}
1319
1320static const struct file_operations fops_modal_eeprom = {
1321 .read = read_file_modal_eeprom,
1322 .open = simple_open,
1323 .owner = THIS_MODULE,
1324 .llseek = default_llseek,
1325};
1326
1327#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1131#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1328static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, 1132static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
1329 size_t count, loff_t *ppos) 1133 size_t count, loff_t *ppos)
@@ -1524,10 +1328,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1524 &fops_misc); 1328 &fops_misc);
1525 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc, 1329 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
1526 &fops_reset); 1330 &fops_reset);
1527 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, 1331
1528 &fops_recv); 1332 ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
1529 debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc, 1333 ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
1530 &fops_phy_err); 1334
1531 debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy, 1335 debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
1532 &ah->rxchainmask); 1336 &ah->rxchainmask);
1533 debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy, 1337 debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1547,10 +1351,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1547 &fops_regdump); 1351 &fops_regdump);
1548 debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc, 1352 debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
1549 &fops_dump_nfcal); 1353 &fops_dump_nfcal);
1550 debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, 1354
1551 &fops_base_eeprom); 1355 ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
1552 debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, 1356 ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
1553 &fops_modal_eeprom); 1357
1554 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1358 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1555 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1359 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1556 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1360 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
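
With the four readers moved into ath9k_common, debug.c keeps only thin wrappers and two registration calls, so ath9k and ath9k_htc now share one implementation. The shape of the refactor, taken from the hunks above:

    /* common module: the single implementation, exported to both drivers */
    void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
                                 struct ath_rx_status *rs)
    {
            /* shared accounting, as in common-debug.c above */
    }
    EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);

    /* ath9k: the old entry point survives as a one-line wrapper */
    void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
    {
            ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
    }
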
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 559a68c2709c..53ae15bd0c9d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -221,50 +221,6 @@ struct ath_rx_rate_stats {
221 } cck_stats[4]; 221 } cck_stats[4];
222}; 222};
223 223
224/**
225 * struct ath_rx_stats - RX Statistics
226 * @rx_pkts_all: No. of total frames received, including ones that
227 may have had errors.
228 * @rx_bytes_all: No. of total bytes received, including ones that
229 may have had errors.
230 * @crc_err: No. of frames with incorrect CRC value
231 * @decrypt_crc_err: No. of frames whose CRC check failed after
232 decryption process completed
233 * @phy_err: No. of frames whose reception failed because the PHY
234 encountered an error
235 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
236 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
237 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
238 * @decrypt_busy_err: Decryption interruptions counter
239 * @phy_err_stats: Individual PHY error statistics
240 * @rx_len_err: No. of frames discarded due to bad length.
241 * @rx_oom_err: No. of frames dropped due to OOM issues.
242 * @rx_rate_err: No. of frames dropped due to rate errors.
243 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
244 * @rx_beacons: No. of beacons received.
245 * @rx_frags: No. of rx-fragements received.
246 * @rx_spectral: No of spectral packets received.
247 */
248struct ath_rx_stats {
249 u32 rx_pkts_all;
250 u32 rx_bytes_all;
251 u32 crc_err;
252 u32 decrypt_crc_err;
253 u32 phy_err;
254 u32 mic_err;
255 u32 pre_delim_crc_err;
256 u32 post_delim_crc_err;
257 u32 decrypt_busy_err;
258 u32 phy_err_stats[ATH9K_PHYERR_MAX];
259 u32 rx_len_err;
260 u32 rx_oom_err;
261 u32 rx_rate_err;
262 u32 rx_too_many_frags_err;
263 u32 rx_beacons;
264 u32 rx_frags;
265 u32 rx_spectral;
266};
267
268#define ANT_MAIN 0 224#define ANT_MAIN 0
269#define ANT_ALT 1 225#define ANT_ALT 1
270 226
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 857bb28b3894..726271c7c330 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -178,12 +178,12 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
178 pe.ts = mactime; 178 pe.ts = mactime;
179 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) { 179 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
180 struct dfs_pattern_detector *pd = sc->dfs_detector; 180 struct dfs_pattern_detector *pd = sc->dfs_detector;
181 static u64 last_ts;
182 ath_dbg(common, DFS, 181 ath_dbg(common, DFS,
183 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 182 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
184 "width=%d, rssi=%d, delta_ts=%llu\n", 183 "width=%d, rssi=%d, delta_ts=%llu\n",
185 pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts); 184 pe.freq, pe.ts, pe.width, pe.rssi,
186 last_ts = pe.ts; 185 pe.ts - sc->dfs_prev_pulse_ts);
186 sc->dfs_prev_pulse_ts = pe.ts;
187 DFS_STAT_INC(sc, pulses_processed); 187 DFS_STAT_INC(sc, pulses_processed);
188 if (pd != NULL && pd->add_pulse(pd, &pe)) { 188 if (pd != NULL && pd->add_pulse(pd, &pe)) {
189 DFS_STAT_INC(sc, radar_detected); 189 DFS_STAT_INC(sc, radar_detected);
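
The dfs.c hunk fixes a subtle multi-device bug: a function-local "static u64 last_ts" is one hidden global shared by every ath9k instance, so with two radios the delta_ts debug values interleaved timestamps across devices. The previous timestamp now lives in the softc, via the dfs_prev_pulse_ts field added in ath9k.h above. The two shapes side by side, with illustrative names:

    #include <linux/types.h>
    #include <linux/printk.h>

    struct device_state {
            u64 prev_pulse_ts;
    };

    /* buggy: one static shared by ALL device instances */
    static void process_pulse_buggy(u64 ts)
    {
            static u64 last_ts;

            pr_debug("delta=%llu\n", ts - last_ts);
            last_ts = ts;
    }

    /* fixed: state kept per device, as in the hunk */
    static void process_pulse(struct device_state *st, u64 ts)
    {
            pr_debug("delta=%llu\n", ts - st->prev_pulse_ts);
            st->prev_pulse_ts = ts;
    }
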
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index dab1f0cab993..09a5d72f3ff5 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -325,14 +325,14 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
325 325
326#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) 326#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
327#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) 327#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
328#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) 328#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
329#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a) 329#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
330#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ 330#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
331 331
332#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) 332#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
333 333
334void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 334void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
335 struct ath_htc_rx_status *rxs); 335 struct ath_rx_status *rs);
336 336
337struct ath_tx_stats { 337struct ath_tx_stats {
338 u32 buf_queued; 338 u32 buf_queued;
@@ -345,25 +345,18 @@ struct ath_tx_stats {
345 u32 queue_stats[IEEE80211_NUM_ACS]; 345 u32 queue_stats[IEEE80211_NUM_ACS];
346}; 346};
347 347
348struct ath_rx_stats { 348struct ath_skbrx_stats {
349 u32 skb_allocated; 349 u32 skb_allocated;
350 u32 skb_completed; 350 u32 skb_completed;
351 u32 skb_completed_bytes; 351 u32 skb_completed_bytes;
352 u32 skb_dropped; 352 u32 skb_dropped;
353 u32 err_crc;
354 u32 err_decrypt_crc;
355 u32 err_mic;
356 u32 err_pre_delim;
357 u32 err_post_delim;
358 u32 err_decrypt_busy;
359 u32 err_phy;
360 u32 err_phy_stats[ATH9K_PHYERR_MAX];
361}; 353};
362 354
363struct ath9k_debug { 355struct ath9k_debug {
364 struct dentry *debugfs_phy; 356 struct dentry *debugfs_phy;
365 struct ath_tx_stats tx_stats; 357 struct ath_tx_stats tx_stats;
366 struct ath_rx_stats rx_stats; 358 struct ath_rx_stats rx_stats;
359 struct ath_skbrx_stats skbrx_stats;
367}; 360};
368 361
369void ath9k_htc_get_et_strings(struct ieee80211_hw *hw, 362void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
@@ -385,7 +378,7 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
385#define TX_QSTAT_INC(c) do { } while (0) 378#define TX_QSTAT_INC(c) do { } while (0)
386 379
387static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 380static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
388 struct ath_htc_rx_status *rxs) 381 struct ath_rx_status *rs)
389{ 382{
390} 383}
391 384
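
The htc.h hunks complete the split on the USB driver's side: the old HTC-private ath_rx_stats (SKB counters plus duplicated error counters) becomes ath_skbrx_stats holding only transport-level SKB accounting, while the shared ath_rx_stats from common-debug.h takes over the error counters; RX_STAT_INC/RX_STAT_ADD are re-pointed at skbrx_stats and ath9k_htc_err_stat_rx now takes the common ath_rx_status. Post-split layout, with illustrative macro names (the real RX_STAT_INC hides the container lookup behind hif_dev):

    struct ath9k_debug_layout {                     /* stands in for ath9k_debug */
            struct ath_rx_stats rx_stats;           /* shared error counters */
            struct ath_skbrx_stats skbrx_stats;     /* HTC SKB accounting */
    };

    #define ERR_STAT_INC(d, c)      ((d)->rx_stats.c++)
    #define SKB_STAT_INC(d, c)      ((d)->skbrx_stats.c++)
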
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index fb071ee4fcfb..8b529e4b8ac4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -243,39 +243,14 @@ static const struct file_operations fops_xmit = {
243}; 243};
244 244
245void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 245void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
246 struct ath_htc_rx_status *rxs) 246 struct ath_rx_status *rs)
247{ 247{
248#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++ 248 ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
249
250 if (rxs->rs_status & ATH9K_RXERR_CRC)
251 priv->debug.rx_stats.err_crc++;
252 if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
253 priv->debug.rx_stats.err_decrypt_crc++;
254 if (rxs->rs_status & ATH9K_RXERR_MIC)
255 priv->debug.rx_stats.err_mic++;
256 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
257 priv->debug.rx_stats.err_pre_delim++;
258 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
259 priv->debug.rx_stats.err_post_delim++;
260 if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
261 priv->debug.rx_stats.err_decrypt_busy++;
262
263 if (rxs->rs_status & ATH9K_RXERR_PHY) {
264 priv->debug.rx_stats.err_phy++;
265 if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
266 RX_PHY_ERR_INC(rxs->rs_phyerr);
267 }
268
269#undef RX_PHY_ERR_INC
270} 249}
271 250
272static ssize_t read_file_recv(struct file *file, char __user *user_buf, 251static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
273 size_t count, loff_t *ppos) 252 size_t count, loff_t *ppos)
274{ 253{
275#define PHY_ERR(s, p) \
276 len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
277 priv->debug.rx_stats.err_phy_stats[p]);
278
279 struct ath9k_htc_priv *priv = file->private_data; 254 struct ath9k_htc_priv *priv = file->private_data;
280 char *buf; 255 char *buf;
281 unsigned int len = 0, size = 1500; 256 unsigned int len = 0, size = 1500;
@@ -287,63 +262,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
287 262
288 len += scnprintf(buf + len, size - len, 263 len += scnprintf(buf + len, size - len,
289 "%20s : %10u\n", "SKBs allocated", 264 "%20s : %10u\n", "SKBs allocated",
290 priv->debug.rx_stats.skb_allocated); 265 priv->debug.skbrx_stats.skb_allocated);
291 len += scnprintf(buf + len, size - len, 266 len += scnprintf(buf + len, size - len,
292 "%20s : %10u\n", "SKBs completed", 267 "%20s : %10u\n", "SKBs completed",
293 priv->debug.rx_stats.skb_completed); 268 priv->debug.skbrx_stats.skb_completed);
294 len += scnprintf(buf + len, size - len, 269 len += scnprintf(buf + len, size - len,
295 "%20s : %10u\n", "SKBs Dropped", 270 "%20s : %10u\n", "SKBs Dropped",
296 priv->debug.rx_stats.skb_dropped); 271 priv->debug.skbrx_stats.skb_dropped);
297
298 len += scnprintf(buf + len, size - len,
299 "%20s : %10u\n", "CRC ERR",
300 priv->debug.rx_stats.err_crc);
301 len += scnprintf(buf + len, size - len,
302 "%20s : %10u\n", "DECRYPT CRC ERR",
303 priv->debug.rx_stats.err_decrypt_crc);
304 len += scnprintf(buf + len, size - len,
305 "%20s : %10u\n", "MIC ERR",
306 priv->debug.rx_stats.err_mic);
307 len += scnprintf(buf + len, size - len,
308 "%20s : %10u\n", "PRE-DELIM CRC ERR",
309 priv->debug.rx_stats.err_pre_delim);
310 len += scnprintf(buf + len, size - len,
311 "%20s : %10u\n", "POST-DELIM CRC ERR",
312 priv->debug.rx_stats.err_post_delim);
313 len += scnprintf(buf + len, size - len,
314 "%20s : %10u\n", "DECRYPT BUSY ERR",
315 priv->debug.rx_stats.err_decrypt_busy);
316 len += scnprintf(buf + len, size - len,
317 "%20s : %10u\n", "TOTAL PHY ERR",
318 priv->debug.rx_stats.err_phy);
319
320
321 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
322 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
323 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
324 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
325 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
326 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
327 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
328 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
329 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
330 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
331 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
332 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
333 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
334 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
335 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
336 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
337 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
338 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
339 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
340 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
341 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
342 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
343 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
344 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
345 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
346 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
347 272
348 if (len > size) 273 if (len > size)
349 len = size; 274 len = size;
@@ -352,12 +277,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
352 kfree(buf); 277 kfree(buf);
353 278
354 return retval; 279 return retval;
355
356#undef PHY_ERR
357} 280}
358 281
359static const struct file_operations fops_recv = { 282static const struct file_operations fops_skb_rx = {
360 .read = read_file_recv, 283 .read = read_file_skb_rx,
361 .open = simple_open, 284 .open = simple_open,
362 .owner = THIS_MODULE, 285 .owner = THIS_MODULE,
363 .llseek = default_llseek, 286 .llseek = default_llseek,
@@ -486,423 +409,6 @@ static const struct file_operations fops_debug = {
486 .llseek = default_llseek, 409 .llseek = default_llseek,
487}; 410};
488 411
489static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct ath9k_htc_priv *priv = file->private_data;
493 struct ath_common *common = ath9k_hw_common(priv->ah);
494 struct base_eep_header *pBase = NULL;
495 unsigned int len = 0, size = 1500;
496 ssize_t retval = 0;
497 char *buf;
498
499 pBase = ath9k_htc_get_eeprom_base(priv);
500
501 if (pBase == NULL) {
502 ath_err(common, "Unknown EEPROM type\n");
503 return 0;
504 }
505
506 buf = kzalloc(size, GFP_KERNEL);
507 if (buf == NULL)
508 return -ENOMEM;
509
510 len += scnprintf(buf + len, size - len,
511 "%20s : %10d\n", "Major Version",
512 pBase->version >> 12);
513 len += scnprintf(buf + len, size - len,
514 "%20s : %10d\n", "Minor Version",
515 pBase->version & 0xFFF);
516 len += scnprintf(buf + len, size - len,
517 "%20s : %10d\n", "Checksum",
518 pBase->checksum);
519 len += scnprintf(buf + len, size - len,
520 "%20s : %10d\n", "Length",
521 pBase->length);
522 len += scnprintf(buf + len, size - len,
523 "%20s : %10d\n", "RegDomain1",
524 pBase->regDmn[0]);
525 len += scnprintf(buf + len, size - len,
526 "%20s : %10d\n", "RegDomain2",
527 pBase->regDmn[1]);
528 len += scnprintf(buf + len, size - len,
529 "%20s : %10d\n",
530 "TX Mask", pBase->txMask);
531 len += scnprintf(buf + len, size - len,
532 "%20s : %10d\n",
533 "RX Mask", pBase->rxMask);
534 len += scnprintf(buf + len, size - len,
535 "%20s : %10d\n",
536 "Allow 5GHz",
537 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
538 len += scnprintf(buf + len, size - len,
539 "%20s : %10d\n",
540 "Allow 2GHz",
541 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
542 len += scnprintf(buf + len, size - len,
543 "%20s : %10d\n",
544 "Disable 2GHz HT20",
545 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
546 len += scnprintf(buf + len, size - len,
547 "%20s : %10d\n",
548 "Disable 2GHz HT40",
549 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
550 len += scnprintf(buf + len, size - len,
551 "%20s : %10d\n",
552 "Disable 5Ghz HT20",
553 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
554 len += scnprintf(buf + len, size - len,
555 "%20s : %10d\n",
556 "Disable 5Ghz HT40",
557 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
558 len += scnprintf(buf + len, size - len,
559 "%20s : %10d\n",
560 "Big Endian",
561 !!(pBase->eepMisc & 0x01));
562 len += scnprintf(buf + len, size - len,
563 "%20s : %10d\n",
564 "Cal Bin Major Ver",
565 (pBase->binBuildNumber >> 24) & 0xFF);
566 len += scnprintf(buf + len, size - len,
567 "%20s : %10d\n",
568 "Cal Bin Minor Ver",
569 (pBase->binBuildNumber >> 16) & 0xFF);
570 len += scnprintf(buf + len, size - len,
571 "%20s : %10d\n",
572 "Cal Bin Build",
573 (pBase->binBuildNumber >> 8) & 0xFF);
574
575 /*
576 * UB91 specific data.
577 */
578 if (AR_SREV_9271(priv->ah)) {
579 struct base_eep_header_4k *pBase4k =
580 &priv->ah->eeprom.map4k.baseEepHeader;
581
582 len += scnprintf(buf + len, size - len,
583 "%20s : %10d\n",
584 "TX Gain type",
585 pBase4k->txGainType);
586 }
587
588 /*
589 * UB95 specific data.
590 */
591 if (priv->ah->hw_version.usbdev == AR9287_USB) {
592 struct base_eep_ar9287_header *pBase9287 =
593 &priv->ah->eeprom.map9287.baseEepHeader;
594
595 len += scnprintf(buf + len, size - len,
596 "%20s : %10ddB\n",
597 "Power Table Offset",
598 pBase9287->pwrTableOffset);
599
600 len += scnprintf(buf + len, size - len,
601 "%20s : %10d\n",
602 "OpenLoop Power Ctrl",
603 pBase9287->openLoopPwrCntl);
604 }
605
606 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
607 pBase->macAddr);
608 if (len > size)
609 len = size;
610
611 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
612 kfree(buf);
613
614 return retval;
615}
616
617static const struct file_operations fops_base_eeprom = {
618 .read = read_file_base_eeprom,
619 .open = simple_open,
620 .owner = THIS_MODULE,
621 .llseek = default_llseek,
622};
623
624static ssize_t read_4k_modal_eeprom(struct file *file,
625 char __user *user_buf,
626 size_t count, loff_t *ppos)
627{
628#define PR_EEP(_s, _val) \
629 do { \
630 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
631 _s, (_val)); \
632 } while (0)
633
634 struct ath9k_htc_priv *priv = file->private_data;
635 struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
636 unsigned int len = 0, size = 2048;
637 ssize_t retval = 0;
638 char *buf;
639
640 buf = kzalloc(size, GFP_KERNEL);
641 if (buf == NULL)
642 return -ENOMEM;
643
644 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
645 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
646 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
647 PR_EEP("Switch Settle", pModal->switchSettling);
648 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
649 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
650 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
651 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
652 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
653 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
654 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
655 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
656 PR_EEP("CCA Threshold)", pModal->thresh62);
657 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
658 PR_EEP("xpdGain", pModal->xpdGain);
659 PR_EEP("External PD", pModal->xpd);
660 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
661 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
662 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
663 PR_EEP("O/D Bias Version", pModal->version);
664 PR_EEP("CCK OutputBias", pModal->ob_0);
665 PR_EEP("BPSK OutputBias", pModal->ob_1);
666 PR_EEP("QPSK OutputBias", pModal->ob_2);
667 PR_EEP("16QAM OutputBias", pModal->ob_3);
668 PR_EEP("64QAM OutputBias", pModal->ob_4);
669 PR_EEP("CCK Driver1_Bias", pModal->db1_0);
670 PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
671 PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
672 PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
673 PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
674 PR_EEP("CCK Driver2_Bias", pModal->db2_0);
675 PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
676 PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
677 PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
678 PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
679 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
680 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
681 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
682 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
683 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
684 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
685 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
686 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
687 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
688 PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
689 PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
690 PR_EEP("TX Diversity", pModal->tx_diversity);
691
692 if (len > size)
693 len = size;
694
695 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
696 kfree(buf);
697
698 return retval;
699
700#undef PR_EEP
701}
702
703static ssize_t read_def_modal_eeprom(struct file *file,
704 char __user *user_buf,
705 size_t count, loff_t *ppos)
706{
707#define PR_EEP(_s, _val) \
708 do { \
709 if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
710 pModal = &priv->ah->eeprom.def.modalHeader[1]; \
711 len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
712 _s, (_val), "|"); \
713 } \
714 if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
715 pModal = &priv->ah->eeprom.def.modalHeader[0]; \
716 len += scnprintf(buf + len, size - len, "%9d\n",\
717 (_val)); \
718 } \
719 } while (0)
720
721 struct ath9k_htc_priv *priv = file->private_data;
722 struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
723 struct modal_eep_header *pModal = NULL;
724 unsigned int len = 0, size = 3500;
725 ssize_t retval = 0;
726 char *buf;
727
728 buf = kzalloc(size, GFP_KERNEL);
729 if (buf == NULL)
730 return -ENOMEM;
731
732 len += scnprintf(buf + len, size - len,
733 "%31s %15s\n", "2G", "5G");
734 len += scnprintf(buf + len, size - len,
735 "%32s %16s\n", "====", "====\n");
736
737 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
738 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
739 PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
740 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
741 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
742 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
743 PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
744 PR_EEP("Switch Settle", pModal->switchSettling);
745 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
746 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
747 PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
748 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
749 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
750 PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
751 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
752 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
753 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
754 PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
755 PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
756 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
757 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
758 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
759 PR_EEP("CCA Threshold)", pModal->thresh62);
760 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
761 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
762 PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
763 PR_EEP("xpdGain", pModal->xpdGain);
764 PR_EEP("External PD", pModal->xpd);
765 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
766 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
767 PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
768 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
769 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
770 PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
771 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
772 PR_EEP("Chain0 OutputBias", pModal->ob);
773 PR_EEP("Chain0 DriverBias", pModal->db);
774 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
775 PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
776 PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
777 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
778 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
779 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
780 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
781 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
782 PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
783 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
784 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
785 PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
786 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
787 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
788 PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
789 PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
790 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
791 PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
792 PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
793 PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
794 PR_EEP("Chain1 DriverBias", pModal->db_ch1);
795 PR_EEP("LNA Control", pModal->lna_ctl);
796 PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
797 PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
798 PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
799
800 if (len > size)
801 len = size;
802
803 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
804 kfree(buf);
805
806 return retval;
807
808#undef PR_EEP
809}
810
811static ssize_t read_9287_modal_eeprom(struct file *file,
812 char __user *user_buf,
813 size_t count, loff_t *ppos)
814{
815#define PR_EEP(_s, _val) \
816 do { \
817 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
818 _s, (_val)); \
819 } while (0)
820
821 struct ath9k_htc_priv *priv = file->private_data;
822 struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
823 unsigned int len = 0, size = 3000;
824 ssize_t retval = 0;
825 char *buf;
826
827 buf = kzalloc(size, GFP_KERNEL);
828 if (buf == NULL)
829 return -ENOMEM;
830
831 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
832 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
833 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
834 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
835 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
836 PR_EEP("Switch Settle", pModal->switchSettling);
837 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
838 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
839 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
840 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
841 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
842 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
843 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
844 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
845 PR_EEP("CCA Threshold)", pModal->thresh62);
846 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
847 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
848 PR_EEP("xpdGain", pModal->xpdGain);
849 PR_EEP("External PD", pModal->xpd);
850 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
851 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
852 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
853 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
854 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
855 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
856 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
857 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
858 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
859 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
860 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
861 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
862 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
863 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
864 PR_EEP("AR92x7 Version", pModal->version);
865 PR_EEP("DriverBias1", pModal->db1);
866 PR_EEP("DriverBias2", pModal->db1);
867 PR_EEP("CCK OutputBias", pModal->ob_cck);
868 PR_EEP("PSK OutputBias", pModal->ob_psk);
869 PR_EEP("QAM OutputBias", pModal->ob_qam);
870 PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
871
872 if (len > size)
873 len = size;
874
875 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
876 kfree(buf);
877
878 return retval;
879
880#undef PR_EEP
881}
882
883static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
884 size_t count, loff_t *ppos)
885{
886 struct ath9k_htc_priv *priv = file->private_data;
887
888 if (AR_SREV_9271(priv->ah))
889 return read_4k_modal_eeprom(file, user_buf, count, ppos);
890 else if (priv->ah->hw_version.usbdev == AR9280_USB)
891 return read_def_modal_eeprom(file, user_buf, count, ppos);
892 else if (priv->ah->hw_version.usbdev == AR9287_USB)
893 return read_9287_modal_eeprom(file, user_buf, count, ppos);
894
895 return 0;
896}
897
898static const struct file_operations fops_modal_eeprom = {
899 .read = read_file_modal_eeprom,
900 .open = simple_open,
901 .owner = THIS_MODULE,
902 .llseek = default_llseek,
903};
904
905
906/* Ethtool support for get-stats */ 412/* Ethtool support for get-stats */
907#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" 413#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
908static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = { 414static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -947,6 +453,8 @@ int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
947 453
948#define STXBASE priv->debug.tx_stats 454#define STXBASE priv->debug.tx_stats
949#define SRXBASE priv->debug.rx_stats 455#define SRXBASE priv->debug.rx_stats
456#define SKBTXBASE priv->debug.tx_stats
457#define SKBRXBASE priv->debug.skbrx_stats
950#define ASTXQ(a) \ 458#define ASTXQ(a) \
951 data[i++] = STXBASE.a[IEEE80211_AC_BE]; \ 459 data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
952 data[i++] = STXBASE.a[IEEE80211_AC_BK]; \ 460 data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
@@ -960,24 +468,24 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
960 struct ath9k_htc_priv *priv = hw->priv; 468 struct ath9k_htc_priv *priv = hw->priv;
961 int i = 0; 469 int i = 0;
962 470
963 data[i++] = STXBASE.skb_success; 471 data[i++] = SKBTXBASE.skb_success;
964 data[i++] = STXBASE.skb_success_bytes; 472 data[i++] = SKBTXBASE.skb_success_bytes;
965 data[i++] = SRXBASE.skb_completed; 473 data[i++] = SKBRXBASE.skb_completed;
966 data[i++] = SRXBASE.skb_completed_bytes; 474 data[i++] = SKBRXBASE.skb_completed_bytes;
967 475
968 ASTXQ(queue_stats); 476 ASTXQ(queue_stats);
969 477
970 data[i++] = SRXBASE.err_crc; 478 data[i++] = SRXBASE.crc_err;
971 data[i++] = SRXBASE.err_decrypt_crc; 479 data[i++] = SRXBASE.decrypt_crc_err;
972 data[i++] = SRXBASE.err_phy; 480 data[i++] = SRXBASE.phy_err;
973 data[i++] = SRXBASE.err_mic; 481 data[i++] = SRXBASE.mic_err;
974 data[i++] = SRXBASE.err_pre_delim; 482 data[i++] = SRXBASE.pre_delim_crc_err;
975 data[i++] = SRXBASE.err_post_delim; 483 data[i++] = SRXBASE.post_delim_crc_err;
976 data[i++] = SRXBASE.err_decrypt_busy; 484 data[i++] = SRXBASE.decrypt_busy_err;
977 485
978 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR]; 486 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
979 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING]; 487 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
980 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING]; 488 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
981 489
982 WARN_ON(i != ATH9K_HTC_SSTATS_LEN); 490 WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
983} 491}
@@ -1001,18 +509,21 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
1001 priv, &fops_tgt_rx_stats); 509 priv, &fops_tgt_rx_stats);
1002 debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy, 510 debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
1003 priv, &fops_xmit); 511 priv, &fops_xmit);
1004 debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy, 512 debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
1005 priv, &fops_recv); 513 priv, &fops_skb_rx);
514
515 ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
516 ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
517
1006 debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy, 518 debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
1007 priv, &fops_slot); 519 priv, &fops_slot);
1008 debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy, 520 debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
1009 priv, &fops_queue); 521 priv, &fops_queue);
1010 debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy, 522 debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
1011 priv, &fops_debug); 523 priv, &fops_debug);
1012 debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy, 524
1013 priv, &fops_base_eeprom); 525 ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
1014 debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy, 526 ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
1015 priv, &fops_modal_eeprom);
1016 527
1017 return 0; 528 return 0;
1018} 529}
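
Taken together, the htc_drv_debug.c hunks above strip the duplicated RX bookkeeping out of the HTC driver: the hardware error counters move behind the shared ath9k_cmn_debug_stat_rx()/ath9k_cmn_debug_recv()/ath9k_cmn_debug_phy_err() helpers, the EEPROM dumpers behind ath9k_cmn_debug_base_eeprom()/ath9k_cmn_debug_modal_eeprom(), and the driver keeps only its skb-level accounting under the new "skb_rx" debugfs file. A minimal user-space sketch for reading one of the resulting files follows; the debugfs path is an assumption, the phy directory name depends on the device.

#include <stdio.h>

/* Hypothetical debugfs path; the phyN component varies per device. */
#define SKB_RX "/sys/kernel/debug/ieee80211/phy0/ath9k_htc/skb_rx"

int main(void)
{
    char line[128];
    FILE *f = fopen(SKB_RX, "r");

    if (!f) {
        perror(SKB_RX);
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}
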
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 289f3d8924b5..bb86eb2ffc95 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -996,8 +996,6 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
996 goto rx_next; 996 goto rx_next;
997 } 997 }
998 998
999 ath9k_htc_err_stat_rx(priv, rxstatus);
1000
1001 /* Get the RX status information */ 999 /* Get the RX status information */
1002 1000
1003 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 1001 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1005,6 +1003,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1005 /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER). 1003 /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
1006 * After this, we can drop this part of skb. */ 1004 * After this, we can drop this part of skb. */
1007 rx_status_htc_to_ath(&rx_stats, rxstatus); 1005 rx_status_htc_to_ath(&rx_stats, rxstatus);
1006 ath9k_htc_err_stat_rx(priv, &rx_stats);
1008 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp); 1007 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
1009 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE); 1008 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
1010 1009
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c8a9dfab1fee..2a8ed8375ec0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -26,7 +26,6 @@
26#include "ar9003_mac.h" 26#include "ar9003_mac.h"
27#include "ar9003_mci.h" 27#include "ar9003_mci.h"
28#include "ar9003_phy.h" 28#include "ar9003_phy.h"
29#include "debug.h"
30#include "ath9k.h" 29#include "ath9k.h"
31 30
32static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -246,6 +245,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
246 return; 245 return;
247 case AR9300_DEVID_AR953X: 246 case AR9300_DEVID_AR953X:
248 ah->hw_version.macVersion = AR_SREV_VERSION_9531; 247 ah->hw_version.macVersion = AR_SREV_VERSION_9531;
248 if (ah->get_mac_revision)
249 ah->hw_version.macRev = ah->get_mac_revision();
249 return; 250 return;
250 } 251 }
251 252
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 36ae6490e554..0246b990fe87 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -61,6 +61,10 @@ static int ath9k_ps_enable;
61module_param_named(ps_enable, ath9k_ps_enable, int, 0444); 61module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); 62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
63 63
64static int ath9k_use_chanctx;
65module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
66MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
67
64bool is_ath9k_unloaded; 68bool is_ath9k_unloaded;
65 69
66#ifdef CONFIG_MAC80211_LEDS 70#ifdef CONFIG_MAC80211_LEDS
@@ -508,7 +512,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
508 sc->tx99_power = MAX_RATE_POWER + 1; 512 sc->tx99_power = MAX_RATE_POWER + 1;
509 init_waitqueue_head(&sc->tx_wait); 513 init_waitqueue_head(&sc->tx_wait);
510 514
511 if (!pdata) { 515 if (!pdata || pdata->use_eeprom) {
512 ah->ah_flags |= AH_USE_EEPROM; 516 ah->ah_flags |= AH_USE_EEPROM;
513 sc->sc_ah->led_pin = -1; 517 sc->sc_ah->led_pin = -1;
514 } else { 518 } else {
@@ -589,6 +593,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
589 if (ret) 593 if (ret)
590 goto err_btcoex; 594 goto err_btcoex;
591 595
596 sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
597 NULL, sc, AR_FIRST_NDP_TIMER);
598
592 ath9k_cmn_init_crypto(sc->sc_ah); 599 ath9k_cmn_init_crypto(sc->sc_ah);
593 ath9k_init_misc(sc); 600 ath9k_init_misc(sc);
594 ath_fill_led_pin(sc); 601 ath_fill_led_pin(sc);
@@ -643,17 +650,20 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
643} 650}
644 651
645static const struct ieee80211_iface_limit if_limits[] = { 652static const struct ieee80211_iface_limit if_limits[] = {
646 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | 653 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
647 BIT(NL80211_IFTYPE_P2P_CLIENT) |
648 BIT(NL80211_IFTYPE_WDS) },
649 { .max = 8, .types = 654 { .max = 8, .types =
650#ifdef CONFIG_MAC80211_MESH 655#ifdef CONFIG_MAC80211_MESH
651 BIT(NL80211_IFTYPE_MESH_POINT) | 656 BIT(NL80211_IFTYPE_MESH_POINT) |
652#endif 657#endif
653 BIT(NL80211_IFTYPE_AP) | 658 BIT(NL80211_IFTYPE_AP) },
659 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
654 BIT(NL80211_IFTYPE_P2P_GO) }, 660 BIT(NL80211_IFTYPE_P2P_GO) },
655}; 661};
656 662
663static const struct ieee80211_iface_limit wds_limits[] = {
664 { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
665};
666
657static const struct ieee80211_iface_limit if_dfs_limits[] = { 667static const struct ieee80211_iface_limit if_dfs_limits[] = {
658 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | 668 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
659#ifdef CONFIG_MAC80211_MESH 669#ifdef CONFIG_MAC80211_MESH
@@ -670,6 +680,13 @@ static const struct ieee80211_iface_combination if_comb[] = {
670 .num_different_channels = 1, 680 .num_different_channels = 1,
671 .beacon_int_infra_match = true, 681 .beacon_int_infra_match = true,
672 }, 682 },
683 {
684 .limits = wds_limits,
685 .n_limits = ARRAY_SIZE(wds_limits),
686 .max_interfaces = 2048,
687 .num_different_channels = 1,
688 .beacon_int_infra_match = true,
689 },
673#ifdef CONFIG_ATH9K_DFS_CERTIFIED 690#ifdef CONFIG_ATH9K_DFS_CERTIFIED
674 { 691 {
675 .limits = if_dfs_limits, 692 .limits = if_dfs_limits,
@@ -711,19 +728,23 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
711 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) 728 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
712 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 729 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
713 730
714 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 731 hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR |
732 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE);
715 733
716 if (!config_enabled(CONFIG_ATH9K_TX99)) { 734 if (!config_enabled(CONFIG_ATH9K_TX99)) {
717 hw->wiphy->interface_modes = 735 hw->wiphy->interface_modes =
718 BIT(NL80211_IFTYPE_P2P_GO) | 736 BIT(NL80211_IFTYPE_P2P_GO) |
719 BIT(NL80211_IFTYPE_P2P_CLIENT) | 737 BIT(NL80211_IFTYPE_P2P_CLIENT) |
720 BIT(NL80211_IFTYPE_AP) | 738 BIT(NL80211_IFTYPE_AP) |
721 BIT(NL80211_IFTYPE_WDS) |
722 BIT(NL80211_IFTYPE_STATION) | 739 BIT(NL80211_IFTYPE_STATION) |
723 BIT(NL80211_IFTYPE_ADHOC) | 740 BIT(NL80211_IFTYPE_ADHOC) |
724 BIT(NL80211_IFTYPE_MESH_POINT); 741 BIT(NL80211_IFTYPE_MESH_POINT);
725 hw->wiphy->iface_combinations = if_comb; 742 hw->wiphy->iface_combinations = if_comb;
726 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 743 if (!ath9k_use_chanctx) {
744 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
745 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
746 } else
747 hw->wiphy->n_iface_combinations = 1;
727 } 748 }
728 749
729 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 750 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -855,6 +876,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
855{ 876{
856 int i = 0; 877 int i = 0;
857 878
879 if (sc->p2p_ps_timer)
880 ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
881
858 ath9k_deinit_btcoex(sc); 882 ath9k_deinit_btcoex(sc);
859 883
860 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 884 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
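
Two related changes run through the init.c hunks above: WDS is split out of the default interface-combination entry into its own wds_limits combination, and the new use_chanctx module parameter restricts the advertisement to the first combination (and drops WDS entirely) when channel contexts are enabled. A minimal sketch of how a driver advertises such combinations through cfg80211; struct and field names follow the code above, the values are illustrative only.

#include <linux/kernel.h>
#include <net/cfg80211.h>

/* One station interface on a single channel -- illustrative values. */
static const struct ieee80211_iface_limit demo_limits[] = {
    { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
};

static const struct ieee80211_iface_combination demo_comb[] = {
    {
        .limits = demo_limits,
        .n_limits = ARRAY_SIZE(demo_limits),
        .max_interfaces = 1,
        .num_different_channels = 1,
    },
};

static void demo_advertise(struct wiphy *wiphy, bool single_comb)
{
    wiphy->iface_combinations = demo_comb;
    /* Mirrors the use_chanctx gating above: expose fewer combinations
     * when the concurrency constraints are tighter. */
    wiphy->n_iface_combinations = single_comb ? 1 : ARRAY_SIZE(demo_comb);
}
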
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 51ce36f108f9..275205ab5f15 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -958,3 +958,25 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
958 return; 958 return;
959} 959}
960EXPORT_SYMBOL(ath9k_hw_set_interrupts); 960EXPORT_SYMBOL(ath9k_hw_set_interrupts);
961
962#define ATH9K_HW_MAX_DCU 10
963#define ATH9K_HW_SLICE_PER_DCU 16
964#define ATH9K_HW_BIT_IN_SLICE 16
965void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
966{
967 int dcu_idx;
968 u32 filter;
969
970 for (dcu_idx = 0; dcu_idx < 10; dcu_idx++) {
971 filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
972 filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
973 filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
974 AR_D_TXBLK_WRITE_SLICE);
975 filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
976 ath_dbg(ath9k_hw_common(ah), PS,
977 "DCU%d staid %d set %d txfilter %08x\n",
978 dcu_idx, destidx, set, filter);
979 REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
980 }
981}
982EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
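
The new ath9k_hw_set_tx_filter() packs a write command, DCU number, slice and bit into one register value per DCU: with 16 station indices per slice, destidx selects slice destidx / ATH9K_HW_SLICE_PER_DCU and bit destidx % ATH9K_HW_BIT_IN_SLICE. A stand-alone sketch of just that index arithmetic, using the constants from the hunk above:

#include <stdio.h>

#define SLICE_PER_DCU 16    /* ATH9K_HW_SLICE_PER_DCU */
#define BIT_IN_SLICE  16    /* ATH9K_HW_BIT_IN_SLICE  */

int main(void)
{
    unsigned int destidx;

    /* Show how a destination index maps onto (slice, bit). */
    for (destidx = 0; destidx <= 40; destidx += 8)
        printf("destidx %2u -> slice %u, bit %u\n", destidx,
               destidx / SLICE_PER_DCU, destidx % BIT_IN_SLICE);
    return 0;
}
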
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 89df634e81f9..da7686757535 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -729,6 +729,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
729void ath9k_hw_abortpcurecv(struct ath_hw *ah); 729void ath9k_hw_abortpcurecv(struct ath_hw *ah);
730bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset); 730bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
731int ath9k_hw_beaconq_setup(struct ath_hw *ah); 731int ath9k_hw_beaconq_setup(struct ath_hw *ah);
732void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
732 733
733/* Interrupt Handling */ 734/* Interrupt Handling */
734bool ath9k_hw_intrpend(struct ath_hw *ah); 735bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index d69853b848ce..62ac95d6bb9d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
261 sc->gtt_cnt = 0; 261 sc->gtt_cnt = 0;
262 ieee80211_wake_queues(sc->hw); 262 ieee80211_wake_queues(sc->hw);
263 263
264 ath9k_p2p_ps_timer(sc);
265
264 return true; 266 return true;
265} 267}
266 268
@@ -419,6 +421,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
419 an->sc = sc; 421 an->sc = sc;
420 an->sta = sta; 422 an->sta = sta;
421 an->vif = vif; 423 an->vif = vif;
424 memset(&an->key_idx, 0, sizeof(an->key_idx));
422 425
423 ath_tx_node_init(sc, an); 426 ath_tx_node_init(sc, an);
424} 427}
@@ -1119,6 +1122,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1119 if (ath9k_uses_beacons(vif->type)) 1122 if (ath9k_uses_beacons(vif->type))
1120 ath9k_beacon_assign_slot(sc, vif); 1123 ath9k_beacon_assign_slot(sc, vif);
1121 1124
1125 avp->vif = vif;
1126
1122 an->sc = sc; 1127 an->sc = sc;
1123 an->sta = NULL; 1128 an->sta = NULL;
1124 an->vif = vif; 1129 an->vif = vif;
@@ -1163,6 +1168,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1163 return 0; 1168 return 0;
1164} 1169}
1165 1170
1171static void
1172ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
1173{
1174 struct ath_hw *ah = sc->sc_ah;
1175 s32 tsf, target_tsf;
1176
1177 if (!avp || !avp->noa.has_next_tsf)
1178 return;
1179
1180 ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
1181
1182 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1183
1184 target_tsf = avp->noa.next_tsf;
1185 if (!avp->noa.absent)
1186 target_tsf -= ATH_P2P_PS_STOP_TIME;
1187
1188 if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
1189 target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
1190
1191 ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
1192}
1193
1166static void ath9k_remove_interface(struct ieee80211_hw *hw, 1194static void ath9k_remove_interface(struct ieee80211_hw *hw,
1167 struct ieee80211_vif *vif) 1195 struct ieee80211_vif *vif)
1168{ 1196{
@@ -1174,6 +1202,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1174 1202
1175 mutex_lock(&sc->mutex); 1203 mutex_lock(&sc->mutex);
1176 1204
1205 spin_lock_bh(&sc->sc_pcu_lock);
1206 if (avp == sc->p2p_ps_vif) {
1207 sc->p2p_ps_vif = NULL;
1208 ath9k_update_p2p_ps_timer(sc, NULL);
1209 }
1210 spin_unlock_bh(&sc->sc_pcu_lock);
1211
1177 sc->nvifs--; 1212 sc->nvifs--;
1178 sc->tx99_vif = NULL; 1213 sc->tx99_vif = NULL;
1179 1214
@@ -1427,8 +1462,10 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1427 return 0; 1462 return 0;
1428 1463
1429 key = ath_key_config(common, vif, sta, &ps_key); 1464 key = ath_key_config(common, vif, sta, &ps_key);
1430 if (key > 0) 1465 if (key > 0) {
1431 an->ps_key = key; 1466 an->ps_key = key;
1467 an->key_idx[0] = key;
1468 }
1432 1469
1433 return 0; 1470 return 0;
1434} 1471}
@@ -1446,6 +1483,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1446 1483
1447 ath_key_delete(common, &ps_key); 1484 ath_key_delete(common, &ps_key);
1448 an->ps_key = 0; 1485 an->ps_key = 0;
1486 an->key_idx[0] = 0;
1449} 1487}
1450 1488
1451static int ath9k_sta_remove(struct ieee80211_hw *hw, 1489static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1460,6 +1498,19 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1460 return 0; 1498 return 0;
1461} 1499}
1462 1500
1501static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
1502 struct ath_node *an,
1503 bool set)
1504{
1505 int i;
1506
1507 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1508 if (!an->key_idx[i])
1509 continue;
1510 ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
1511 }
1512}
1513
1463static void ath9k_sta_notify(struct ieee80211_hw *hw, 1514static void ath9k_sta_notify(struct ieee80211_hw *hw,
1464 struct ieee80211_vif *vif, 1515 struct ieee80211_vif *vif,
1465 enum sta_notify_cmd cmd, 1516 enum sta_notify_cmd cmd,
@@ -1472,8 +1523,10 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
1472 case STA_NOTIFY_SLEEP: 1523 case STA_NOTIFY_SLEEP:
1473 an->sleeping = true; 1524 an->sleeping = true;
1474 ath_tx_aggr_sleep(sta, sc, an); 1525 ath_tx_aggr_sleep(sta, sc, an);
1526 ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
1475 break; 1527 break;
1476 case STA_NOTIFY_AWAKE: 1528 case STA_NOTIFY_AWAKE:
1529 ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
1477 an->sleeping = false; 1530 an->sleeping = false;
1478 ath_tx_aggr_wakeup(sc, an); 1531 ath_tx_aggr_wakeup(sc, an);
1479 break; 1532 break;
@@ -1529,7 +1582,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1529{ 1582{
1530 struct ath_softc *sc = hw->priv; 1583 struct ath_softc *sc = hw->priv;
1531 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1584 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1532 int ret = 0; 1585 struct ath_node *an = NULL;
1586 int ret = 0, i;
1533 1587
1534 if (ath9k_modparam_nohwcrypt) 1588 if (ath9k_modparam_nohwcrypt)
1535 return -ENOSPC; 1589 return -ENOSPC;
@@ -1551,13 +1605,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1551 1605
1552 mutex_lock(&sc->mutex); 1606 mutex_lock(&sc->mutex);
1553 ath9k_ps_wakeup(sc); 1607 ath9k_ps_wakeup(sc);
1554 ath_dbg(common, CONFIG, "Set HW Key\n"); 1608 ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
1609 if (sta)
1610 an = (struct ath_node *)sta->drv_priv;
1555 1611
1556 switch (cmd) { 1612 switch (cmd) {
1557 case SET_KEY: 1613 case SET_KEY:
1558 if (sta) 1614 if (sta)
1559 ath9k_del_ps_key(sc, vif, sta); 1615 ath9k_del_ps_key(sc, vif, sta);
1560 1616
1617 key->hw_key_idx = 0;
1561 ret = ath_key_config(common, vif, sta, key); 1618 ret = ath_key_config(common, vif, sta, key);
1562 if (ret >= 0) { 1619 if (ret >= 0) {
1563 key->hw_key_idx = ret; 1620 key->hw_key_idx = ret;
@@ -1570,9 +1627,27 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1570 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; 1627 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1571 ret = 0; 1628 ret = 0;
1572 } 1629 }
1630 if (an && key->hw_key_idx) {
1631 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1632 if (an->key_idx[i])
1633 continue;
1634 an->key_idx[i] = key->hw_key_idx;
1635 break;
1636 }
1637 WARN_ON(i == ARRAY_SIZE(an->key_idx));
1638 }
1573 break; 1639 break;
1574 case DISABLE_KEY: 1640 case DISABLE_KEY:
1575 ath_key_delete(common, key); 1641 ath_key_delete(common, key);
1642 if (an) {
1643 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1644 if (an->key_idx[i] != key->hw_key_idx)
1645 continue;
1646 an->key_idx[i] = 0;
1647 break;
1648 }
1649 }
1650 key->hw_key_idx = 0;
1576 break; 1651 break;
1577 default: 1652 default:
1578 ret = -EINVAL; 1653 ret = -EINVAL;
@@ -1636,6 +1711,66 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1636 ath9k_set_assoc_state(sc, vif); 1711 ath9k_set_assoc_state(sc, vif);
1637} 1712}
1638 1713
1714void ath9k_p2p_ps_timer(void *priv)
1715{
1716 struct ath_softc *sc = priv;
1717 struct ath_vif *avp = sc->p2p_ps_vif;
1718 struct ieee80211_vif *vif;
1719 struct ieee80211_sta *sta;
1720 struct ath_node *an;
1721 u32 tsf;
1722
1723 if (!avp)
1724 return;
1725
1726 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1727 if (!avp->noa.absent)
1728 tsf += ATH_P2P_PS_STOP_TIME;
1729
1730 if (!avp->noa.has_next_tsf ||
1731 avp->noa.next_tsf - tsf > BIT(31))
1732 ieee80211_update_p2p_noa(&avp->noa, tsf);
1733
1734 ath9k_update_p2p_ps_timer(sc, avp);
1735
1736 rcu_read_lock();
1737
1738 vif = avp->vif;
1739 sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
1740 if (!sta)
1741 goto out;
1742
1743 an = (void *) sta->drv_priv;
1744 if (an->sleeping == !!avp->noa.absent)
1745 goto out;
1746
1747 an->sleeping = avp->noa.absent;
1748 if (an->sleeping)
1749 ath_tx_aggr_sleep(sta, sc, an);
1750 else
1751 ath_tx_aggr_wakeup(sc, an);
1752
1753out:
1754 rcu_read_unlock();
1755}
1756
1757void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
1758{
1759 struct ath_vif *avp = (void *)vif->drv_priv;
1760 u32 tsf;
1761
1762 if (!sc->p2p_ps_timer)
1763 return;
1764
1765 if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
1766 return;
1767
1768 sc->p2p_ps_vif = avp;
1769 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1770 ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
1771 ath9k_update_p2p_ps_timer(sc, avp);
1772}
1773
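
ath9k_p2p_ps_timer() above leans on unsigned 32-bit TSF arithmetic: the test "avp->noa.next_tsf - tsf > BIT(31)" is a wrap-safe way of asking whether next_tsf already lies in the past, in which case the NoA schedule is advanced with ieee80211_update_p2p_noa(). A self-contained sketch of that comparison (0x80000000u stands in for BIT(31)):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "is target already behind now?" on a free-running u32 TSF. */
static int tsf_in_past(uint32_t target, uint32_t now)
{
    return (uint32_t)(target - now) > 0x80000000u;
}

int main(void)
{
    printf("%d\n", tsf_in_past(10, 20));          /* 1: already passed  */
    printf("%d\n", tsf_in_past(0xFFFFFFF0u, 5));  /* 1: passed, wrapped */
    printf("%d\n", tsf_in_past(20, 10));          /* 0: still ahead     */
    return 0;
}
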
1639static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 1774static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1640 struct ieee80211_vif *vif, 1775 struct ieee80211_vif *vif,
1641 struct ieee80211_bss_conf *bss_conf, 1776 struct ieee80211_bss_conf *bss_conf,
@@ -1650,6 +1785,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1650 struct ath_hw *ah = sc->sc_ah; 1785 struct ath_hw *ah = sc->sc_ah;
1651 struct ath_common *common = ath9k_hw_common(ah); 1786 struct ath_common *common = ath9k_hw_common(ah);
1652 struct ath_vif *avp = (void *)vif->drv_priv; 1787 struct ath_vif *avp = (void *)vif->drv_priv;
1788 unsigned long flags;
1653 int slottime; 1789 int slottime;
1654 1790
1655 ath9k_ps_wakeup(sc); 1791 ath9k_ps_wakeup(sc);
@@ -1710,6 +1846,15 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1710 } 1846 }
1711 } 1847 }
1712 1848
1849 if (changed & BSS_CHANGED_P2P_PS) {
1850 spin_lock_bh(&sc->sc_pcu_lock);
1851 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1852 if (!(sc->ps_flags & PS_BEACON_SYNC))
1853 ath9k_update_p2p_ps(sc, vif);
1854 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1855 spin_unlock_bh(&sc->sc_pcu_lock);
1856 }
1857
1713 if (changed & CHECK_ANI) 1858 if (changed & CHECK_ANI)
1714 ath_check_ani(sc); 1859 ath_check_ani(sc);
1715 1860
@@ -1883,7 +2028,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
1883 return !!npend; 2028 return !!npend;
1884} 2029}
1885 2030
1886static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 2031static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2032 u32 queues, bool drop)
1887{ 2033{
1888 struct ath_softc *sc = hw->priv; 2034 struct ath_softc *sc = hw->priv;
1889 struct ath_hw *ah = sc->sc_ah; 2035 struct ath_hw *ah = sc->sc_ah;
@@ -2084,14 +2230,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2084 clear_bit(ATH_OP_SCANNING, &common->op_flags); 2230 clear_bit(ATH_OP_SCANNING, &common->op_flags);
2085} 2231}
2086 2232
2087static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
2088 struct ieee80211_vif *vif,
2089 struct cfg80211_chan_def *chandef)
2090{
2091 /* depend on vif->csa_active only */
2092 return;
2093}
2094
2095struct ieee80211_ops ath9k_ops = { 2233struct ieee80211_ops ath9k_ops = {
2096 .tx = ath9k_tx, 2234 .tx = ath9k_tx,
2097 .start = ath9k_start, 2235 .start = ath9k_start,
@@ -2139,5 +2277,4 @@ struct ieee80211_ops ath9k_ops = {
2139#endif 2277#endif
2140 .sw_scan_start = ath9k_sw_scan_start, 2278 .sw_scan_start = ath9k_sw_scan_start,
2141 .sw_scan_complete = ath9k_sw_scan_complete, 2279 .sw_scan_complete = ath9k_sw_scan_complete,
2142 .channel_switch_beacon = ath9k_channel_switch_beacon,
2143}; 2280};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 914dbc6b1720..4dec09e565ed 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -686,7 +686,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
686 struct ath_softc *sc = (struct ath_softc *) common->priv; 686 struct ath_softc *sc = (struct ath_softc *) common->priv;
687 struct ath9k_platform_data *pdata = sc->dev->platform_data; 687 struct ath9k_platform_data *pdata = sc->dev->platform_data;
688 688
689 if (pdata) { 689 if (pdata && !pdata->use_eeprom) {
690 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { 690 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
691 ath_err(common, 691 ath_err(common,
692 "%s: eeprom read failed, offset %08x is out of range\n", 692 "%s: eeprom read failed, offset %08x is out of range\n",
@@ -914,6 +914,7 @@ static int ath_pci_suspend(struct device *device)
914 */ 914 */
915 ath9k_stop_btcoex(sc); 915 ath9k_stop_btcoex(sc);
916 ath9k_hw_disable(sc->sc_ah); 916 ath9k_hw_disable(sc->sc_ah);
917 del_timer_sync(&sc->sleep_timer);
917 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 918 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
918 919
919 return 0; 920 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 19df969ec909..9105a92364f7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,7 +34,8 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
34 * buffer (or rx fifo). This can incorrectly acknowledge packets 34 * buffer (or rx fifo). This can incorrectly acknowledge packets
35 * to a sender if last desc is self-linked. 35 * to a sender if last desc is self-linked.
36 */ 36 */
37static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf) 37static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
38 bool flush)
38{ 39{
39 struct ath_hw *ah = sc->sc_ah; 40 struct ath_hw *ah = sc->sc_ah;
40 struct ath_common *common = ath9k_hw_common(ah); 41 struct ath_common *common = ath9k_hw_common(ah);
@@ -59,18 +60,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
59 common->rx_bufsize, 60 common->rx_bufsize,
60 0); 61 0);
61 62
62 if (sc->rx.rxlink == NULL) 63 if (sc->rx.rxlink)
63 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
64 else
65 *sc->rx.rxlink = bf->bf_daddr; 64 *sc->rx.rxlink = bf->bf_daddr;
65 else if (!flush)
66 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
66 67
67 sc->rx.rxlink = &ds->ds_link; 68 sc->rx.rxlink = &ds->ds_link;
68} 69}
69 70
70static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf) 71static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
72 bool flush)
71{ 73{
72 if (sc->rx.buf_hold) 74 if (sc->rx.buf_hold)
73 ath_rx_buf_link(sc, sc->rx.buf_hold); 75 ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
74 76
75 sc->rx.buf_hold = bf; 77 sc->rx.buf_hold = bf;
76} 78}
@@ -442,7 +444,7 @@ int ath_startrecv(struct ath_softc *sc)
442 sc->rx.buf_hold = NULL; 444 sc->rx.buf_hold = NULL;
443 sc->rx.rxlink = NULL; 445 sc->rx.rxlink = NULL;
444 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 446 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
445 ath_rx_buf_link(sc, bf); 447 ath_rx_buf_link(sc, bf, false);
446 } 448 }
447 449
448 /* We could have deleted elements so the list may be empty now */ 450 /* We could have deleted elements so the list may be empty now */
@@ -538,7 +540,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
538 sc->ps_flags &= ~PS_BEACON_SYNC; 540 sc->ps_flags &= ~PS_BEACON_SYNC;
539 ath_dbg(common, PS, 541 ath_dbg(common, PS,
540 "Reconfigure beacon timers based on synchronized timestamp\n"); 542 "Reconfigure beacon timers based on synchronized timestamp\n");
541 ath9k_set_beacon(sc); 543 if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
544 ath9k_set_beacon(sc);
545 if (sc->p2p_ps_vif)
546 ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
542 } 547 }
543 548
544 if (ath_beacon_dtim_pending_cab(skb)) { 549 if (ath_beacon_dtim_pending_cab(skb)) {
@@ -1115,12 +1120,12 @@ requeue_drop_frag:
1115requeue: 1120requeue:
1116 list_add_tail(&bf->list, &sc->rx.rxbuf); 1121 list_add_tail(&bf->list, &sc->rx.rxbuf);
1117 1122
1118 if (edma) { 1123 if (!edma) {
1119 ath_rx_edma_buf_link(sc, qtype); 1124 ath_rx_buf_relink(sc, bf, flush);
1120 } else {
1121 ath_rx_buf_relink(sc, bf);
1122 if (!flush) 1125 if (!flush)
1123 ath9k_hw_rxena(ah); 1126 ath9k_hw_rxena(ah);
1127 } else if (!flush) {
1128 ath_rx_edma_buf_link(sc, qtype);
1124 } 1129 }
1125 1130
1126 if (!budget--) 1131 if (!budget--)
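
The recv.c changes thread a flush flag down to the buffer-relink path: buffers still go back on the software list during a flush, but neither the legacy RXDP register nor the EDMA FIFO is re-armed, so the hardware cannot DMA into buffers that are about to be torn down. A minimal sketch of the relink decision, with the kernel details stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_buf { uint32_t daddr; uint32_t link; };
struct rx_ctx { uint32_t *rxlink; };

static void hw_putrxbuf(uint32_t daddr)
{
    /* Stands in for ath9k_hw_putrxbuf(): program the RX descriptor ptr. */
    printf("arm hardware with %#x\n", (unsigned)daddr);
}

/* Mirrors ath_rx_buf_link() after the change above. */
static void rx_buf_link(struct rx_ctx *ctx, struct rx_buf *bf, bool flush)
{
    if (ctx->rxlink)
        *ctx->rxlink = bf->daddr;   /* chain onto the previous descriptor */
    else if (!flush)
        hw_putrxbuf(bf->daddr);     /* first buffer: (re)arm the hardware */
    ctx->rxlink = &bf->link;
}

int main(void)
{
    struct rx_ctx ctx = { 0 };
    struct rx_buf a = { 0x1000, 0 }, b = { 0x2000, 0 };

    rx_buf_link(&ctx, &a, false);   /* arms hardware */
    rx_buf_link(&ctx, &b, false);   /* chains: a.link = 0x2000 */
    printf("a.link = %#x\n", (unsigned)a.link);
    return 0;
}
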
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index b1fd3fa84983..f1bbce3f7774 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -505,9 +505,6 @@
505#define AR_D_QCUMASK 0x000003FF 505#define AR_D_QCUMASK 0x000003FF
506#define AR_D_QCUMASK_RESV0 0xFFFFFC00 506#define AR_D_QCUMASK_RESV0 0xFFFFFC00
507 507
508#define AR_D_TXBLK_CMD 0x1038
509#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
510
511#define AR_D0_LCL_IFS 0x1040 508#define AR_D0_LCL_IFS 0x1040
512#define AR_D1_LCL_IFS 0x1044 509#define AR_D1_LCL_IFS 0x1044
513#define AR_D2_LCL_IFS 0x1048 510#define AR_D2_LCL_IFS 0x1048
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4c8cdb097b65..f8ded84b7be8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1707,7 +1707,9 @@ found:
1707 return 0; 1707 return 0;
1708} 1708}
1709 1709
1710static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1710static void carl9170_op_flush(struct ieee80211_hw *hw,
1711 struct ieee80211_vif *vif,
1712 u32 queues, bool drop)
1711{ 1713{
1712 struct ar9170 *ar = hw->priv; 1714 struct ar9170 *ar = hw->priv;
1713 unsigned int vid; 1715 unsigned int vid;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ca115f33746f..f35c7f30f9a6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1076,8 +1076,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
1076 1076
1077 carl9170_set_state(ar, CARL9170_STOPPED); 1077 carl9170_set_state(ar, CARL9170_STOPPED);
1078 1078
1079 return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME, 1079 err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
1080 &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2); 1080 &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
1081 if (err) {
1082 usb_put_dev(udev);
1083 usb_put_dev(udev);
1084 carl9170_free(ar);
1085 }
1086 return err;
1081} 1087}
1082 1088
1083static void carl9170_usb_disconnect(struct usb_interface *intf) 1089static void carl9170_usb_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a1a69c5db409..650be79c7ac9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -73,9 +73,52 @@ static const struct radar_types etsi_radar_types_v15 = {
73 .radar_types = etsi_radar_ref_types_v15, 73 .radar_types = etsi_radar_ref_types_v15,
74}; 74};
75 75
76/* for now, we support ETSI radar types, FCC and JP are TODO */ 76#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
77{ \
78 ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
79 PMIN - PRI_TOLERANCE, \
80 PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
81 PPB_THRESH(PPB), PRI_TOLERANCE, \
82}
83
84static const struct radar_detector_specs fcc_radar_ref_types[] = {
85 FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
86 FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
87 FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
88 FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
89 FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
90 FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
91};
92
93static const struct radar_types fcc_radar_types = {
94 .region = NL80211_DFS_FCC,
95 .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
96 .radar_types = fcc_radar_ref_types,
97};
98
99#define JP_PATTERN FCC_PATTERN
100static const struct radar_detector_specs jp_radar_ref_types[] = {
101 JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
102 JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
103 JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
104 JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
105 JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
106 JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
107 JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
108 JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
109 JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
110};
111
112static const struct radar_types jp_radar_types = {
113 .region = NL80211_DFS_JP,
114 .num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
115 .radar_types = jp_radar_ref_types,
116};
117
77static const struct radar_types *dfs_domains[] = { 118static const struct radar_types *dfs_domains[] = {
78 &etsi_radar_types_v15, 119 &etsi_radar_types_v15,
120 &fcc_radar_types,
121 &jp_radar_types,
79}; 122};
80 123
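
Each FCC_PATTERN()/JP_PATTERN() entry above is an initializer for struct radar_detector_specs; WIDTH_LOWER(), WIDTH_UPPER(), PPB_THRESH() and PRI_TOLERANCE are defined earlier in dfs_pattern_detector.c and are not shown in this hunk. A worked expansion of the PRI-related fields for FCC type 1, assuming PRI_TOLERANCE == 16 purely for illustration:

#include <stdio.h>

#define PRI_TOLERANCE 16    /* assumed; the real value lives elsewhere in the file */

int main(void)
{
    /* FCC_PATTERN(1, 0, 5, 150, 230, 1, 23): PMIN=150 PMAX=230 PRF=1 PPB=23 */
    int pmin = 150, pmax = 230, prf = 1, ppb = 23;

    printf("pri_min = %d\n", pmin - PRI_TOLERANCE);        /* 134 */
    printf("pri_max = %d\n", pmax * prf + PRI_TOLERANCE);  /* 246 */
    printf("ppb     = %d\n", ppb * prf);                   /* 23  */
    return 0;
}
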
81/** 124/**
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 7bf0ef8a1f56..63986931829e 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2068,7 +2068,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
2068 if (!msg_ind) 2068 if (!msg_ind)
2069 goto nomem; 2069 goto nomem;
2070 msg_ind->msg_len = len; 2070 msg_ind->msg_len = len;
2071 msg_ind->msg = kmalloc(len, GFP_KERNEL); 2071 msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
2072 if (!msg_ind->msg) { 2072 if (!msg_ind->msg) {
2073 kfree(msg_ind); 2073 kfree(msg_ind);
2074nomem: 2074nomem:
@@ -2080,7 +2080,6 @@ nomem:
2080 msg_header->msg_type); 2080 msg_header->msg_type);
2081 break; 2081 break;
2082 } 2082 }
2083 memcpy(msg_ind->msg, buf, len);
2084 mutex_lock(&wcn->hal_ind_mutex); 2083 mutex_lock(&wcn->hal_ind_mutex);
2085 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); 2084 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
2086 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); 2085 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
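
The wcn36xx change above is the standard kmalloc()-plus-memcpy() to kmemdup() conversion: one call both allocates len bytes and copies the source into them, and a single NULL check covers the whole operation. The pattern in isolation, as a sketch:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: msg = kmalloc(len, GFP_KERNEL); ... memcpy(msg, buf, len);
 * After:  one allocation-plus-copy, NULL on failure. */
static void *dup_payload(const void *buf, size_t len)
{
    return kmemdup(buf, len, GFP_KERNEL);
}
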
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4806a49cb61b..820d4ebd9322 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -172,7 +172,7 @@ static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
172 172
173static int wil_cfg80211_get_station(struct wiphy *wiphy, 173static int wil_cfg80211_get_station(struct wiphy *wiphy,
174 struct net_device *ndev, 174 struct net_device *ndev,
175 u8 *mac, struct station_info *sinfo) 175 const u8 *mac, struct station_info *sinfo)
176{ 176{
177 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 177 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
178 int rc; 178 int rc;
@@ -288,6 +288,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
288 } 288 }
289 289
290 wil->scan_request = request; 290 wil->scan_request = request;
291 mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
291 292
292 memset(&cmd, 0, sizeof(cmd)); 293 memset(&cmd, 0, sizeof(cmd));
293 cmd.cmd.num_channels = 0; 294 cmd.cmd.num_channels = 0;
@@ -671,7 +672,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
671} 672}
672 673
673static int wil_cfg80211_del_station(struct wiphy *wiphy, 674static int wil_cfg80211_del_station(struct wiphy *wiphy,
674 struct net_device *dev, u8 *mac) 675 struct net_device *dev, const u8 *mac)
675{ 676{
676 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 677 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
677 678
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ecdabe4adec3..8d4bc4bfb664 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -35,7 +35,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
35 void __iomem *x = wmi_addr(wil, vring->hwtail); 35 void __iomem *x = wmi_addr(wil, vring->hwtail);
36 36
37 seq_printf(s, "VRING %s = {\n", name); 37 seq_printf(s, "VRING %s = {\n", name);
38 seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa); 38 seq_printf(s, " pa = %pad\n", &vring->pa);
39 seq_printf(s, " va = 0x%p\n", vring->va); 39 seq_printf(s, " va = 0x%p\n", vring->va);
40 seq_printf(s, " size = %d\n", vring->size); 40 seq_printf(s, " size = %d\n", vring->size);
41 seq_printf(s, " swtail = %d\n", vring->swtail); 41 seq_printf(s, " swtail = %d\n", vring->swtail);
@@ -473,7 +473,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
473 u[0], u[1], u[2], u[3]); 473 u[0], u[1], u[2], u[3]);
474 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n", 474 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
475 u[4], u[5], u[6], u[7]); 475 u[4], u[5], u[6], u[7]);
476 seq_printf(s, " SKB = %p\n", skb); 476 seq_printf(s, " SKB = 0x%p\n", skb);
477 477
478 if (skb) { 478 if (skb) {
479 skb_get(skb); 479 skb_get(skb);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5824cd41e4ba..73593aa3cd98 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
338 } 338 }
339 339
340 if (isr) 340 if (isr)
341 wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr); 341 wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
342 342
343 wil->isr_misc = 0; 343 wil->isr_misc = 0;
344 344
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95f4efe9ef37..11e6d9d22eae 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -81,7 +81,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
81 memset(&sta->stats, 0, sizeof(sta->stats)); 81 memset(&sta->stats, 0, sizeof(sta->stats));
82} 82}
83 83
84static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) 84static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
85{ 85{
86 int cid = -ENOENT; 86 int cid = -ENOENT;
87 struct net_device *ndev = wil_to_ndev(wil); 87 struct net_device *ndev = wil_to_ndev(wil);
@@ -150,6 +150,15 @@ static void wil_connect_timer_fn(ulong x)
150 schedule_work(&wil->disconnect_worker); 150 schedule_work(&wil->disconnect_worker);
151} 151}
152 152
153static void wil_scan_timer_fn(ulong x)
154{
155 struct wil6210_priv *wil = (void *)x;
156
157 clear_bit(wil_status_fwready, &wil->status);
 158 	wil_err(wil, "Scan timeout detected, starting fw error recovery\n");
159 schedule_work(&wil->fw_error_worker);
160}
161
153static void wil_fw_error_worker(struct work_struct *work) 162static void wil_fw_error_worker(struct work_struct *work)
154{ 163{
155 struct wil6210_priv *wil = container_of(work, 164 struct wil6210_priv *wil = container_of(work,
@@ -161,12 +170,30 @@ static void wil_fw_error_worker(struct work_struct *work)
161 if (no_fw_recovery) 170 if (no_fw_recovery)
162 return; 171 return;
163 172
 173 	/* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
 174 	 * has passed since the last recovery attempt
175 */
176 if (time_is_after_jiffies(wil->last_fw_recovery +
177 WIL6210_FW_RECOVERY_TO))
178 wil->recovery_count++;
179 else
180 wil->recovery_count = 1; /* fw was alive for a long time */
181
182 if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
183 wil_err(wil, "too many recovery attempts (%d), giving up\n",
184 wil->recovery_count);
185 return;
186 }
187
188 wil->last_fw_recovery = jiffies;
189
164 mutex_lock(&wil->mutex); 190 mutex_lock(&wil->mutex);
165 switch (wdev->iftype) { 191 switch (wdev->iftype) {
166 case NL80211_IFTYPE_STATION: 192 case NL80211_IFTYPE_STATION:
167 case NL80211_IFTYPE_P2P_CLIENT: 193 case NL80211_IFTYPE_P2P_CLIENT:
168 case NL80211_IFTYPE_MONITOR: 194 case NL80211_IFTYPE_MONITOR:
169 wil_info(wil, "fw error recovery started...\n"); 195 wil_info(wil, "fw error recovery started (try %d)...\n",
196 wil->recovery_count);
170 wil_reset(wil); 197 wil_reset(wil);
171 198
172 /* need to re-allocate Rx ring after reset */ 199 /* need to re-allocate Rx ring after reset */
@@ -230,6 +257,7 @@ int wil_priv_init(struct wil6210_priv *wil)
230 257
231 wil->pending_connect_cid = -1; 258 wil->pending_connect_cid = -1;
232 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 259 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
260 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
233 261
234 INIT_WORK(&wil->connect_worker, wil_connect_worker); 262 INIT_WORK(&wil->connect_worker, wil_connect_worker);
235 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 263 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
@@ -249,10 +277,12 @@ int wil_priv_init(struct wil6210_priv *wil)
249 return -EAGAIN; 277 return -EAGAIN;
250 } 278 }
251 279
280 wil->last_fw_recovery = jiffies;
281
252 return 0; 282 return 0;
253} 283}
254 284
255void wil6210_disconnect(struct wil6210_priv *wil, void *bssid) 285void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
256{ 286{
257 del_timer_sync(&wil->connect_timer); 287 del_timer_sync(&wil->connect_timer);
258 _wil6210_disconnect(wil, bssid); 288 _wil6210_disconnect(wil, bssid);
@@ -260,6 +290,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
260 290
261void wil_priv_deinit(struct wil6210_priv *wil) 291void wil_priv_deinit(struct wil6210_priv *wil)
262{ 292{
293 del_timer_sync(&wil->scan_timer);
263 cancel_work_sync(&wil->disconnect_worker); 294 cancel_work_sync(&wil->disconnect_worker);
264 cancel_work_sync(&wil->fw_error_worker); 295 cancel_work_sync(&wil->fw_error_worker);
265 mutex_lock(&wil->mutex); 296 mutex_lock(&wil->mutex);
@@ -363,8 +394,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
363 wil_err(wil, "Firmware not ready\n"); 394 wil_err(wil, "Firmware not ready\n");
364 return -ETIME; 395 return -ETIME;
365 } else { 396 } else {
366 wil_dbg_misc(wil, "FW ready after %d ms\n", 397 wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
367 jiffies_to_msecs(to-left)); 398 jiffies_to_msecs(to-left), wil->hw_version);
368 } 399 }
369 return 0; 400 return 0;
370} 401}
@@ -391,6 +422,7 @@ int wil_reset(struct wil6210_priv *wil)
391 if (wil->scan_request) { 422 if (wil->scan_request) {
392 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", 423 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
393 wil->scan_request); 424 wil->scan_request);
425 del_timer_sync(&wil->scan_timer);
394 cfg80211_scan_done(wil->scan_request, true); 426 cfg80211_scan_done(wil->scan_request, true);
395 wil->scan_request = NULL; 427 wil->scan_request = NULL;
396 } 428 }
@@ -520,6 +552,7 @@ static int __wil_down(struct wil6210_priv *wil)
520 napi_disable(&wil->napi_tx); 552 napi_disable(&wil->napi_tx);
521 553
522 if (wil->scan_request) { 554 if (wil->scan_request) {
555 del_timer_sync(&wil->scan_timer);
523 cfg80211_scan_done(wil->scan_request, true); 556 cfg80211_scan_done(wil->scan_request, true);
524 wil->scan_request = NULL; 557 wil->scan_request = NULL;
525 } 558 }
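The throttle added to wil_fw_error_worker() is a "give up after N failures within a window" gate built from jiffies arithmetic: a reset within WIL6210_FW_RECOVERY_TO of the previous one counts as a repeat, while a reset after a long quiet period restarts the count at 1. A self-contained sketch of the same logic (kernel C; the demo_ names are illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_RECOVERY_TO	msecs_to_jiffies(5000)
#define DEMO_RECOVERY_RETRIES	5

struct demo_recovery {
	int count;
	unsigned long last;	/* jiffies of the last attempt */
};

/* returns true if another recovery attempt is allowed */
static bool demo_recovery_allowed(struct demo_recovery *r)
{
	if (time_is_after_jiffies(r->last + DEMO_RECOVERY_TO))
		r->count++;	/* failed again within the window */
	else
		r->count = 1;	/* fw was alive for a long time */

	if (r->count > DEMO_RECOVERY_RETRIES)
		return false;	/* too many attempts in a short time */

	r->last = jiffies;
	return true;
}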
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index fdcaeb820e75..106b6dcb773a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -32,12 +32,26 @@ static int wil_stop(struct net_device *ndev)
32 return wil_down(wil); 32 return wil_down(wil);
33} 33}
34 34
35static int wil_change_mtu(struct net_device *ndev, int new_mtu)
36{
37 struct wil6210_priv *wil = ndev_to_wil(ndev);
38
39 if (new_mtu < 68 || new_mtu > IEEE80211_MAX_DATA_LEN_DMG)
40 return -EINVAL;
41
42 wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
43 ndev->mtu = new_mtu;
44
45 return 0;
46}
47
35static const struct net_device_ops wil_netdev_ops = { 48static const struct net_device_ops wil_netdev_ops = {
36 .ndo_open = wil_open, 49 .ndo_open = wil_open,
37 .ndo_stop = wil_stop, 50 .ndo_stop = wil_stop,
38 .ndo_start_xmit = wil_start_xmit, 51 .ndo_start_xmit = wil_start_xmit,
39 .ndo_set_mac_address = eth_mac_addr, 52 .ndo_set_mac_address = eth_mac_addr,
40 .ndo_validate_addr = eth_validate_addr, 53 .ndo_validate_addr = eth_validate_addr,
54 .ndo_change_mtu = wil_change_mtu,
41}; 55};
42 56
43static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) 57static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
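wil_change_mtu() above is the stock .ndo_change_mtu recipe for this kernel generation: the driver validates the range itself (the min_mtu/max_mtu fields on struct net_device only arrived in later kernels) and then commits the new value. A sketch of the shape; DEMO_MAX_MTU is an assumed stand-in for IEEE80211_MAX_DATA_LEN_DMG:

#include <linux/netdevice.h>
#include <linux/errno.h>

#define DEMO_MAX_MTU 7920	/* assumption: 802.11ad max data length */

static int demo_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > DEMO_MAX_MTU)	/* 68 = IPv4 minimum */
		return -EINVAL;	/* MTU is left unchanged on failure */

	ndev->mtu = new_mtu;
	return 0;
}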
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f1e1bb338d68..1e2e07b9d13d 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
74 if (rc) 74 if (rc)
75 goto release_irq; 75 goto release_irq;
76 76
77 wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
78
79 return 0; 77 return 0;
80 78
81 release_irq: 79 release_irq:
@@ -140,7 +138,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
140 goto err_release_reg; 138 goto err_release_reg;
141 } 139 }
142 /* rollback to err_iounmap */ 140 /* rollback to err_iounmap */
143 dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr); 141 dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
144 142
145 wil = wil_if_alloc(dev, csr); 143 wil = wil_if_alloc(dev, csr);
146 if (IS_ERR(wil)) { 144 if (IS_ERR(wil)) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index d04629fe053f..747ae1275877 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -49,10 +49,17 @@ static void wil_release_reorder_frames(struct wil6210_priv *wil,
49{ 49{
50 int index; 50 int index;
51 51
52 while (seq_less(r->head_seq_num, hseq)) { 52 /* note: this function is never called with
 53 	 * hseq preceding r->head_seq_num, i.e. it always holds that
 54 	 * !seq_less(hseq, r->head_seq_num),
 55 	 * and thus on loop exit we should have
56 * r->head_seq_num == hseq
57 */
58 while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
53 index = reorder_index(r, r->head_seq_num); 59 index = reorder_index(r, r->head_seq_num);
54 wil_release_reorder_frame(wil, r, index); 60 wil_release_reorder_frame(wil, r, index);
55 } 61 }
62 r->head_seq_num = hseq;
56} 63}
57 64
58static void wil_reorder_release(struct wil6210_priv *wil, 65static void wil_reorder_release(struct wil6210_priv *wil,
@@ -91,6 +98,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
91 98
92 spin_lock(&r->reorder_lock); 99 spin_lock(&r->reorder_lock);
93 100
 101 	/* Due to the race between WMI events, where BACK establishment
 102 	 * is reported, and data Rx, a few packets may be passed up before
 103 	 * the reorder buffer gets allocated. Catch up by pretending the SSN
 104 	 * is what we see in the first Rx packet
105 */
106 if (r->first_time) {
107 r->first_time = false;
108 if (seq != r->head_seq_num) {
 109 			wil_err(wil, "Error: first frame with wrong sequence"
110 " %d, should be %d. Fixing...\n", seq,
111 r->head_seq_num);
112 r->head_seq_num = seq;
113 r->ssn = seq;
114 }
115 }
116
94 /* frame with out of date sequence number */ 117 /* frame with out of date sequence number */
95 if (seq_less(seq, r->head_seq_num)) { 118 if (seq_less(seq, r->head_seq_num)) {
96 dev_kfree_skb(skb); 119 dev_kfree_skb(skb);
@@ -162,6 +185,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
162 r->head_seq_num = ssn; 185 r->head_seq_num = ssn;
163 r->buf_size = size; 186 r->buf_size = size;
164 r->stored_mpdu_num = 0; 187 r->stored_mpdu_num = 0;
188 r->first_time = true;
165 return r; 189 return r;
166} 190}
167 191
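Two things change in the release path above: the walk stops as soon as the buffer is empty (the r->stored_mpdu_num guard), and head_seq_num is then snapped straight to hseq, so a hole in the window can no longer wedge the loop. The sequence-space comparison and the walk reduce to a few lines of plain C; this sketch uses a toy 16-slot window and the 802.11 modulo-4096 sequence space:

#include <stdbool.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   (SEQ_MODULO - 1)
#define BUF_SIZE   16

/* true if sequence number a precedes b in modulo-4096 space */
static bool seq_less(unsigned a, unsigned b)
{
	return ((a - b) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

struct demo_reorder {
	unsigned head_seq_num;
	unsigned stored_num;	/* frames currently buffered */
	void *slot[BUF_SIZE];
};

static void demo_release_frames(struct demo_reorder *r, unsigned hseq)
{
	while (seq_less(r->head_seq_num, hseq) && r->stored_num) {
		unsigned idx = r->head_seq_num % BUF_SIZE;

		if (r->slot[idx]) {	/* deliver the frame, free the slot */
			r->slot[idx] = NULL;
			r->stored_num--;
		}
		r->head_seq_num = (r->head_seq_num + 1) & SEQ_MASK;
	}
	r->head_seq_num = hseq;	/* jump over any remaining hole */
}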
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index c8c547457eb4..0784ef3d4ce2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -64,6 +64,22 @@ static inline int wil_vring_avail_tx(struct vring *vring)
64 return vring->size - used - 1; 64 return vring->size - used - 1;
65} 65}
66 66
67/**
68 * wil_vring_wmark_low - low watermark for available descriptor space
69 */
70static inline int wil_vring_wmark_low(struct vring *vring)
71{
72 return vring->size/8;
73}
74
75/**
76 * wil_vring_wmark_high - high watermark for available descriptor space
77 */
78static inline int wil_vring_wmark_high(struct vring *vring)
79{
80 return vring->size/4;
81}
82
67static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) 83static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
68{ 84{
69 struct device *dev = wil_to_dev(wil); 85 struct device *dev = wil_to_dev(wil);
@@ -98,8 +114,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
98 _d->dma.status = TX_DMA_STATUS_DU; 114 _d->dma.status = TX_DMA_STATUS_DU;
99 } 115 }
100 116
101 wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, 117 wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
102 vring->va, (unsigned long long)vring->pa, vring->ctx); 118 vring->va, &vring->pa, vring->ctx);
103 119
104 return 0; 120 return 0;
105} 121}
@@ -880,8 +896,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
880 pa = dma_map_single(dev, skb->data, 896 pa = dma_map_single(dev, skb->data,
881 skb_headlen(skb), DMA_TO_DEVICE); 897 skb_headlen(skb), DMA_TO_DEVICE);
882 898
883 wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb), 899 wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
884 skb->data, (unsigned long long)pa); 900 skb->data, &pa);
885 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, 901 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
886 skb->data, skb_headlen(skb), false); 902 skb->data, skb_headlen(skb), false);
887 903
@@ -1007,7 +1023,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1007 rc = wil_tx_vring(wil, vring, skb); 1023 rc = wil_tx_vring(wil, vring, skb);
1008 1024
1009 /* do we still have enough room in the vring? */ 1025 /* do we still have enough room in the vring? */
1010 if (wil_vring_avail_tx(vring) < vring->size/8) 1026 if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
1011 netif_tx_stop_all_queues(wil_to_ndev(wil)); 1027 netif_tx_stop_all_queues(wil_to_ndev(wil));
1012 1028
1013 switch (rc) { 1029 switch (rc) {
@@ -1116,7 +1132,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1116 done++; 1132 done++;
1117 } 1133 }
1118 } 1134 }
1119 if (wil_vring_avail_tx(vring) > vring->size/4) 1135 if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
1120 netif_tx_wake_all_queues(wil_to_ndev(wil)); 1136 netif_tx_wake_all_queues(wil_to_ndev(wil));
1121 1137
1122 return done; 1138 return done;
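Factoring size/8 and size/4 into named watermark helpers makes the hysteresis explicit: transmission is stopped when free descriptors fall below one eighth of the ring and only re-enabled once completions push availability back above one quarter. The gap between the two thresholds is the point; with a single threshold the queue would stop and wake on alternate packets under steady load. The control flow, reduced to a plain C sketch with the netif_* calls modeled as a flag:

#include <stdbool.h>

struct demo_vring { int size; int avail; };

static int demo_wmark_low(const struct demo_vring *v)  { return v->size / 8; }
static int demo_wmark_high(const struct demo_vring *v) { return v->size / 4; }

static void demo_after_tx(const struct demo_vring *v, bool *stopped)
{
	if (v->avail < demo_wmark_low(v))
		*stopped = true;	/* netif_tx_stop_all_queues() */
}

static void demo_after_completion(const struct demo_vring *v, bool *stopped)
{
	if (v->avail > demo_wmark_high(v))
		*stopped = false;	/* netif_tx_wake_all_queues() */
}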
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2a2dec75f026..e25edc52398f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -35,11 +35,14 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
35#define WIL6210_MEM_SIZE (2*1024*1024UL) 35#define WIL6210_MEM_SIZE (2*1024*1024UL)
36 36
37#define WIL6210_RX_RING_SIZE (128) 37#define WIL6210_RX_RING_SIZE (128)
38#define WIL6210_TX_RING_SIZE (128) 38#define WIL6210_TX_RING_SIZE (512)
39#define WIL6210_MAX_TX_RINGS (24) /* HW limit */ 39#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
40#define WIL6210_MAX_CID (8) /* HW limit */ 40#define WIL6210_MAX_CID (8) /* HW limit */
41#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ 41#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
42#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */ 42#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
43#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
44#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
45#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
43 46
44/* Hardware definitions begin */ 47/* Hardware definitions begin */
45 48
@@ -301,6 +304,7 @@ struct wil_tid_ampdu_rx {
301 u16 buf_size; 304 u16 buf_size;
302 u16 timeout; 305 u16 timeout;
303 u8 dialog_token; 306 u8 dialog_token;
 307 	bool first_time; /* is this the first time the buffer is used? */
304}; 308};
305 309
306struct wil6210_stats { 310struct wil6210_stats {
@@ -360,6 +364,8 @@ struct wil6210_priv {
360 u32 fw_version; 364 u32 fw_version;
361 u32 hw_version; 365 u32 hw_version;
362 u8 n_mids; /* number of additional MIDs as reported by FW */ 366 u8 n_mids; /* number of additional MIDs as reported by FW */
367 int recovery_count; /* num of FW recovery attempts in a short time */
368 unsigned long last_fw_recovery; /* jiffies of last fw recovery */
363 /* profile */ 369 /* profile */
364 u32 monitor_flags; 370 u32 monitor_flags;
365 u32 secure_pcp; /* create secure PCP? */ 371 u32 secure_pcp; /* create secure PCP? */
@@ -381,6 +387,7 @@ struct wil6210_priv {
381 struct work_struct disconnect_worker; 387 struct work_struct disconnect_worker;
382 struct work_struct fw_error_worker; /* for FW error recovery */ 388 struct work_struct fw_error_worker; /* for FW error recovery */
383 struct timer_list connect_timer; 389 struct timer_list connect_timer;
390 struct timer_list scan_timer; /* detect scan timeout */
384 int pending_connect_cid; 391 int pending_connect_cid;
385 struct list_head pending_wmi_ev; 392 struct list_head pending_wmi_ev;
386 /* 393 /*
@@ -507,7 +514,7 @@ void wil_wdev_free(struct wil6210_priv *wil);
507int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); 514int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
508int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan); 515int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
509int wmi_pcp_stop(struct wil6210_priv *wil); 516int wmi_pcp_stop(struct wil6210_priv *wil);
510void wil6210_disconnect(struct wil6210_priv *wil, void *bssid); 517void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
511 518
512int wil_rx_init(struct wil6210_priv *wil); 519int wil_rx_init(struct wil6210_priv *wil);
513void wil_rx_fini(struct wil6210_priv *wil); 520void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2ba56eef0c45..6cc0e182cc70 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
192 might_sleep(); 192 might_sleep();
193 193
194 if (!test_bit(wil_status_fwready, &wil->status)) { 194 if (!test_bit(wil_status_fwready, &wil->status)) {
195 wil_err(wil, "FW not ready\n"); 195 wil_err(wil, "WMI: cannot send command while FW not ready\n");
196 return -EAGAIN; 196 return -EAGAIN;
197 } 197 }
198 198
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
276 wil->fw_version = le32_to_cpu(evt->sw_version); 276 wil->fw_version = le32_to_cpu(evt->sw_version);
277 wil->n_mids = evt->numof_additional_mids; 277 wil->n_mids = evt->numof_additional_mids;
278 278
279 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version, 279 wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
280 evt->mac, wil->n_mids); 280 evt->mac, wil->n_mids);
281 281
282 if (!is_valid_ether_addr(ndev->dev_addr)) { 282 if (!is_valid_ether_addr(ndev->dev_addr)) {
283 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); 283 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
290static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, 290static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
291 int len) 291 int len)
292{ 292{
293 wil_dbg_wmi(wil, "WMI: FW ready\n"); 293 wil_dbg_wmi(wil, "WMI: got FW ready event\n");
294 294
295 set_bit(wil_status_fwready, &wil->status); 295 set_bit(wil_status_fwready, &wil->status);
296 /* reuse wmi_ready for the firmware ready indication */ 296 /* reuse wmi_ready for the firmware ready indication */
@@ -348,9 +348,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
348{ 348{
349 if (wil->scan_request) { 349 if (wil->scan_request) {
350 struct wmi_scan_complete_event *data = d; 350 struct wmi_scan_complete_event *data = d;
351 bool aborted = (data->status != 0); 351 bool aborted = (data->status != WMI_SCAN_SUCCESS);
352 352
353 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); 353 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
354 del_timer_sync(&wil->scan_timer);
354 cfg80211_scan_done(wil->scan_request, aborted); 355 cfg80211_scan_done(wil->scan_request, aborted);
355 wil->scan_request = NULL; 356 wil->scan_request = NULL;
356 } else { 357 } else {
@@ -658,21 +659,27 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
658 u8 *cmd; 659 u8 *cmd;
659 void __iomem *src; 660 void __iomem *src;
660 ulong flags; 661 ulong flags;
662 unsigned n;
661 663
662 if (!test_bit(wil_status_reset_done, &wil->status)) { 664 if (!test_bit(wil_status_reset_done, &wil->status)) {
663 wil_err(wil, "Reset not completed\n"); 665 wil_err(wil, "Reset not completed\n");
664 return; 666 return;
665 } 667 }
666 668
667 for (;;) { 669 for (n = 0;; n++) {
668 u16 len; 670 u16 len;
669 671
670 r->head = ioread32(wil->csr + HOST_MBOX + 672 r->head = ioread32(wil->csr + HOST_MBOX +
671 offsetof(struct wil6210_mbox_ctl, rx.head)); 673 offsetof(struct wil6210_mbox_ctl, rx.head));
672 if (r->tail == r->head) 674 if (r->tail == r->head) {
675 if (n == 0)
676 wil_dbg_wmi(wil, "No events?\n");
673 return; 677 return;
678 }
674 679
675 /* read cmd from tail */ 680 wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
681 r->head, r->tail);
682 /* read cmd descriptor from tail */
676 wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail), 683 wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
677 sizeof(struct wil6210_mbox_ring_desc)); 684 sizeof(struct wil6210_mbox_ring_desc));
678 if (d_tail.sync == 0) { 685 if (d_tail.sync == 0) {
@@ -680,13 +687,18 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
680 return; 687 return;
681 } 688 }
682 689
690 /* read cmd header from descriptor */
683 if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) { 691 if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
684 wil_err(wil, "Mbox evt at 0x%08x?\n", 692 wil_err(wil, "Mbox evt at 0x%08x?\n",
685 le32_to_cpu(d_tail.addr)); 693 le32_to_cpu(d_tail.addr));
686 return; 694 return;
687 } 695 }
688
689 len = le16_to_cpu(hdr.len); 696 len = le16_to_cpu(hdr.len);
697 wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
698 le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
699 hdr.flags);
700
701 /* read cmd buffer from descriptor */
690 src = wmi_buffer(wil, d_tail.addr) + 702 src = wmi_buffer(wil, d_tail.addr) +
691 sizeof(struct wil6210_mbox_hdr); 703 sizeof(struct wil6210_mbox_hdr);
692 evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event, 704 evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
@@ -702,9 +714,6 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
702 iowrite32(0, wil->csr + HOSTADDR(r->tail) + 714 iowrite32(0, wil->csr + HOSTADDR(r->tail) +
703 offsetof(struct wil6210_mbox_ring_desc, sync)); 715 offsetof(struct wil6210_mbox_ring_desc, sync));
704 /* indicate */ 716 /* indicate */
705 wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
706 le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
707 hdr.flags);
708 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 717 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
709 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 718 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
710 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; 719 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
@@ -734,6 +743,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
734 wil_dbg_wmi(wil, "queue_work -> %d\n", q); 743 wil_dbg_wmi(wil, "queue_work -> %d\n", q);
735 } 744 }
736 } 745 }
746 if (n > 1)
747 wil_dbg_wmi(wil, "%s -> %d events processed\n", __func__, n);
737} 748}
738 749
739int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, 750int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
@@ -802,6 +813,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
802 .network_type = wmi_nettype, 813 .network_type = wmi_nettype,
803 .disable_sec_offload = 1, 814 .disable_sec_offload = 1,
804 .channel = chan - 1, 815 .channel = chan - 1,
816 .pcp_max_assoc_sta = WIL6210_MAX_CID,
805 }; 817 };
806 struct { 818 struct {
807 struct wil6210_mbox_hdr_wmi wmi; 819 struct wil6210_mbox_hdr_wmi wmi;
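wmi_recv_cmd() above is a single-consumer ring walk: re-read the producer's head on each pass, drain descriptors until the tail catches up, and count how many events this invocation handled (the new n counter feeds the "events processed" debug line). Stripped of the ioread32()/iowrite32() accessors, the loop shape is roughly this sketch (plain C, illustrative names):

struct demo_ring {
	unsigned head;	/* advanced by the producer (firmware) */
	unsigned tail;	/* owned by the consumer (driver) */
};

static unsigned demo_drain_events(struct demo_ring *r, unsigned nentries)
{
	unsigned n;

	for (n = 0;; n++) {
		/* the driver re-reads the mailbox head register here */
		if (r->tail == r->head)
			break;	/* ring empty */

		/* ... fetch the descriptor at tail, copy out the event,
		 * clear the sync word, queue deferred work ...
		 */
		r->tail = (r->tail + 1) % nentries;
	}
	return n;
}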
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 50b8528394f4..17334c852866 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -28,7 +28,7 @@
28#define __WILOCITY_WMI_H__ 28#define __WILOCITY_WMI_H__
29 29
30/* General */ 30/* General */
31 31#define WILOCITY_MAX_ASSOC_STA (8)
32#define WMI_MAC_LEN (6) 32#define WMI_MAC_LEN (6)
33#define WMI_PROX_RANGE_NUM (3) 33#define WMI_PROX_RANGE_NUM (3)
34 34
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
219 __le16 disconnect_reason; 219 __le16 disconnect_reason;
220} __packed; 220} __packed;
221 221
222/*
223 * WMI_RECONNECT_CMDID
224 */
225struct wmi_reconnect_cmd {
226 u8 channel; /* hint */
227 u8 reserved;
228 u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
229} __packed;
230
231 222
232/* 223/*
233 * WMI_SET_PMK_CMDID 224 * WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
296 WMI_LONG_SCAN = 0, 287 WMI_LONG_SCAN = 0,
297 WMI_SHORT_SCAN = 1, 288 WMI_SHORT_SCAN = 1,
298 WMI_PBC_SCAN = 2, 289 WMI_PBC_SCAN = 2,
290 WMI_ACTIVE_SCAN = 3,
291 WMI_DIRECT_SCAN = 4,
299}; 292};
300 293
301struct wmi_start_scan_cmd { 294struct wmi_start_scan_cmd {
302 u8 reserved[8]; 295 u8 direct_scan_mac_addr[6];
303 296 u8 reserved[2];
304 __le32 home_dwell_time; /* Max duration in the home channel(ms) */ 297 __le32 home_dwell_time; /* Max duration in the home channel(ms) */
305 __le32 force_scan_interval; /* Time interval between scans (ms)*/ 298 __le32 force_scan_interval; /* Time interval between scans (ms)*/
306 u8 scan_type; /* wmi_scan_type */ 299 u8 scan_type; /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
332 u8 ssid[WMI_MAX_SSID_LEN]; 325 u8 ssid[WMI_MAX_SSID_LEN];
333} __packed; 326} __packed;
334 327
328
335/* 329/*
336 * WMI_SET_APPIE_CMDID 330 * WMI_SET_APPIE_CMDID
337 * Add Application specified IE to a management frame 331 * Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
427 __le16 frag_num; 421 __le16 frag_num;
428 __le64 ss_mask; 422 __le64 ss_mask;
429 u8 network_type; 423 u8 network_type;
430 u8 reserved; 424 u8 pcp_max_assoc_sta;
431 u8 disable_sec_offload; 425 u8 disable_sec_offload;
432 u8 disable_sec; 426 u8 disable_sec;
433} __packed; 427} __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
450struct wmi_port_allocate_cmd { 444struct wmi_port_allocate_cmd {
451 u8 mac[WMI_MAC_LEN]; 445 u8 mac[WMI_MAC_LEN];
452 u8 port_role; 446 u8 port_role;
453 u8 midid; 447 u8 mid;
454} __packed; 448} __packed;
455 449
456/* 450/*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
467enum wmi_discovery_mode { 461enum wmi_discovery_mode {
468 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0, 462 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
469 WMI_DISCOVERY_MODE_OFFLOAD = 1, 463 WMI_DISCOVERY_MODE_OFFLOAD = 1,
464 WMI_DISCOVERY_MODE_PEER2PEER = 2,
470}; 465};
471 466
472struct wmi_p2p_cfg_cmd { 467struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
493 */ 488 */
494struct wmi_pcp_start_cmd { 489struct wmi_pcp_start_cmd {
495 __le16 bcon_interval; 490 __le16 bcon_interval;
496 u8 reserved0[10]; 491 u8 pcp_max_assoc_sta;
492 u8 reserved0[9];
497 u8 network_type; 493 u8 network_type;
498 u8 channel; 494 u8 channel;
499 u8 disable_sec_offload; 495 u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
857 WMI_RF_MGMT_STATUS_EVENTID = 0x1853, 853 WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
858 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, 854 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
859 WMI_RX_MGMT_PACKET_EVENTID = 0x1840, 855 WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
856 WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
860 857
861 /* Performance monitoring events */ 858 /* Performance monitoring events */
862 WMI_DATA_PORT_OPEN_EVENTID = 0x1860, 859 WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
1040struct wmi_disconnect_event { 1037struct wmi_disconnect_event {
1041 __le16 protocol_reason_status; /* reason code, see 802.11 spec. */ 1038 __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
1042 u8 bssid[WMI_MAC_LEN]; /* set if known */ 1039 u8 bssid[WMI_MAC_LEN]; /* set if known */
1043 u8 disconnect_reason; /* see wmi_disconnect_reason_e */ 1040 u8 disconnect_reason; /* see wmi_disconnect_reason */
1044 u8 assoc_resp_len; 1041 u8 assoc_resp_len; /* not in use */
1045 u8 assoc_info[0]; 1042 u8 assoc_info[0]; /* not in use */
1046} __packed; 1043} __packed;
1047 1044
1048/* 1045/*
1049 * WMI_SCAN_COMPLETE_EVENTID 1046 * WMI_SCAN_COMPLETE_EVENTID
1050 */ 1047 */
1048enum scan_status {
1049 WMI_SCAN_SUCCESS = 0,
1050 WMI_SCAN_FAILED = 1,
1051 WMI_SCAN_ABORTED = 2,
1052 WMI_SCAN_REJECTED = 3,
1053};
1054
1051struct wmi_scan_complete_event { 1055struct wmi_scan_complete_event {
1052 __le32 status; 1056 __le32 status; /* scan_status */
1053} __packed; 1057} __packed;
1054 1058
1055/* 1059/*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
1256 u8 channel; /* From Radio MNGR */ 1260 u8 channel; /* From Radio MNGR */
1257} __packed; 1261} __packed;
1258 1262
1263
1264/*
1265 * WMI_TX_MGMT_PACKET_EVENTID
1266 */
1267struct wmi_tx_mgmt_packet_event {
1268 u8 payload[0];
1269} __packed;
1270
1259struct wmi_rx_mgmt_packet_event { 1271struct wmi_rx_mgmt_packet_event {
1260 struct wmi_rx_mgmt_info info; 1272 struct wmi_rx_mgmt_info info;
1261 u8 payload[0]; 1273 u8 payload[0];
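With the scan completion codes now named, the handler in wmi.c tests against the enum instead of a bare zero: anything other than WMI_SCAN_SUCCESS is reported to cfg80211_scan_done() as an aborted scan. In isolation (plain C sketch; in the driver the status arrives as an on-wire __le32):

#include <stdbool.h>

enum scan_status {
	WMI_SCAN_SUCCESS  = 0,
	WMI_SCAN_FAILED   = 1,
	WMI_SCAN_ABORTED  = 2,
	WMI_SCAN_REJECTED = 3,
};

static bool demo_scan_was_aborted(unsigned status)
{
	return status != WMI_SCAN_SUCCESS;	/* failed/aborted/rejected */
}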
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 088d544ec63f..e3f67b8d3f80 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,7 +1,8 @@
1config B43 1config B43
2 tristate "Broadcom 43xx wireless support (mac80211 stack)" 2 tristate "Broadcom 43xx wireless support (mac80211 stack)"
3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA 3 depends on (BCMA_POSSIBLE || SSB_POSSIBLE) && MAC80211 && HAS_DMA
4 select SSB 4 select BCMA if B43_BCMA
5 select SSB if B43_SSB
5 select FW_LOADER 6 select FW_LOADER
6 ---help--- 7 ---help---
7 b43 is a driver for the Broadcom 43xx series wireless devices. 8 b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -27,14 +28,33 @@ config B43
27 If unsure, say M. 28 If unsure, say M.
28 29
29config B43_BCMA 30config B43_BCMA
30 bool "Support for BCMA bus" 31 bool
31 depends on B43 && (BCMA = y || BCMA = B43)
32 default y
33 32
34config B43_SSB 33config B43_SSB
35 bool 34 bool
36 depends on B43 && (SSB = y || SSB = B43) 35
37 default y 36choice
37 prompt "Supported bus types"
38 depends on B43
39 default B43_BCMA_AND_SSB
40
41config B43_BUSES_BCMA_AND_SSB
42 bool "BCMA and SSB"
43 depends on BCMA_POSSIBLE && SSB_POSSIBLE
44 select B43_BCMA
45 select B43_SSB
46
47config B43_BUSES_BCMA
48 bool "BCMA only"
49 depends on BCMA_POSSIBLE
50 select B43_BCMA
51
52config B43_BUSES_SSB
53 bool "SSB only"
54 depends on SSB_POSSIBLE
55 select B43_SSB
56
57endchoice
38 58
39# Auto-select SSB PCI-HOST support, if possible 59# Auto-select SSB PCI-HOST support, if possible
40config B43_PCI_AUTOSELECT 60config B43_PCI_AUTOSELECT
@@ -53,7 +73,7 @@ config B43_PCICORE_AUTOSELECT
53 73
54config B43_PCMCIA 74config B43_PCMCIA
55 bool "Broadcom 43xx PCMCIA device support" 75 bool "Broadcom 43xx PCMCIA device support"
56 depends on B43 && SSB_PCMCIAHOST_POSSIBLE 76 depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
57 select SSB_PCMCIAHOST 77 select SSB_PCMCIAHOST
58 ---help--- 78 ---help---
59 Broadcom 43xx PCMCIA device support. 79 Broadcom 43xx PCMCIA device support.
@@ -73,7 +93,7 @@ config B43_PCMCIA
73 93
74config B43_SDIO 94config B43_SDIO
75 bool "Broadcom 43xx SDIO device support" 95 bool "Broadcom 43xx SDIO device support"
76 depends on B43 && SSB_SDIOHOST_POSSIBLE 96 depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
77 select SSB_SDIOHOST 97 select SSB_SDIOHOST
78 ---help--- 98 ---help---
79 Broadcom 43xx device support for Soft-MAC SDIO devices. 99 Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -98,7 +118,7 @@ config B43_BCMA_PIO
98 118
99config B43_PIO 119config B43_PIO
100 bool 120 bool
101 depends on B43 121 depends on B43 && B43_SSB
102 select SSB_BLOCKIO 122 select SSB_BLOCKIO
103 default y 123 default y
104 124
@@ -116,7 +136,7 @@ config B43_PHY_N
116 136
117config B43_PHY_LP 137config B43_PHY_LP
118 bool "Support for low-power (LP-PHY) devices" 138 bool "Support for low-power (LP-PHY) devices"
119 depends on B43 139 depends on B43 && B43_SSB
120 default y 140 default y
121 ---help--- 141 ---help---
122 Support for the LP-PHY. 142 Support for the LP-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54376fddfaf9..4113b6934764 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -915,10 +915,6 @@ struct b43_wl {
915 char rng_name[30 + 1]; 915 char rng_name[30 + 1];
916#endif /* CONFIG_B43_HWRNG */ 916#endif /* CONFIG_B43_HWRNG */
917 917
918 /* List of all wireless devices on this chip */
919 struct list_head devlist;
920 u8 nr_devs;
921
922 bool radiotap_enabled; 918 bool radiotap_enabled;
923 bool radio_enabled; 919 bool radio_enabled;
924 920
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
index 184c95659279..f3205c6988bc 100644
--- a/drivers/net/wireless/b43/bus.h
+++ b/drivers/net/wireless/b43/bus.h
@@ -5,7 +5,9 @@ enum b43_bus_type {
5#ifdef CONFIG_B43_BCMA 5#ifdef CONFIG_B43_BCMA
6 B43_BUS_BCMA, 6 B43_BUS_BCMA,
7#endif 7#endif
8#ifdef CONFIG_B43_SSB
8 B43_BUS_SSB, 9 B43_BUS_SSB,
10#endif
9}; 11};
10 12
11struct b43_bus_dev { 13struct b43_bus_dev {
@@ -52,13 +54,21 @@ struct b43_bus_dev {
52 54
53static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev) 55static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
54{ 56{
57#ifdef CONFIG_B43_SSB
55 return (dev->bus_type == B43_BUS_SSB && 58 return (dev->bus_type == B43_BUS_SSB &&
56 dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA); 59 dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
60#else
61 return false;
62#endif
57} 63}
58static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev) 64static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
59{ 65{
66#ifdef CONFIG_B43_SSB
60 return (dev->bus_type == B43_BUS_SSB && 67 return (dev->bus_type == B43_BUS_SSB &&
61 dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO); 68 dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
69#else
70 return false;
71#endif
62} 72}
63 73
64struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core); 74struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
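Guarding the SSB-only helpers keeps the B43_BUS_SSB enumerator and the sdev dereference out of BCMA-only builds, and turns each helper into a constant false that the compiler folds away together with every dependent branch. The pattern in isolation (plain C sketch with made-up names):

#include <stdbool.h>

enum { DEMO_BUS_BCMA, DEMO_BUS_SSB };

struct demo_bus_dev { int bus_type; };

static inline bool demo_host_is_pcmcia(const struct demo_bus_dev *dev)
{
#ifdef CONFIG_B43_SSB
	/* the real helper also checks the SSB host bustype */
	return dev->bus_type == DEMO_BUS_SSB;
#else
	(void)dev;
	return false;	/* constant-folds; callers' branches vanish */
#endif
}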
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 69fc3d65531a..32538ac5f7e4 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -182,7 +182,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
182#define b43_g_ratetable (__b43_ratetable + 0) 182#define b43_g_ratetable (__b43_ratetable + 0)
183#define b43_g_ratetable_size 12 183#define b43_g_ratetable_size 12
184 184
185#define CHAN4G(_channel, _freq, _flags) { \ 185#define CHAN2G(_channel, _freq, _flags) { \
186 .band = IEEE80211_BAND_2GHZ, \ 186 .band = IEEE80211_BAND_2GHZ, \
187 .center_freq = (_freq), \ 187 .center_freq = (_freq), \
188 .hw_value = (_channel), \ 188 .hw_value = (_channel), \
@@ -191,23 +191,31 @@ static struct ieee80211_rate __b43_ratetable[] = {
191 .max_power = 30, \ 191 .max_power = 30, \
192} 192}
193static struct ieee80211_channel b43_2ghz_chantable[] = { 193static struct ieee80211_channel b43_2ghz_chantable[] = {
194 CHAN4G(1, 2412, 0), 194 CHAN2G(1, 2412, 0),
195 CHAN4G(2, 2417, 0), 195 CHAN2G(2, 2417, 0),
196 CHAN4G(3, 2422, 0), 196 CHAN2G(3, 2422, 0),
197 CHAN4G(4, 2427, 0), 197 CHAN2G(4, 2427, 0),
198 CHAN4G(5, 2432, 0), 198 CHAN2G(5, 2432, 0),
199 CHAN4G(6, 2437, 0), 199 CHAN2G(6, 2437, 0),
200 CHAN4G(7, 2442, 0), 200 CHAN2G(7, 2442, 0),
201 CHAN4G(8, 2447, 0), 201 CHAN2G(8, 2447, 0),
202 CHAN4G(9, 2452, 0), 202 CHAN2G(9, 2452, 0),
203 CHAN4G(10, 2457, 0), 203 CHAN2G(10, 2457, 0),
204 CHAN4G(11, 2462, 0), 204 CHAN2G(11, 2462, 0),
205 CHAN4G(12, 2467, 0), 205 CHAN2G(12, 2467, 0),
206 CHAN4G(13, 2472, 0), 206 CHAN2G(13, 2472, 0),
207 CHAN4G(14, 2484, 0), 207 CHAN2G(14, 2484, 0),
208}; 208};
209#undef CHAN4G 209#undef CHAN2G
210 210
211#define CHAN4G(_channel, _flags) { \
212 .band = IEEE80211_BAND_5GHZ, \
213 .center_freq = 4000 + (5 * (_channel)), \
214 .hw_value = (_channel), \
215 .flags = (_flags), \
216 .max_antenna_gain = 0, \
217 .max_power = 30, \
218}
211#define CHAN5G(_channel, _flags) { \ 219#define CHAN5G(_channel, _flags) { \
212 .band = IEEE80211_BAND_5GHZ, \ 220 .band = IEEE80211_BAND_5GHZ, \
213 .center_freq = 5000 + (5 * (_channel)), \ 221 .center_freq = 5000 + (5 * (_channel)), \
@@ -217,6 +225,18 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
217 .max_power = 30, \ 225 .max_power = 30, \
218} 226}
219static struct ieee80211_channel b43_5ghz_nphy_chantable[] = { 227static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
228 CHAN4G(184, 0), CHAN4G(186, 0),
229 CHAN4G(188, 0), CHAN4G(190, 0),
230 CHAN4G(192, 0), CHAN4G(194, 0),
231 CHAN4G(196, 0), CHAN4G(198, 0),
232 CHAN4G(200, 0), CHAN4G(202, 0),
233 CHAN4G(204, 0), CHAN4G(206, 0),
234 CHAN4G(208, 0), CHAN4G(210, 0),
235 CHAN4G(212, 0), CHAN4G(214, 0),
236 CHAN4G(216, 0), CHAN4G(218, 0),
237 CHAN4G(220, 0), CHAN4G(222, 0),
238 CHAN4G(224, 0), CHAN4G(226, 0),
239 CHAN4G(228, 0),
220 CHAN5G(32, 0), CHAN5G(34, 0), 240 CHAN5G(32, 0), CHAN5G(34, 0),
221 CHAN5G(36, 0), CHAN5G(38, 0), 241 CHAN5G(36, 0), CHAN5G(38, 0),
222 CHAN5G(40, 0), CHAN5G(42, 0), 242 CHAN5G(40, 0), CHAN5G(42, 0),
@@ -260,18 +280,7 @@ static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
260 CHAN5G(170, 0), CHAN5G(172, 0), 280 CHAN5G(170, 0), CHAN5G(172, 0),
261 CHAN5G(174, 0), CHAN5G(176, 0), 281 CHAN5G(174, 0), CHAN5G(176, 0),
262 CHAN5G(178, 0), CHAN5G(180, 0), 282 CHAN5G(178, 0), CHAN5G(180, 0),
263 CHAN5G(182, 0), CHAN5G(184, 0), 283 CHAN5G(182, 0),
264 CHAN5G(186, 0), CHAN5G(188, 0),
265 CHAN5G(190, 0), CHAN5G(192, 0),
266 CHAN5G(194, 0), CHAN5G(196, 0),
267 CHAN5G(198, 0), CHAN5G(200, 0),
268 CHAN5G(202, 0), CHAN5G(204, 0),
269 CHAN5G(206, 0), CHAN5G(208, 0),
270 CHAN5G(210, 0), CHAN5G(212, 0),
271 CHAN5G(214, 0), CHAN5G(216, 0),
272 CHAN5G(218, 0), CHAN5G(220, 0),
273 CHAN5G(222, 0), CHAN5G(224, 0),
274 CHAN5G(226, 0), CHAN5G(228, 0),
275}; 284};
276 285
277static struct ieee80211_channel b43_5ghz_aphy_chantable[] = { 286static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
@@ -295,6 +304,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
295 CHAN5G(208, 0), CHAN5G(212, 0), 304 CHAN5G(208, 0), CHAN5G(212, 0),
296 CHAN5G(216, 0), 305 CHAN5G(216, 0),
297}; 306};
307#undef CHAN4G
298#undef CHAN5G 308#undef CHAN5G
299 309
300static struct ieee80211_supported_band b43_band_5GHz_nphy = { 310static struct ieee80211_supported_band b43_band_5GHz_nphy = {
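The new CHAN4G macro exists because channels 184-228 sit in the 4.9 GHz range, where the center frequency is 4000 + 5 * chan MHz, while regular 5 GHz channels use 5000 + 5 * chan; the old table computed all of them from the 5000 MHz base. A runnable check of the arithmetic (plain C; the >= 184 cutoff mirrors the table above):

#include <stdio.h>

static int center_freq_mhz(int chan)
{
	/* 4.9 GHz numbering vs. regular 5 GHz numbering */
	return chan >= 184 ? 4000 + 5 * chan : 5000 + 5 * chan;
}

int main(void)
{
	printf("chan 184 -> %d MHz\n", center_freq_mhz(184));	/* 4920 */
	printf("chan  36 -> %d MHz\n", center_freq_mhz(36));	/* 5180 */
	return 0;
}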
@@ -1175,18 +1185,7 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
1175 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags); 1185 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
1176 udelay(2); 1186 udelay(2);
1177 1187
1178 /* Take PHY out of reset */ 1188 b43_phy_take_out_of_reset(dev);
1179 flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
1180 flags &= ~B43_BCMA_IOCTL_PHY_RESET;
1181 flags |= BCMA_IOCTL_FGC;
1182 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
1183 udelay(1);
1184
1185 /* Do not force clock anymore */
1186 flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
1187 flags &= ~BCMA_IOCTL_FGC;
1188 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
1189 udelay(1);
1190} 1189}
1191 1190
1192static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode) 1191static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
@@ -1195,18 +1194,22 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1195 B43_BCMA_CLKCTLST_PHY_PLL_REQ; 1194 B43_BCMA_CLKCTLST_PHY_PLL_REQ;
1196 u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST | 1195 u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
1197 B43_BCMA_CLKCTLST_PHY_PLL_ST; 1196 B43_BCMA_CLKCTLST_PHY_PLL_ST;
1197 u32 flags;
1198
1199 flags = B43_BCMA_IOCTL_PHY_CLKEN;
1200 if (gmode)
1201 flags |= B43_BCMA_IOCTL_GMODE;
1202 b43_device_enable(dev, flags);
1198 1203
1199 b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
1200 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST); 1204 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
1201 b43_bcma_phy_reset(dev); 1205 b43_bcma_phy_reset(dev);
1202 bcma_core_pll_ctl(dev->dev->bdev, req, status, true); 1206 bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
1203} 1207}
1204#endif 1208#endif
1205 1209
1210#ifdef CONFIG_B43_SSB
1206static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode) 1211static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1207{ 1212{
1208 struct ssb_device *sdev = dev->dev->sdev;
1209 u32 tmslow;
1210 u32 flags = 0; 1213 u32 flags = 0;
1211 1214
1212 if (gmode) 1215 if (gmode)
@@ -1218,18 +1221,9 @@ static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1218 b43_device_enable(dev, flags); 1221 b43_device_enable(dev, flags);
1219 msleep(2); /* Wait for the PLL to turn on. */ 1222 msleep(2); /* Wait for the PLL to turn on. */
1220 1223
1221 /* Now take the PHY out of Reset again */ 1224 b43_phy_take_out_of_reset(dev);
1222 tmslow = ssb_read32(sdev, SSB_TMSLOW);
1223 tmslow |= SSB_TMSLOW_FGC;
1224 tmslow &= ~B43_TMSLOW_PHYRESET;
1225 ssb_write32(sdev, SSB_TMSLOW, tmslow);
1226 ssb_read32(sdev, SSB_TMSLOW); /* flush */
1227 msleep(1);
1228 tmslow &= ~SSB_TMSLOW_FGC;
1229 ssb_write32(sdev, SSB_TMSLOW, tmslow);
1230 ssb_read32(sdev, SSB_TMSLOW); /* flush */
1231 msleep(1);
1232} 1225}
1226#endif
1233 1227
1234void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode) 1228void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1235{ 1229{
@@ -2704,32 +2698,37 @@ static int b43_upload_initvals(struct b43_wldev *dev)
2704 struct b43_firmware *fw = &dev->fw; 2698 struct b43_firmware *fw = &dev->fw;
2705 const struct b43_iv *ivals; 2699 const struct b43_iv *ivals;
2706 size_t count; 2700 size_t count;
2707 int err;
2708 2701
2709 hdr = (const struct b43_fw_header *)(fw->initvals.data->data); 2702 hdr = (const struct b43_fw_header *)(fw->initvals.data->data);
2710 ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len); 2703 ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len);
2711 count = be32_to_cpu(hdr->size); 2704 count = be32_to_cpu(hdr->size);
2712 err = b43_write_initvals(dev, ivals, count, 2705 return b43_write_initvals(dev, ivals, count,
2713 fw->initvals.data->size - hdr_len); 2706 fw->initvals.data->size - hdr_len);
2714 if (err) 2707}
2715 goto out;
2716 if (fw->initvals_band.data) {
2717 hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
2718 ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
2719 count = be32_to_cpu(hdr->size);
2720 err = b43_write_initvals(dev, ivals, count,
2721 fw->initvals_band.data->size - hdr_len);
2722 if (err)
2723 goto out;
2724 }
2725out:
2726 2708
2727 return err; 2709static int b43_upload_initvals_band(struct b43_wldev *dev)
2710{
2711 const size_t hdr_len = sizeof(struct b43_fw_header);
2712 const struct b43_fw_header *hdr;
2713 struct b43_firmware *fw = &dev->fw;
2714 const struct b43_iv *ivals;
2715 size_t count;
2716
2717 if (!fw->initvals_band.data)
2718 return 0;
2719
2720 hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
2721 ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
2722 count = be32_to_cpu(hdr->size);
2723 return b43_write_initvals(dev, ivals, count,
2724 fw->initvals_band.data->size - hdr_len);
2728} 2725}
2729 2726
2730/* Initialize the GPIOs 2727/* Initialize the GPIOs
2731 * http://bcm-specs.sipsolutions.net/GPIO 2728 * http://bcm-specs.sipsolutions.net/GPIO
2732 */ 2729 */
2730
2731#ifdef CONFIG_B43_SSB
2733static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev) 2732static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
2734{ 2733{
2735 struct ssb_bus *bus = dev->dev->sdev->bus; 2734 struct ssb_bus *bus = dev->dev->sdev->bus;
@@ -2740,10 +2739,13 @@ static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
2740 return bus->chipco.dev; 2739 return bus->chipco.dev;
2741#endif 2740#endif
2742} 2741}
2742#endif
2743 2743
2744static int b43_gpio_init(struct b43_wldev *dev) 2744static int b43_gpio_init(struct b43_wldev *dev)
2745{ 2745{
2746#ifdef CONFIG_B43_SSB
2746 struct ssb_device *gpiodev; 2747 struct ssb_device *gpiodev;
2748#endif
2747 u32 mask, set; 2749 u32 mask, set;
2748 2750
2749 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0); 2751 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
@@ -2802,7 +2804,9 @@ static int b43_gpio_init(struct b43_wldev *dev)
2802/* Turn off all GPIO stuff. Call this on module unload, for example. */ 2804/* Turn off all GPIO stuff. Call this on module unload, for example. */
2803static void b43_gpio_cleanup(struct b43_wldev *dev) 2805static void b43_gpio_cleanup(struct b43_wldev *dev)
2804{ 2806{
2807#ifdef CONFIG_B43_SSB
2805 struct ssb_device *gpiodev; 2808 struct ssb_device *gpiodev;
2809#endif
2806 2810
2807 switch (dev->dev->bus_type) { 2811 switch (dev->dev->bus_type) {
2808#ifdef CONFIG_B43_BCMA 2812#ifdef CONFIG_B43_BCMA
@@ -3086,6 +3090,10 @@ static int b43_chip_init(struct b43_wldev *dev)
3086 if (err) 3090 if (err)
3087 goto err_gpio_clean; 3091 goto err_gpio_clean;
3088 3092
3093 err = b43_upload_initvals_band(dev);
3094 if (err)
3095 goto err_gpio_clean;
3096
3089 /* Turn the Analog on and initialize the PHY. */ 3097 /* Turn the Analog on and initialize the PHY. */
3090 phy->ops->switch_analog(dev, 1); 3098 phy->ops->switch_analog(dev, 1);
3091 err = b43_phy_init(dev); 3099 err = b43_phy_init(dev);
@@ -3685,37 +3693,6 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
3685 mutex_unlock(&wl->mutex); 3693 mutex_unlock(&wl->mutex);
3686} 3694}
3687 3695
3688static void b43_put_phy_into_reset(struct b43_wldev *dev)
3689{
3690 u32 tmp;
3691
3692 switch (dev->dev->bus_type) {
3693#ifdef CONFIG_B43_BCMA
3694 case B43_BUS_BCMA:
3695 b43err(dev->wl,
3696 "Putting PHY into reset not supported on BCMA\n");
3697 break;
3698#endif
3699#ifdef CONFIG_B43_SSB
3700 case B43_BUS_SSB:
3701 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3702 tmp &= ~B43_TMSLOW_GMODE;
3703 tmp |= B43_TMSLOW_PHYRESET;
3704 tmp |= SSB_TMSLOW_FGC;
3705 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3706 msleep(1);
3707
3708 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3709 tmp &= ~SSB_TMSLOW_FGC;
3710 tmp |= B43_TMSLOW_PHYRESET;
3711 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3712 msleep(1);
3713
3714 break;
3715#endif
3716 }
3717}
3718
3719static const char *band_to_string(enum ieee80211_band band) 3696static const char *band_to_string(enum ieee80211_band band)
3720{ 3697{
3721 switch (band) { 3698 switch (band) {
@@ -3731,94 +3708,75 @@ static const char *band_to_string(enum ieee80211_band band)
3731} 3708}
3732 3709
3733/* Expects wl->mutex locked */ 3710/* Expects wl->mutex locked */
3734static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) 3711static int b43_switch_band(struct b43_wldev *dev,
3712 struct ieee80211_channel *chan)
3735{ 3713{
3736 struct b43_wldev *up_dev = NULL; 3714 struct b43_phy *phy = &dev->phy;
3737 struct b43_wldev *down_dev; 3715 bool gmode;
3738 struct b43_wldev *d; 3716 u32 tmp;
3739 int err;
3740 bool uninitialized_var(gmode);
3741 int prev_status;
3742 3717
3743 /* Find a device and PHY which supports the band. */ 3718 switch (chan->band) {
3744 list_for_each_entry(d, &wl->devlist, list) { 3719 case IEEE80211_BAND_5GHZ:
3745 switch (chan->band) { 3720 gmode = false;
3746 case IEEE80211_BAND_5GHZ: 3721 break;
3747 if (d->phy.supports_5ghz) { 3722 case IEEE80211_BAND_2GHZ:
3748 up_dev = d; 3723 gmode = true;
3749 gmode = false; 3724 break;
3750 } 3725 default:
3751 break; 3726 B43_WARN_ON(1);
3752 case IEEE80211_BAND_2GHZ: 3727 return -EINVAL;
3753 if (d->phy.supports_2ghz) {
3754 up_dev = d;
3755 gmode = true;
3756 }
3757 break;
3758 default:
3759 B43_WARN_ON(1);
3760 return -EINVAL;
3761 }
3762 if (up_dev)
3763 break;
3764 } 3728 }
3765 if (!up_dev) { 3729
3766 b43err(wl, "Could not find a device for %s-GHz band operation\n", 3730 if (!((gmode && phy->supports_2ghz) ||
3731 (!gmode && phy->supports_5ghz))) {
3732 b43err(dev->wl, "This device doesn't support %s-GHz band\n",
3767 band_to_string(chan->band)); 3733 band_to_string(chan->band));
3768 return -ENODEV; 3734 return -ENODEV;
3769 } 3735 }
3770 if ((up_dev == wl->current_dev) && 3736
3771 (!!wl->current_dev->phy.gmode == !!gmode)) { 3737 if (!!phy->gmode == !!gmode) {
3772 /* This device is already running. */ 3738 /* This device is already running. */
3773 return 0; 3739 return 0;
3774 } 3740 }
3775 b43dbg(wl, "Switching to %s-GHz band\n", 3741
3742 b43dbg(dev->wl, "Switching to %s GHz band\n",
3776 band_to_string(chan->band)); 3743 band_to_string(chan->band));
3777 down_dev = wl->current_dev;
3778 3744
 3779 	prev_status = b43_status(down_dev); 3745 	/* Some newer devices don't need the radio disabled for band switching */
3780 /* Shutdown the currently running core. */ 3746 if (!(phy->type == B43_PHYTYPE_N && phy->rev >= 3))
3781 if (prev_status >= B43_STAT_STARTED) 3747 b43_software_rfkill(dev, true);
3782 down_dev = b43_wireless_core_stop(down_dev);
3783 if (prev_status >= B43_STAT_INITIALIZED)
3784 b43_wireless_core_exit(down_dev);
3785 3748
3786 if (down_dev != up_dev) { 3749 phy->gmode = gmode;
3787 /* We switch to a different core, so we put PHY into 3750 b43_phy_put_into_reset(dev);
3788 * RESET on the old core. */ 3751 switch (dev->dev->bus_type) {
3789 b43_put_phy_into_reset(down_dev); 3752#ifdef CONFIG_B43_BCMA
3753 case B43_BUS_BCMA:
3754 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
3755 if (gmode)
3756 tmp |= B43_BCMA_IOCTL_GMODE;
3757 else
3758 tmp &= ~B43_BCMA_IOCTL_GMODE;
3759 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
3760 break;
3761#endif
3762#ifdef CONFIG_B43_SSB
3763 case B43_BUS_SSB:
3764 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3765 if (gmode)
3766 tmp |= B43_TMSLOW_GMODE;
3767 else
3768 tmp &= ~B43_TMSLOW_GMODE;
3769 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3770 break;
3771#endif
3790 } 3772 }
3773 b43_phy_take_out_of_reset(dev);
3791 3774
3792 /* Now start the new core. */ 3775 b43_upload_initvals_band(dev);
3793 up_dev->phy.gmode = gmode;
3794 if (prev_status >= B43_STAT_INITIALIZED) {
3795 err = b43_wireless_core_init(up_dev);
3796 if (err) {
3797 b43err(wl, "Fatal: Could not initialize device for "
3798 "selected %s-GHz band\n",
3799 band_to_string(chan->band));
3800 goto init_failure;
3801 }
3802 }
3803 if (prev_status >= B43_STAT_STARTED) {
3804 err = b43_wireless_core_start(up_dev);
3805 if (err) {
3806 b43err(wl, "Fatal: Could not start device for "
3807 "selected %s-GHz band\n",
3808 band_to_string(chan->band));
3809 b43_wireless_core_exit(up_dev);
3810 goto init_failure;
3811 }
3812 }
3813 B43_WARN_ON(b43_status(up_dev) != prev_status);
3814 3776
3815 wl->current_dev = up_dev; 3777 b43_phy_init(dev);
3816 3778
3817 return 0; 3779 return 0;
3818init_failure:
3819 /* Whoops, failed to init the new core. No core is operating now. */
3820 wl->current_dev = NULL;
3821 return err;
3822} 3780}
3823 3781
3824/* Write the short and long frame retry limit values. */ 3782/* Write the short and long frame retry limit values. */
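The rewritten b43_switch_band() no longer stops one core and starts another; it reprograms the current core in place: pick gmode from the target band, bail out if the PHY lacks that band, toggle the G-mode bit in the bus control register around a PHY reset, re-upload the band initvals, and re-init the PHY. The band gate and the bit toggle reduce to a few lines (plain C sketch; DEMO_GMODE_BIT is an illustrative assumption, not the real B43_TMSLOW_GMODE or B43_BCMA_IOCTL_GMODE value):

#include <stdbool.h>

#define DEMO_GMODE_BIT 0x2000u	/* assumption: illustrative mask only */

struct demo_phy { bool supports_2ghz, supports_5ghz; };

/* returns 0 on success, -1 (driver: -ENODEV) if the band is unsupported */
static int demo_check_band(const struct demo_phy *phy, bool gmode)
{
	if ((gmode && !phy->supports_2ghz) ||
	    (!gmode && !phy->supports_5ghz))
		return -1;
	return 0;
}

/* set or clear the G-mode bit in a bus control word */
static unsigned demo_apply_gmode(unsigned ctl, bool gmode)
{
	return gmode ? (ctl | DEMO_GMODE_BIT) : (ctl & ~DEMO_GMODE_BIT);
}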
@@ -3851,8 +3809,10 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3851 3809
3852 dev = wl->current_dev; 3810 dev = wl->current_dev;
3853 3811
3812 b43_mac_suspend(dev);
3813
3854 /* Switch the band (if necessary). This might change the active core. */ 3814 /* Switch the band (if necessary). This might change the active core. */
3855 err = b43_switch_band(wl, conf->chandef.chan); 3815 err = b43_switch_band(dev, conf->chandef.chan);
3856 if (err) 3816 if (err)
3857 goto out_unlock_mutex; 3817 goto out_unlock_mutex;
3858 3818
@@ -3871,8 +3831,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3871 else 3831 else
3872 phy->is_40mhz = false; 3832 phy->is_40mhz = false;
3873 3833
3874 b43_mac_suspend(dev);
3875
3876 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 3834 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
3877 b43_set_retry_limits(dev, conf->short_frame_max_tx_count, 3835 b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
3878 conf->long_frame_max_tx_count); 3836 conf->long_frame_max_tx_count);
@@ -4582,8 +4540,12 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
4582 struct ssb_bus *bus; 4540 struct ssb_bus *bus;
4583 u32 tmp; 4541 u32 tmp;
4584 4542
4543#ifdef CONFIG_B43_SSB
4585 if (dev->dev->bus_type != B43_BUS_SSB) 4544 if (dev->dev->bus_type != B43_BUS_SSB)
4586 return; 4545 return;
4546#else
4547 return;
4548#endif
4587 4549
4588 bus = dev->dev->sdev->bus; 4550 bus = dev->dev->sdev->bus;
4589 4551
@@ -4738,7 +4700,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4738 } 4700 }
4739 if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW) 4701 if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
4740 hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */ 4702 hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
4741#ifdef CONFIG_SSB_DRIVER_PCICORE 4703#if defined(CONFIG_B43_SSB) && defined(CONFIG_SSB_DRIVER_PCICORE)
4742 if (dev->dev->bus_type == B43_BUS_SSB && 4704 if (dev->dev->bus_type == B43_BUS_SSB &&
4743 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI && 4705 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
4744 dev->dev->sdev->bus->pcicore.dev->id.revision <= 10) 4706 dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
@@ -5129,10 +5091,82 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
5129 b43_phy_free(dev); 5091 b43_phy_free(dev);
5130} 5092}
5131 5093
5094static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5095 bool *have_5ghz_phy)
5096{
5097 u16 dev_id = 0;
5098
5099#ifdef CONFIG_B43_BCMA
5100 if (dev->dev->bus_type == B43_BUS_BCMA &&
5101 dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
5102 dev_id = dev->dev->bdev->bus->host_pci->device;
5103#endif
5104#ifdef CONFIG_B43_SSB
5105 if (dev->dev->bus_type == B43_BUS_SSB &&
5106 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
5107 dev_id = dev->dev->sdev->bus->host_pci->device;
5108#endif
5109 /* Override with SPROM value if available */
5110 if (dev->dev->bus_sprom->dev_id)
5111 dev_id = dev->dev->bus_sprom->dev_id;
5112
 5113 	/* Note: the IDs below can be "virtual" (not matching e.g. the real PCI ID) */
5114 switch (dev_id) {
5115 case 0x4324: /* BCM4306 */
5116 case 0x4312: /* BCM4311 */
5117 case 0x4319: /* BCM4318 */
5118 case 0x4328: /* BCM4321 */
5119 case 0x432b: /* BCM4322 */
5120 case 0x4350: /* BCM43222 */
5121 case 0x4353: /* BCM43224 */
5122 case 0x0576: /* BCM43224 */
5123 case 0x435f: /* BCM6362 */
5124 case 0x4331: /* BCM4331 */
5125 case 0x4359: /* BCM43228 */
5126 case 0x43a0: /* BCM4360 */
5127 case 0x43b1: /* BCM4352 */
5128 /* Dual band devices */
5129 *have_2ghz_phy = true;
5130 *have_5ghz_phy = true;
5131 return;
5132 case 0x4321: /* BCM4306 */
5133 case 0x4313: /* BCM4311 */
5134 case 0x431a: /* BCM4318 */
5135 case 0x432a: /* BCM4321 */
5136 case 0x432d: /* BCM4322 */
5137 case 0x4352: /* BCM43222 */
5138 case 0x4333: /* BCM4331 */
5139 case 0x43a2: /* BCM4360 */
5140 case 0x43b3: /* BCM4352 */
5141 /* 5 GHz only devices */
5142 *have_2ghz_phy = false;
5143 *have_5ghz_phy = true;
5144 return;
5145 }
5146
5147 /* As a fallback, try to guess using PHY type */
5148 switch (dev->phy.type) {
5149 case B43_PHYTYPE_A:
5150 *have_2ghz_phy = false;
5151 *have_5ghz_phy = true;
5152 return;
5153 case B43_PHYTYPE_G:
5154 case B43_PHYTYPE_N:
5155 case B43_PHYTYPE_LP:
5156 case B43_PHYTYPE_HT:
5157 case B43_PHYTYPE_LCN:
5158 *have_2ghz_phy = true;
5159 *have_5ghz_phy = false;
5160 return;
5161 }
5162
5163 B43_WARN_ON(1);
5164}
5165
5132static int b43_wireless_core_attach(struct b43_wldev *dev) 5166static int b43_wireless_core_attach(struct b43_wldev *dev)
5133{ 5167{
5134 struct b43_wl *wl = dev->wl; 5168 struct b43_wl *wl = dev->wl;
5135 struct pci_dev *pdev = NULL; 5169 struct b43_phy *phy = &dev->phy;
5136 int err; 5170 int err;
5137 u32 tmp; 5171 u32 tmp;
5138 bool have_2ghz_phy = false, have_5ghz_phy = false; 5172 bool have_2ghz_phy = false, have_5ghz_phy = false;
@@ -5144,19 +5178,15 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5144 * that in core_init(), too. 5178 * that in core_init(), too.
5145 */ 5179 */
5146 5180
5147#ifdef CONFIG_B43_SSB
5148 if (dev->dev->bus_type == B43_BUS_SSB &&
5149 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
5150 pdev = dev->dev->sdev->bus->host_pci;
5151#endif
5152
5153 err = b43_bus_powerup(dev, 0); 5181 err = b43_bus_powerup(dev, 0);
5154 if (err) { 5182 if (err) {
5155 b43err(wl, "Bus powerup failed\n"); 5183 b43err(wl, "Bus powerup failed\n");
5156 goto out; 5184 goto out;
5157 } 5185 }
5158 5186
5159 /* Get the PHY type. */ 5187 phy->do_full_init = true;
5188
5189 /* Try to guess supported bands for the first init needs */
5160 switch (dev->dev->bus_type) { 5190 switch (dev->dev->bus_type) {
5161#ifdef CONFIG_B43_BCMA 5191#ifdef CONFIG_B43_BCMA
5162 case B43_BUS_BCMA: 5192 case B43_BUS_BCMA:
@@ -5178,51 +5208,31 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
 	}
 
 	dev->phy.gmode = have_2ghz_phy;
-	dev->phy.radio_on = true;
 	b43_wireless_core_reset(dev, dev->phy.gmode);
 
+	/* Get the PHY type. */
 	err = b43_phy_versioning(dev);
 	if (err)
 		goto err_powerdown;
-	/* Check if this device supports multiband. */
-	if (!pdev ||
-	    (pdev->device != 0x4312 &&
-	     pdev->device != 0x4319 && pdev->device != 0x4324)) {
-		/* No multiband support. */
-		have_2ghz_phy = false;
+
+	/* Get real info about supported bands */
+	b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
+
+	/* We don't support 5 GHz on some PHYs yet */
+	switch (dev->phy.type) {
+	case B43_PHYTYPE_A:
+	case B43_PHYTYPE_N:
+	case B43_PHYTYPE_LP:
+	case B43_PHYTYPE_HT:
+		b43warn(wl, "5 GHz band is unsupported on this PHY\n");
 		have_5ghz_phy = false;
-		switch (dev->phy.type) {
-		case B43_PHYTYPE_A:
-			have_5ghz_phy = true;
-			break;
-		case B43_PHYTYPE_LP: //FIXME not always!
-#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
-			have_5ghz_phy = 1;
-#endif
-		case B43_PHYTYPE_G:
-		case B43_PHYTYPE_N:
-		case B43_PHYTYPE_HT:
-		case B43_PHYTYPE_LCN:
-			have_2ghz_phy = true;
-			break;
-		default:
-			B43_WARN_ON(1);
-		}
 	}
-	if (dev->phy.type == B43_PHYTYPE_A) {
-		/* FIXME */
-		b43err(wl, "IEEE 802.11a devices are unsupported\n");
+
+	if (!have_2ghz_phy && !have_5ghz_phy) {
+		b43err(wl, "b43 can't support any band on this device\n");
 		err = -EOPNOTSUPP;
 		goto err_powerdown;
 	}
-	if (1 /* disable A-PHY */) {
-		/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
-		if (dev->phy.type != B43_PHYTYPE_N &&
-		    dev->phy.type != B43_PHYTYPE_LP) {
-			have_2ghz_phy = true;
-			have_5ghz_phy = false;
-		}
-	}
 
 	err = b43_phy_allocate(dev);
 	if (err)
@@ -5270,7 +5280,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
 	b43_debugfs_remove_device(wldev);
 	b43_wireless_core_detach(wldev);
 	list_del(&wldev->list);
-	wl->nr_devs--;
 	b43_bus_set_wldev(dev, NULL);
 	kfree(wldev);
 }
@@ -5295,8 +5304,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
 	if (err)
 		goto err_kfree_wldev;
 
-	list_add(&wldev->list, &wl->devlist);
-	wl->nr_devs++;
 	b43_bus_set_wldev(dev, wldev);
 	b43_debugfs_add_device(wldev);
 
@@ -5314,6 +5321,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
 	 (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) &&	\
 	 (pdev->subsystem_device == _subdevice)				)
 
+#ifdef CONFIG_B43_SSB
 static void b43_sprom_fixup(struct ssb_bus *bus)
 {
 	struct pci_dev *pdev;
@@ -5345,6 +5353,7 @@ static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
 	ssb_set_devtypedata(dev->sdev, NULL);
 	ieee80211_free_hw(hw);
 }
+#endif
 
 static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 {
@@ -5386,7 +5395,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 	wl->hw = hw;
 	mutex_init(&wl->mutex);
 	spin_lock_init(&wl->hardirq_lock);
-	INIT_LIST_HEAD(&wl->devlist);
 	INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
 	INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
 	INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5494,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
 	struct b43_bus_dev *dev;
 	struct b43_wl *wl;
 	int err;
-	int first = 0;
 
 	dev = b43_bus_dev_ssb_init(sdev);
 	if (!dev)
 		return -ENOMEM;
 
 	wl = ssb_get_devtypedata(sdev);
-	if (!wl) {
-		/* Probing the first core. Must setup common struct b43_wl */
-		first = 1;
-		b43_sprom_fixup(sdev->bus);
-		wl = b43_wireless_init(dev);
-		if (IS_ERR(wl)) {
-			err = PTR_ERR(wl);
-			goto out;
-		}
-		ssb_set_devtypedata(sdev, wl);
-		B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+	if (wl) {
+		b43err(NULL, "Dual-core devices are not supported\n");
+		err = -ENOTSUPP;
+		goto err_ssb_kfree_dev;
 	}
+
+	b43_sprom_fixup(sdev->bus);
+
+	wl = b43_wireless_init(dev);
+	if (IS_ERR(wl)) {
+		err = PTR_ERR(wl);
+		goto err_ssb_kfree_dev;
+	}
+	ssb_set_devtypedata(sdev, wl);
+	B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
 	err = b43_one_core_attach(dev, wl);
 	if (err)
-		goto err_wireless_exit;
+		goto err_ssb_wireless_exit;
 
 	/* setup and start work to load firmware */
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);
 
-      out:
 	return err;
 
-      err_wireless_exit:
-	if (first)
-		b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+	b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+	kfree(dev);
 	return err;
 }
 
@@ -5546,13 +5557,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 	/* Unregister HW RNG driver */
 	b43_rng_exit(wl);
 
-	if (list_empty(&wl->devlist)) {
-		b43_leds_unregister(wl);
-		/* Last core on the chip unregistered.
-		 * We can destroy common struct b43_wl.
-		 */
-		b43_wireless_exit(dev, wl);
-	}
+	b43_leds_unregister(wl);
+	b43_wireless_exit(dev, wl);
 }
 
 static struct ssb_driver b43_ssb_driver = {
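The attach path above now detects bands in two stages: a first guess from bus-level IDs, good enough to pick gmode for the initial core reset, and then authoritative data from b43_supported_bands() once the PHY version registers are readable. A condensed sketch of the resulting flow (illustrative only; guess_bands_from_bus_ids() stands in for the bus_type switch, and error handling is trimmed):

    /* Sketch of the reworked attach flow; not the literal driver code. */
    static int attach_flow_sketch(struct b43_wldev *dev)
    {
    	bool have_2ghz, have_5ghz;

    	b43_bus_powerup(dev, 0);
    	dev->phy.do_full_init = true;

    	/* Stage 1: guess from bus IDs, enough for the first reset */
    	guess_bands_from_bus_ids(dev, &have_2ghz, &have_5ghz);
    	dev->phy.gmode = have_2ghz;
    	b43_wireless_core_reset(dev, dev->phy.gmode);

    	/* Stage 2: the PHY is readable now, so get the real answer */
    	b43_phy_versioning(dev);
    	b43_supported_bands(dev, &have_2ghz, &have_5ghz);

    	if (!have_2ghz && !have_5ghz)
    		return -EOPNOTSUPP;
    	return b43_phy_allocate(dev);
    }
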
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index dbaa51890198..08244b3b327e 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -96,12 +96,16 @@ int b43_phy_init(struct b43_wldev *dev)
 
 	phy->channel = ops->get_default_chan(dev);
 
-	ops->software_rfkill(dev, false);
+	phy->ops->switch_analog(dev, true);
+	b43_software_rfkill(dev, false);
+
 	err = ops->init(dev);
 	if (err) {
 		b43err(dev->wl, "PHY init failed\n");
 		goto err_block_rf;
 	}
+	phy->do_full_init = false;
+
 	/* Make sure to switch hardware and firmware (SHM) to
 	 * the default channel. */
 	err = b43_switch_channel(dev, ops->get_default_chan(dev));
@@ -113,10 +117,11 @@ int b43_phy_init(struct b43_wldev *dev)
 	return 0;
 
 err_phy_exit:
+	phy->do_full_init = true;
 	if (ops->exit)
 		ops->exit(dev);
 err_block_rf:
-	ops->software_rfkill(dev, true);
+	b43_software_rfkill(dev, true);
 
 	return err;
 }
@@ -125,7 +130,8 @@ void b43_phy_exit(struct b43_wldev *dev)
 {
 	const struct b43_phy_operations *ops = dev->phy.ops;
 
-	ops->software_rfkill(dev, true);
+	b43_software_rfkill(dev, true);
+	dev->phy.do_full_init = true;
 	if (ops->exit)
 		ops->exit(dev);
 }
@@ -312,6 +318,90 @@ void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
 	}
 }
 
+void b43_phy_put_into_reset(struct b43_wldev *dev)
+{
+	u32 tmp;
+
+	switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+	case B43_BUS_BCMA:
+		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+		tmp &= ~B43_BCMA_IOCTL_GMODE;
+		tmp |= B43_BCMA_IOCTL_PHY_RESET;
+		tmp |= BCMA_IOCTL_FGC;
+		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+		udelay(1);
+
+		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+		tmp &= ~BCMA_IOCTL_FGC;
+		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+		udelay(1);
+		break;
+#endif
+#ifdef CONFIG_B43_SSB
+	case B43_BUS_SSB:
+		tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+		tmp &= ~B43_TMSLOW_GMODE;
+		tmp |= B43_TMSLOW_PHYRESET;
+		tmp |= SSB_TMSLOW_FGC;
+		ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+		usleep_range(1000, 2000);
+
+		tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+		tmp &= ~SSB_TMSLOW_FGC;
+		ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+		usleep_range(1000, 2000);
+
+		break;
+#endif
+	}
+}
+
+void b43_phy_take_out_of_reset(struct b43_wldev *dev)
+{
+	u32 tmp;
+
+	switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+	case B43_BUS_BCMA:
+		/* Unset reset bit (with forcing clock) */
+		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+		tmp &= ~B43_BCMA_IOCTL_PHY_RESET;
+		tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
+		tmp |= BCMA_IOCTL_FGC;
+		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+		udelay(1);
+
+		/* Do not force clock anymore */
+		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+		tmp &= ~BCMA_IOCTL_FGC;
+		tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
+		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+		udelay(1);
+		break;
+#endif
+#ifdef CONFIG_B43_SSB
+	case B43_BUS_SSB:
+		/* Unset reset bit (with forcing clock) */
+		tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+		tmp &= ~B43_TMSLOW_PHYRESET;
+		tmp &= ~B43_TMSLOW_PHYCLKEN;
+		tmp |= SSB_TMSLOW_FGC;
+		ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+		ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+		usleep_range(1000, 2000);
+
+		tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+		tmp &= ~SSB_TMSLOW_FGC;
+		tmp |= B43_TMSLOW_PHYCLKEN;
+		ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+		ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+		usleep_range(1000, 2000);
+		break;
+#endif
+	}
+}
+
 int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
 {
 	struct b43_phy *phy = &(dev->phy);
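The two helpers added above are symmetric: b43_phy_put_into_reset() asserts the PHY reset line while temporarily forcing the clock (FGC), and b43_phy_take_out_of_reset() deasserts it and re-enables the PHY clock, on both BCMA and SSB buses. A plausible caller, sketched under the assumption that a full re-init is wanted afterwards (this caller is not part of the patch):

    /* Hypothetical usage: hard-reset the PHY and force a full re-init. */
    static void phy_hard_reset_sketch(struct b43_wldev *dev)
    {
    	b43_phy_put_into_reset(dev);	/* reset asserted, clock forced */
    	usleep_range(2000, 4000);	/* give the core time to settle */
    	b43_phy_take_out_of_reset(dev);	/* reset deasserted, PHY clock on */
    	dev->phy.do_full_init = true;	/* next init redoes calibration */
    }
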
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index f1b999349876..4ad6240d9ff4 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -231,9 +231,12 @@ struct b43_phy {
 	/* HT info */
 	bool is_40mhz;
 
-	/* GMODE bit enabled? */
+	/* Is GMODE (2 GHz mode) bit enabled? */
 	bool gmode;
 
+	/* After power reset full init has to be performed */
+	bool do_full_init;
+
 	/* Analog Type */
 	u8 analog;
 	/* B43_PHYTYPE_ */
@@ -390,6 +393,9 @@ void b43_phy_lock(struct b43_wldev *dev);
  */
 void b43_phy_unlock(struct b43_wldev *dev);
 
+void b43_phy_put_into_reset(struct b43_wldev *dev);
+void b43_phy_take_out_of_reset(struct b43_wldev *dev);
+
 /**
  * b43_switch_channel - Switch to another channel
  */
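Together with the phy_common.c hunks, the new do_full_init flag has a simple contract: set at attach, on PHY exit, and when init fails; cleared only after ops->init() succeeds. The radio init paths use it to skip the slow calibrations on warm re-inits, roughly as follows (condensed from the phy_n.c hunks further down; not new API):

    /* Condensed contract: run full calibration only after a power reset. */
    static void radio_init_calibration_sketch(struct b43_wldev *dev)
    {
    	if (dev->phy.do_full_init) {
    		/* full calibration, only needed after a power reset */
    		b43_radio_2057_rcal(dev);
    		b43_radio_2057_rccal(dev);
    	}
    	/* the cheap per-init register programming always runs */
    }
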
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 12f467b8d564..8f5c14bc10e6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
 	b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
 static void b43_phy_initb6(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
 		b43_radio_write16(dev, 0x50, 0x20);
 	}
 	if (phy->radio_rev <= 2) {
-		b43_radio_write16(dev, 0x7C, 0x20);
+		b43_radio_write16(dev, 0x50, 0x20);
 		b43_radio_write16(dev, 0x5A, 0x70);
 		b43_radio_write16(dev, 0x5B, 0x7B);
 		b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
 	b43_phy_write(dev, 0x2A, 0x8AC0);
 	b43_phy_write(dev, 0x0038, 0x0668);
 	b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
-	if (phy->radio_rev <= 5) {
+	if (phy->radio_rev == 4 || phy->radio_rev == 5)
 		b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
-	}
 	if (phy->radio_rev <= 2)
 		b43_radio_write16(dev, 0x005D, 0x000D);
 
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 24ccbe96e0c8..86569f6a8705 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
 	}
 }
 
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+					       enum n_intc_override intc_override,
+					       u16 value, u8 core_sel)
+{
+	u16 reg, tmp, tmp2, val;
+	int core;
+
+	for (core = 0; core < 2; core++) {
+		if ((core_sel == 1 && core != 0) ||
+		    (core_sel == 2 && core != 1))
+			continue;
+
+		reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+		switch (intc_override) {
+		case N_INTC_OVERRIDE_OFF:
+			b43_phy_write(dev, reg, 0);
+			b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+			break;
+		case N_INTC_OVERRIDE_TRSW:
+			b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+			b43_phy_set(dev, reg, 0x400);
+
+			b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+			b43_phy_set(dev, 0x2ff, 0x2000);
+			b43_phy_set(dev, 0x2ff, 0x0001);
+			break;
+		case N_INTC_OVERRIDE_PA:
+			tmp = 0x0030;
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+				val = value << 5;
+			else
+				val = value << 4;
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			b43_phy_set(dev, reg, 0x1000);
+			break;
+		case N_INTC_OVERRIDE_EXT_LNA_PU:
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				tmp = 0x0001;
+				tmp2 = 0x0004;
+				val = value;
+			} else {
+				tmp = 0x0004;
+				tmp2 = 0x0001;
+				val = value << 2;
+			}
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			b43_phy_mask(dev, reg, ~tmp2);
+			break;
+		case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				tmp = 0x0002;
+				tmp2 = 0x0008;
+				val = value << 1;
+			} else {
+				tmp = 0x0008;
+				tmp2 = 0x0002;
+				val = value << 3;
+			}
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			b43_phy_mask(dev, reg, ~tmp2);
+			break;
+		}
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
 static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
 					  enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
 	u8 i, j;
 	u16 reg, tmp, val;
 
+	if (dev->phy.rev >= 7) {
+		b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+						   core);
+		return;
+	}
+
 	B43_WARN_ON(dev->phy.rev < 3);
 
 	for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
 	static const u16 clip[] = { 0xFFFF, 0xFFFF };
 	if (nphy->deaf_count++ == 0) {
 		nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
-		b43_nphy_classifier(dev, 0x7, 0);
+		b43_nphy_classifier(dev, 0x7,
+				    B43_NPHY_CLASSCTL_WAITEDEN);
 		b43_nphy_read_clip_detection(dev, nphy->clip_state);
 		b43_nphy_write_clip_detection(dev, clip);
 	}
@@ -627,13 +700,11 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
 	b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
 	b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
 
-	if (dev->phy.n->init_por) {
+	if (dev->phy.do_full_init) {
 		b43_radio_2057_rcal(dev);
 		b43_radio_2057_rccal(dev);
 	}
 	b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
-
-	dev->phy.n->init_por = false;
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
@@ -734,9 +805,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 	u16 bias, cbias;
 	u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
 	u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+	bool is_pkg_fab_smic;
 
 	B43_WARN_ON(dev->phy.rev < 3);
 
+	is_pkg_fab_smic =
+		((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+		  dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+		  dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+		 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
 	b43_chantab_radio_2056_upload(dev, e);
 	b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
 
@@ -744,7 +822,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 	    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
-		if (dev->dev->chip_id == 0x4716) {
+		if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+		    dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
 			b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
 			b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
 		} else {
@@ -752,6 +831,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 			b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
 		}
 	}
+	if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+	    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+		b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+	}
 	if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
 	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +853,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 			b43_radio_write(dev,
 					offset | B2056_TX_PADG_IDAC, 0xcc);
 
-			if (dev->dev->chip_id == 0x4716) {
+			if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+			    dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
 				bias = 0x40;
 				cbias = 0x45;
 				pag_boost = 0x5;
@@ -776,6 +863,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 			} else {
 				bias = 0x25;
 				cbias = 0x20;
+				if (is_pkg_fab_smic) {
+					bias = 0x2a;
+					cbias = 0x38;
+				}
 				pag_boost = 0x4;
 				pgag_boost = 0x03;
 				mixg_boost = 0x65;
@@ -844,6 +935,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 			mixa_boost = 0xF;
 		}
 
+		cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
 		for (i = 0; i < 2; i++) {
 			offset = i ? B2056_TX1 : B2056_TX0;
 
@@ -862,11 +955,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
 			b43_radio_write(dev,
 					offset | B2056_TX_PADA_CASCBIAS, 0x03);
 			b43_radio_write(dev,
-					offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+					offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
 			b43_radio_write(dev,
-					offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+					offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
 			b43_radio_write(dev,
-					offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+					offset | B2056_TX_INTPAA_CASCBIAS, cbias);
 		}
 	}
 
@@ -933,7 +1026,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
 	b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
 	b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
 	b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-	if (dev->phy.n->init_por)
+	if (dev->phy.do_full_init)
 		b43_radio_2056_rcal(dev);
 }
 
@@ -946,8 +1039,6 @@ static void b43_radio_init2056(struct b43_wldev *dev)
 	b43_radio_init2056_pre(dev);
 	b2056_upload_inittabs(dev, 0, 0);
 	b43_radio_init2056_post(dev);
-
-	dev->phy.n->init_por = false;
 }
 
 /**************************************************
@@ -1164,23 +1255,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
 	u16 seq_mode;
 	u32 tmp;
 
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, true);
+	b43_nphy_stay_in_carrier_search(dev, true);
 
 	if ((nphy->bb_mult_save & 0x80000000) == 0) {
 		tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
 		nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
 	}
 
+	/* TODO: add modify_bbmult argument */
 	if (!dev->phy.is_40mhz)
 		tmp = 0x6464;
 	else
 		tmp = 0x4747;
 	b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
 
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, false);
-
 	b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
 
 	if (loops != 0xFFFF)
@@ -1213,6 +1301,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
 		b43err(dev->wl, "run samples timeout\n");
 
 	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+	b43_nphy_stay_in_carrier_search(dev, false);
 }
 
 /**************************************************
@@ -1588,8 +1678,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 	struct b43_phy_n *nphy = dev->phy.n;
 
 	u16 saved_regs_phy_rfctl[2];
-	u16 saved_regs_phy[13];
-	u16 regs_to_store[] = {
+	u16 saved_regs_phy[22];
+	u16 regs_to_store_rev3[] = {
 		B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
 		B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
 		B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1688,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 		B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
 		B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
 	};
+	u16 regs_to_store_rev7[] = {
+		B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+		B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+		B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+		0x342, 0x343, 0x346, 0x347,
+		0x2ff,
+		B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+		B43_NPHY_RFCTL_CMD,
+		B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+		0x340, 0x341, 0x344, 0x345,
+		B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+	};
+	u16 *regs_to_store;
+	int regs_amount;
 
 	u16 class;
 
@@ -1617,6 +1721,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 	u8 rx_core_state;
 	int core, i, j, vcm;
 
+	if (dev->phy.rev >= 7) {
+		regs_to_store = regs_to_store_rev7;
+		regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+	} else {
+		regs_to_store = regs_to_store_rev3;
+		regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+	}
+	BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
 	class = b43_nphy_classifier(dev, 0, 0);
 	b43_nphy_classifier(dev, 7, 4);
 	b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1737,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
 	saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
 	saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-	for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+	for (i = 0; i < regs_amount; i++)
 		saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
 
 	b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
 	b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
-	b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
-	b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
-	b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
-	b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-		b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
-		b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+	if (dev->phy.rev >= 7) {
+		/* TODO */
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		} else {
+		}
 	} else {
-		b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
-		b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+		b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+		b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+		b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+		b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+			b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+		} else {
+			b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+			b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+		}
 	}
 
 	rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1774,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
 		/* Grab RSSI results for every possible VCM */
 		for (vcm = 0; vcm < 8; vcm++) {
-			b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
-					  vcm << 2);
+			if (dev->phy.rev >= 7)
+				;
+			else
+				b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+						  0xE3, vcm << 2);
 			b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
 		}
1661 1784
@@ -1682,8 +1805,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1682 } 1805 }
1683 1806
1684 /* Select the best VCM */ 1807 /* Select the best VCM */
1685 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, 1808 if (dev->phy.rev >= 7)
1686 vcm_final << 2); 1809 ;
1810 else
1811 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
1812 0xE3, vcm_final << 2);
1687 1813
1688 for (i = 0; i < 4; i++) { 1814 for (i = 0; i < 4; i++) {
1689 if (core != i / 2) 1815 if (core != i / 2)
@@ -1736,9 +1862,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
 	b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
 	b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
-	b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+	b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
 
-	for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+	for (i = 0; i < regs_amount; i++)
 		b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
 
 	/* Store for future configuration */
@@ -2494,8 +2620,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	/* TX to RX */
-	u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
-	u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+	u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+	u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
 	/* RX to TX */
 	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
 				   0x1F };
@@ -2503,6 +2629,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 	u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
 	u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
+	u16 vmids[5][4] = {
+		{ 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+		{ 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+		{ 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+		{ 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+		{ 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+	};
+	u16 gains[5][4] = {
+		{ 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+		{ 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+		{ 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+		{ 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+		{ 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+	};
+	u16 *vmid, *gain;
+
+	u8 pdet_range;
 	u16 tmp16;
 	u32 tmp32;
 
@@ -2561,7 +2704,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 	b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
 	b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
-	/* TODO */
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		pdet_range = sprom->fem.ghz2.pdet_range;
+	else
+		pdet_range = sprom->fem.ghz5.pdet_range;
+	vmid = vmids[min_t(u16, pdet_range, 4)];
+	gain = gains[min_t(u16, pdet_range, 4)];
+	switch (pdet_range) {
+	case 3:
+		if (!(dev->phy.rev >= 4 &&
+		      b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+			break;
+		/* FALL THROUGH */
+	case 0:
+	case 1:
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+		break;
+	case 2:
+		if (dev->phy.rev >= 6) {
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+				vmid[3] = 0x94;
+			else
+				vmid[3] = 0x8e;
+			gain[3] = 3;
+		} else if (dev->phy.rev == 5) {
+			vmid[3] = 0x84;
+			gain[3] = 2;
+		}
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+		break;
+	case 4:
+	case 5:
+		if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+			if (pdet_range == 4) {
+				vmid[3] = 0x8e;
+				tmp16 = 0x96;
+				gain[3] = 0x2;
+			} else {
+				vmid[3] = 0x89;
+				tmp16 = 0x89;
+				gain[3] = 0;
+			}
+		} else {
+			if (pdet_range == 4) {
+				vmid[3] = 0x89;
+				tmp16 = 0x8b;
+				gain[3] = 0x2;
+			} else {
+				vmid[3] = 0x74;
+				tmp16 = 0x70;
+				gain[3] = 0;
+			}
+		}
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+		vmid[3] = tmp16;
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+		b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+		break;
+	}
 
 	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
 	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2807,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 	/* Dropped probably-always-true condition */
 	b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
 	b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
-	b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+	b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
 	b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
 	b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
 	b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3418,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
 	u8 idx, delta;
 	u8 i, stf_mode;
 
+	/* Array adj_pwr_tbl corresponds to the hardware table. It consists of
+	 * 21 groups, each containing 4 entries.
+	 *
+	 * First group has entries for CCK modulation.
+	 * The rest of groups has 1 entry per modulation (SISO, CDD, STBC, SDM).
+	 *
+	 * Group 0 is for CCK
+	 * Groups 1..4 use BPSK (group per coding rate)
+	 * Groups 5..8 use QPSK (group per coding rate)
+	 * Groups 9..12 use 16-QAM (group per coding rate)
+	 * Groups 13..16 use 64-QAM (group per coding rate)
+	 * Groups 17..20 are unknown
+	 */
+
 	for (i = 0; i < 4; i++)
 		nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
 
@@ -3409,10 +3630,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
 	}
 
 	b43_nphy_tx_prepare_adjusted_power_table(dev);
-/*
 	b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
 	b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
-*/
 
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5343,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
 	b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
 	b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
 	if (phy->rev >= 3 && phy->rev <= 6)
-		b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+		b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
 	b43_nphy_tx_lp_fbw(dev);
 	if (phy->rev >= 3)
 		b43_nphy_spur_workaround(dev);
@@ -5338,7 +5557,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
 	nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
 	nphy->spur_avoid = (phy->rev >= 3) ?
 				B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
-	nphy->init_por = true;
 	nphy->gain_boost = true; /* this way we follow wl, assume it is true */
 	nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
 	nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -5379,8 +5597,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
 		nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
 		nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
 	}
-
-	nphy->init_por = true;
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -5441,8 +5657,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
 {
 	/* Register 1 is a 32-bit register. */
 	B43_WARN_ON(reg == 1);
-	/* N-PHY needs 0x100 for read access */
-	reg |= 0x100;
+
+	if (dev->phy.rev >= 7)
+		reg |= 0x200; /* Radio 0x2057 */
+	else
+		reg |= 0x100;
 
 	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
 	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
@@ -5488,10 +5707,12 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
 		}
 	} else {
 		if (dev->phy.rev >= 7) {
-			b43_radio_2057_init(dev);
+			if (!dev->phy.radio_on)
+				b43_radio_2057_init(dev);
 			b43_switch_channel(dev, dev->phy.channel);
 		} else if (dev->phy.rev >= 3) {
-			b43_radio_init2056(dev);
+			if (!dev->phy.radio_on)
+				b43_radio_init2056(dev);
 			b43_switch_channel(dev, dev->phy.channel);
 		} else {
 			b43_radio_init2055(dev);
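The radio-read hunk above encodes which radio sits behind B43_MMIO_RADIO_CONTROL: 0x2056 radios (PHY rev < 7) need the 0x100 read-access offset, while the 0x2057 (PHY rev >= 7) uses 0x200. The resulting access pattern, condensed from the hunk (a sketch, not a new helper in the patch):

    /* Condensed from b43_nphy_op_radio_read() as patched above. */
    static u16 nphy_radio_read_sketch(struct b43_wldev *dev, u16 reg)
    {
    	reg |= (dev->phy.rev >= 7) ? 0x200	/* radio 0x2057 */
    				   : 0x100;	/* radio 0x2056 */
    	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
    	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
    }
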
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 9a5b6bc27d24..ecfbf66dbc3b 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -931,7 +931,6 @@ struct b43_phy_n {
 	u16 papd_epsilon_offset[2];
 	s32 preamble_override;
 	u32 bb_mult_save;
-	bool init_por;
 
 	bool gain_boost;
 	bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index b4fd9345d673..2ce25607c60d 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
 	unsigned int rx_length;
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
 	[B2056_SYN_LOGEN_TX_CMOS_VALID]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
 	[B2056_TX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
 	[B2056_TX_STATUS_TXLPF_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
 	[B2056_RX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
 	[B2056_RX_STATUS_HPC_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
 	[B2056_SYN_LOGEN_TX_CMOS_VALID]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
 	[B2056_TX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
 	[B2056_TX_STATUS_TXLPF_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
 	[B2056_RX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
 	[B2056_RX_STATUS_HPC_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
 	[B2056_SYN_LOGEN_TX_CMOS_VALID]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
 	[B2056_TX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
 	[B2056_TX_GMBB_IDAC7]		= { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
 	[B2056_RX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
 	[B2056_RX_STATUS_HPC_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
 	[B2056_SYN_LOGEN_TX_CMOS_VALID]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
 	[B2056_TX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
 	[B2056_TX_GMBB_IDAC7]		= { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
 	[B2056_RX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
 	[B2056_RX_STATUS_HPC_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
 	[B2056_SYN_LOGEN_TX_CMOS_VALID]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
 	[B2056_TX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_TX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
 	[B2056_TX_GMBB_IDAC7]		= { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
 	[B2056_RX_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_RX_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
 	[B2056_RX_STATUS_HPC_RC]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
 	[B2056_SYN_RESERVED_ADDR2]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR3]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2056_SYN_RESERVED_ADDR4]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
2666 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2666 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2667}; 2667};
2668 2668
2669static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = { 2669static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
2670 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2670 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2671 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2671 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2672 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2672 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
2822 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, }, 2822 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2823}; 2823};
2824 2824
2825static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = { 2825static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
2826 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2826 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2827 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2827 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2828 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2828 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
2972 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2972 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2973}; 2973};
2974 2974
-#define INITTABSPTS(prefix) \
-	.syn = prefix##_syn,			\
-	.syn_length = ARRAY_SIZE(prefix##_syn),	\
-	.tx = prefix##_tx,			\
-	.tx_length = ARRAY_SIZE(prefix##_tx),	\
-	.rx = prefix##_rx,			\
-	.rx_length = ARRAY_SIZE(prefix##_rx)
-
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
-	[3] = { INITTABSPTS(b2056_inittab_rev3) },
-	[4] = { INITTABSPTS(b2056_inittab_rev4) },
-	[5] = { INITTABSPTS(b2056_inittab_rev5) },
-	[6] = { INITTABSPTS(b2056_inittab_rev6) },
-	[7] = { INITTABSPTS(b2056_inittab_rev7) },
-	[8] = { INITTABSPTS(b2056_inittab_rev8) },
-	[9] = { INITTABSPTS(b2056_inittab_rev7) },
-};
-
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+	[B2056_SYN_PLL_PFD]		= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+	[B2056_SYN_PLL_CP2]		= { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER1]	= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER2]	= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER4]	= { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+	[B2056_SYN_PLL_VCO2]		= { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+	[B2056_SYN_PLL_VCOCAL12]	= { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+	[B2056_SYN_LOGENBUF2]		= { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+	[B2056_TX_PA_SPARE2]		= { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+	[B2056_TX_INTPAA_IAUX_STAT]	= { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+	[B2056_TX_INTPAA_IMAIN_STAT]	= { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+	[B2056_TX_INTPAA_PASLOPE]	= { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+	[B2056_TX_INTPAG_PASLOPE]	= { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+	[B2056_TX_PADA_IDAC]		= { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+	[B2056_TX_PADA_SLOPE]		= { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+	[B2056_TX_PADG_SLOPE]		= { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+	[B2056_TX_PGAA_IDAC]		= { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+	[B2056_TX_PGAA_SLOPE]		= { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+	[B2056_TX_PGAG_SLOPE]		= { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+	[B2056_TX_GMBB_IDAC]		= { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+	[B2056_TX_TXSPARE1]		= { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+	[B2056_RX_BIASPOLE_LNAA1_IDAC]	= { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+	[B2056_RX_LNAA2_IDAC]		= { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+	[B2056_RX_BIASPOLE_LNAG1_IDAC]	= { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+	[B2056_RX_LNAG2_IDAC]		= { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+	[B2056_RX_MIXA_VCM]		= { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+	[B2056_RX_MIXA_LOB_BIAS]	= { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+	[B2056_RX_MIXA_BIAS_AUX]	= { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+	[B2056_RX_MIXG_VCM]		= { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+	[B2056_RX_TIA_IOPAMP]		= { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+	[B2056_RX_TIA_QOPAMP]		= { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+	[B2056_RX_TIA_IMISC]		= { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+	[B2056_RX_TIA_QMISC]		= { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+	[B2056_RX_RXLPF_OUTVCM]		= { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+	[B2056_RX_VGA_BIAS_DCCANCEL]	= { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+	[B2056_RX_RXSPARE3]		= { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
+};
+
+#define INITTABSPTS(prefix) \
+	static const struct b2056_inittabs_pts prefix = {	\
+		.syn = prefix##_syn,				\
+		.syn_length = ARRAY_SIZE(prefix##_syn),		\
+		.tx = prefix##_tx,				\
+		.tx_length = ARRAY_SIZE(prefix##_tx),		\
+		.rx = prefix##_rx,				\
+		.rx_length = ARRAY_SIZE(prefix##_rx),		\
+	}
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
 #define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
 		   r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
 		   r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
 	.phy_regs.phy_bw6	= r5
 
 /* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
 	},
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
 	},
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
 	},
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
 	},
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
 	},
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
 	{ .freq = 4920,
 	  RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
 		     0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,1154 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
 	},
 };
 
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+	{
+		.freq = 4920,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+	},
+	{
+		.freq = 4930,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+	},
+	{
+		.freq = 4940,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+	},
+	{
+		.freq = 4950,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+	},
+	{
+		.freq = 4960,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+	},
+	{
+		.freq = 4970,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+	},
+	{
+		.freq = 4980,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+	},
+	{
+		.freq = 4990,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+	},
+	{
+		.freq = 5000,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+	},
+	{
+		.freq = 5010,
+		RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+	},
+	{
+		.freq = 5020,
+		RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+	},
+	{
+		.freq = 5030,
+		RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+	},
+	{
+		.freq = 5040,
+		RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+	},
+	{
+		.freq = 5050,
+		RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+	},
+	{
+		.freq = 5060,
+		RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+	},
+	{
+		.freq = 5070,
+		RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+	},
+	{
+		.freq = 5080,
+		RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+	},
+	{
+		.freq = 5090,
+		RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+	},
+	{
+		.freq = 5100,
+		RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+	},
+	{
+		.freq = 5110,
+		RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+	},
+	{
+		.freq = 5120,
+		RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+	},
+	{
+		.freq = 5130,
+		RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+	},
+	{
+		.freq = 5140,
+		RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+			   0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+			   0x00, 0x0f, 0x00, 0x6f, 0x00),
+		PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+	},
+	{
+		.freq = 5160,
+		RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+			   0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+			   0x00, 0x0e, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+	},
+	{
+		.freq = 5170,
+		RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+			   0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+			   0x00, 0x0e, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+	},
+	{
+		.freq = 5180,
+		RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+			   0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+			   0x00, 0x0e, 0x00, 0x6f, 0x00),
+		PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+	},
+	{
+		.freq = 5190,
+		RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+	},
+	{
+		.freq = 5200,
+		RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+	},
+	{
+		.freq = 5210,
+		RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+			   0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+	},
+	{
+		.freq = 5220,
+		RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+			   0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+	},
+	{
+		.freq = 5230,
+		RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+			   0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+	},
+	{
+		.freq = 5240,
+		RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+			   0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+	},
+	{
+		.freq = 5250,
+		RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+			   0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+	},
+	{
+		.freq = 5260,
+		RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+			   0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+			   0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+			   0x00, 0x0d, 0x00, 0x6f, 0x00),
+		PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+	},
+	{
+		.freq = 5270,
+		RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+			   0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+	},
+	{
+		.freq = 5280,
+		RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+			   0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+	},
+	{
+		.freq = 5290,
+		RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+			   0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+	},
+	{
+		.freq = 5300,
+		RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+			   0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+	},
+	{
+		.freq = 5310,
+		RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+			   0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+	},
+	{
+		.freq = 5320,
+		RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+			   0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+			   0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0c, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+	},
+	{
+		.freq = 5330,
+		RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+			   0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+			   0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0b, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+	},
+	{
+		.freq = 5340,
+		RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+			   0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+			   0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0b, 0x00, 0x6f, 0x00),
+		PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+	},
+	{
+		.freq = 5350,
+		RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+			   0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+			   0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0b, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+	},
+	{
+		.freq = 5360,
+		RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+			   0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+	},
+	{
+		.freq = 5370,
+		RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+			   0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+	},
+	{
+		.freq = 5380,
+		RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+			   0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+	},
+	{
+		.freq = 5390,
+		RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+			   0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+	},
+	{
+		.freq = 5400,
+		RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+			   0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+	},
+	{
+		.freq = 5410,
+		RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+			   0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+	},
+	{
+		.freq = 5420,
+		RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+			   0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+	},
+	{
+		.freq = 5430,
+		RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+			   0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+			   0x00, 0x0a, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+	},
+	{
+		.freq = 5440,
+		RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+			   0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+	},
+	{
+		.freq = 5450,
+		RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+			   0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+	},
+	{
+		.freq = 5460,
+		RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+			   0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+	},
+	{
+		.freq = 5470,
+		RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+			   0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+	},
+	{
+		.freq = 5480,
+		RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+			   0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+	},
+	{
+		.freq = 5490,
+		RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+			   0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+	},
+	{
+		.freq = 5500,
+		RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+			   0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+	},
+	{
+		.freq = 5510,
+		RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+			   0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+	},
+	{
+		.freq = 5520,
+		RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+			   0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+	},
+	{
+		.freq = 5530,
+		RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+			   0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+	},
+	{
+		.freq = 5540,
+		RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+			   0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+	},
+	{
+		.freq = 5550,
+		RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+			   0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+	},
+	{
+		.freq = 5560,
+		RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+			   0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+	},
+	{
+		.freq = 5570,
+		RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+			   0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+			   0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x09, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+	},
+	{
+		.freq = 5580,
+		RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+			   0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+			   0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x08, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+	},
+	{
+		.freq = 5590,
+		RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+			   0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+			   0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x08, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+	},
+	{
+		.freq = 5600,
+		RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+			   0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+			   0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x08, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+	},
+	{
+		.freq = 5610,
+		RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+			   0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+			   0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x08, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+	},
+	{
+		.freq = 5620,
+		RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+			   0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+			   0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x07, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+	},
+	{
+		.freq = 5630,
+		RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+			   0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+			   0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x07, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+	},
+	{
+		.freq = 5640,
+		RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+			   0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+			   0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x07, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+	},
+	{
+		.freq = 5650,
+		RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+			   0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+			   0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x07, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+	},
+	{
+		.freq = 5660,
+		RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+			   0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+	},
+	{
+		.freq = 5670,
+		RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+			   0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+	},
+	{
+		.freq = 5680,
+		RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+	},
+	{
+		.freq = 5690,
+		RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6f, 0x00),
+		PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+	},
+	{
+		.freq = 5700,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6e, 0x00),
+		PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+	},
+	{
+		.freq = 5710,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6e, 0x00),
+		PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+	},
+	{
+		.freq = 5720,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6e, 0x00),
+		PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+	},
+	{
+		.freq = 5725,
+		RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+			   0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6e, 0x00),
+		PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+	},
+	{
+		.freq = 5730,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6e, 0x00),
+		PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+	},
+	{
+		.freq = 5735,
+		RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6d, 0x00),
+		PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+	},
+	{
+		.freq = 5740,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6d, 0x00),
+		PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+	},
+	{
+		.freq = 5745,
+		RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+			   0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x06, 0x00, 0x6d, 0x00),
+		PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+	},
+	{
+		.freq = 5750,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6d, 0x00),
+		PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+	},
+	{
+		.freq = 5755,
+		RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+			   0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6c, 0x00),
+		PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+	},
+	{
+		.freq = 5760,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+			   0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6c, 0x00),
+		PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+	},
+	{
+		.freq = 5765,
+		RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+			   0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6c, 0x00),
+		PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+	},
+	{
+		.freq = 5770,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+			   0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+	},
+	{
+		.freq = 5775,
+		RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+			   0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+	},
+	{
+		.freq = 5780,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+			   0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+	},
+	{
+		.freq = 5785,
+		RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+	},
+	{
+		.freq = 5790,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+	},
+	{
+		.freq = 5795,
+		RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+	},
+	{
+		.freq = 5800,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6b, 0x00),
+		PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+	},
+	{
+		.freq = 5805,
+		RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6a, 0x00),
+		PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+	},
+	{
+		.freq = 5810,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6a, 0x00),
+		PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+	},
+	{
+		.freq = 5815,
+		RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6a, 0x00),
+		PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+	},
+	{
+		.freq = 5820,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x6a, 0x00),
+		PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+	},
+	{
+		.freq = 5825,
+		RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+			   0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x69, 0x00),
+		PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+	},
+	{
+		.freq = 5830,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+			   0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x05, 0x00, 0x69, 0x00),
+		PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+	},
+	{
+		.freq = 5840,
+		RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x69, 0x00),
+		PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+	},
+	{
+		.freq = 5850,
+		RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x69, 0x00),
+		PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+	},
+	{
+		.freq = 5860,
+		RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x69, 0x00),
+		PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+	},
+	{
+		.freq = 5870,
+		RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x68, 0x00),
+		PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+	},
+	{
+		.freq = 5880,
+		RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x68, 0x00),
+		PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+	},
+	{
+		.freq = 5890,
+		RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x68, 0x00),
+		PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+	},
+	{
+		.freq = 5900,
+		RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x68, 0x00),
+		PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+	},
+	{
+		.freq = 5910,
+		RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x02,
+			   0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+			   0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+			   0x00, 0x04, 0x00, 0x68, 0x00),
+		PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+	},
+	{
+		.freq = 2412,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0b, 0x00, 0x0a),
+		PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+	},
+	{
+		.freq = 2417,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0b, 0x00, 0x0a),
+		PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+	},
+	{
+		.freq = 2422,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0b, 0x00, 0x0a),
+		PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+	},
+	{
+		.freq = 2427,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x0a),
+		PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+	},
+	{
+		.freq = 2432,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x0a),
+		PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+	},
+	{
+		.freq = 2437,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x0a),
+		PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+	},
+	{
+		.freq = 2442,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x0a),
+		PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+	},
+	{
+		.freq = 2447,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x09),
+		PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+	},
+	{
+		.freq = 2452,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x09),
+		PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+	},
+	{
+		.freq = 2457,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x0a, 0x00, 0x09),
+		PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+	},
+	{
+		.freq = 2462,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x09, 0x00, 0x09),
+		PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+	},
+	{
+		.freq = 2467,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x09, 0x00, 0x09),
+		PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+	},
+	{
+		.freq = 2472,
+		RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+			   0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x09, 0x00, 0x09),
+		PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+	},
+	{
+		.freq = 2484,
+		RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+			   0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+			   0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+			   0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+			   0x70, 0x00, 0x09, 0x00, 0x09),
+		PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+	},
+};
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	switch (dev->phy.rev) {
+	case 3:
+		return &b2056_inittab_phy_rev3;
+	case 4:
+		return &b2056_inittab_phy_rev4;
+	default:
+		switch (phy->radio_rev) {
+		case 5:
+			return &b2056_inittab_radio_rev5;
+		case 6:
+			return &b2056_inittab_radio_rev6;
+		case 7:
+		case 9:
+			return &b2056_inittab_radio_rev7_9;
+		case 8:
+			return &b2056_inittab_radio_rev8;
+		case 11:
+			return &b2056_inittab_radio_rev11;
+		}
+	}
+
+	return NULL;
+}
+
 static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
 				 bool ignore_uploadflag, u16 routing,
 				 const struct b2056_inittab_entry *e,
@@ -9037,11 +10230,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
 {
 	const struct b2056_inittabs_pts *pts;
 
-	if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+	pts = b43_nphy_get_inittabs_rev3(dev);
+	if (!pts) {
 		B43_WARN_ON(1);
 		return;
 	}
-	pts = &b2056_inittabs[dev->phy.rev];
 
 	b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
 				B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +10253,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 	const struct b2056_inittabs_pts *pts;
 	const struct b2056_inittab_entry *e;
 
-	if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+	pts = b43_nphy_get_inittabs_rev3(dev);
+	if (!pts) {
 		B43_WARN_ON(1);
 		return;
 	}
-	pts = &b2056_inittabs[dev->phy.rev];
+
 	e = &pts->syn[B2056_SYN_PLL_CP2];
 
 	b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +10267,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
+	struct b43_phy *phy = &dev->phy;
 	const struct b43_nphy_channeltab_entry_rev3 *e;
 	unsigned int length, i;
 
-	switch (dev->phy.rev) {
+	switch (phy->rev) {
 	case 3:
-		e = b43_nphy_channeltab_rev3;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+		e = b43_nphy_channeltab_phy_rev3;
+		length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
 		break;
 	case 4:
-		e = b43_nphy_channeltab_rev4;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
-		break;
-	case 5:
-		e = b43_nphy_channeltab_rev5;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
-		break;
-	case 6:
-		e = b43_nphy_channeltab_rev6;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
-		break;
-	case 7:
-	case 9:
-		e = b43_nphy_channeltab_rev7_9;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
-		break;
-	case 8:
-		e = b43_nphy_channeltab_rev8;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+		e = b43_nphy_channeltab_phy_rev4;
+		length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
 		break;
 	default:
-		B43_WARN_ON(1);
-		return NULL;
+		switch (phy->radio_rev) {
+		case 5:
+			e = b43_nphy_channeltab_radio_rev5;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+			break;
+		case 6:
+			e = b43_nphy_channeltab_radio_rev6;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+			break;
+		case 7:
+		case 9:
+			e = b43_nphy_channeltab_radio_rev7_9;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+			break;
+		case 8:
+			e = b43_nphy_channeltab_radio_rev8;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+			break;
+		case 11:
+			e = b43_nphy_channeltab_radio_rev11;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+			break;
+		default:
+			B43_WARN_ON(1);
+			return NULL;
+		}
 	}
 
 	for (i = 0; i < length; i++, e++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 94c755fdda14..4047c05e3807 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
1627 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be, 1627 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
1628}; 1628};
1629 1629
1630static const u32 b43_ntab_noisevar0_r3[] = { 1630static const u32 b43_ntab_noisevar_r3[] = {
1631 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1632 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1633 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1634 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1635 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1636 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1637 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1638 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1639 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1640 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1641 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1642 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1643 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1644 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1645 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1646 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1647 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1648 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1649 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1650 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1651 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1652 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1653 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1654 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1655 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1656 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1657 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1658 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1659 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1660 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1661 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1662 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1663 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1664 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1665 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1666 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1667 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1668 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1669 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1670 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1671 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1672 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1673 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1674 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1675 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1676 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1677 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1678 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1679 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1680 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1681 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1682 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1683 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1684 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1685 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1686 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1687 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1688 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1689 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1690 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1691 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1692 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1693 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1694 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1695};
1696
1697static const u32 b43_ntab_noisevar1_r3[] = {
1698 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 1631 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1699 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 1632 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1700 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 1633 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3109,31 +3042,32 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
3109 antswlut = sprom->fem.ghz2.antswlut; 3042 antswlut = sprom->fem.ghz2.antswlut;
3110 3043
3111 /* Static tables */ 3044 /* Static tables */
3112 ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); 3045 if (dev->phy.do_full_init) {
3113 ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); 3046 ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
3114 ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3); 3047 ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
3115 ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3); 3048 ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
3116 ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3); 3049 ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
3117 ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3); 3050 ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
3118 ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3); 3051 ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
3119 ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3); 3052 ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
3120 ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3); 3053 ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
3121 ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3); 3054 ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
3122 ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3); 3055 ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
3123 ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3); 3056 ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
3124 ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3); 3057 ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
3125 ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3); 3058 ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
3126 ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3); 3059 ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
3127 ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3); 3060 ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
3128 ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3); 3061 ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
3129 ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3); 3062 ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
3130 ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3); 3063 ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
3131 ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3); 3064 ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
3132 ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3); 3065 ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
3133 ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3); 3066 ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
3134 ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3); 3067 ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
3135 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3); 3068 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
3136 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); 3069 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
3070 }
3137 3071
3138 /* Volatile tables */ 3072 /* Volatile tables */
3139 if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3)) 3073 if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3))
@@ -3146,20 +3080,22 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
3146static void b43_nphy_tables_init_rev0(struct b43_wldev *dev) 3080static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
3147{ 3081{
3148 /* Static tables */ 3082 /* Static tables */
3149 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); 3083 if (dev->phy.do_full_init) {
3150 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup); 3084 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
3151 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap); 3085 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
3152 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn); 3086 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
3153 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel); 3087 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
3154 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot); 3088 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
3155 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0); 3089 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
3156 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1); 3090 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
3157 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0); 3091 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
3158 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1); 3092 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
3159 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest); 3093 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
3160 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs); 3094 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
3161 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10); 3095 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
3162 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11); 3096 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
3097 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
3098 }
3163 3099
3164 /* Volatile tables */ 3100 /* Volatile tables */
3165 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi); 3101 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9ff33adcff89..3a58aee4c4cf 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
143#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */ 143#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
144#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */ 144#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
145#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */ 145#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
146#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */ 146#define B43_NTAB_NOISEVAR_R3 B43_NTAB32(16, 0) /* noise variance */
147#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
148#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */ 147#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
149#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ 148#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
150#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ 149#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9b1a038be08b..c218c08fb2f5 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
441 441
442static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */ 442static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
443{ 443{
444 b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480); 444 b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
445} 445}
446 446
447static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) 447static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 31adb8cf0291..4f38f19b8e3d 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -408,7 +408,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
408 mac_ctl |= B43_TXH_MAC_HWSEQ; 408 mac_ctl |= B43_TXH_MAC_HWSEQ;
409 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 409 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
410 mac_ctl |= B43_TXH_MAC_STMSDU; 410 mac_ctl |= B43_TXH_MAC_STMSDU;
411 if (phy->type == B43_PHYTYPE_A) 411 if (!phy->gmode)
412 mac_ctl |= B43_TXH_MAC_5GHZ; 412 mac_ctl |= B43_TXH_MAC_5GHZ;
413 413
414 /* Overwrite rates[0].count to make the retry calculation 414 /* Overwrite rates[0].count to make the retry calculation
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1d2ceac3a221..98e67c18f276 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -33,7 +33,7 @@ brcmfmac-objs += \
33 bcdc.o \ 33 bcdc.o \
34 dhd_common.o \ 34 dhd_common.o \
35 dhd_linux.o \ 35 dhd_linux.o \
36 nvram.o \ 36 firmware.o \
37 btcoex.o 37 btcoex.o
38brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ 38brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
39 dhd_sdio.o \ 39 dhd_sdio.o \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 939d6b132922..16f9ab2568a8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -186,7 +186,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
186void brcmf_txflowblock_if(struct brcmf_if *ifp, 186void brcmf_txflowblock_if(struct brcmf_if *ifp,
187 enum brcmf_netif_stop_reason reason, bool state); 187 enum brcmf_netif_stop_reason reason, bool state);
188u32 brcmf_get_chip_info(struct brcmf_if *ifp); 188u32 brcmf_get_chip_info(struct brcmf_if *ifp);
189void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, 189void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
190 bool success); 190 bool success);
191 191
192/* Sets dongle media info (drv_version, mac address). */ 192/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index c4535616064e..7735328fff21 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -63,7 +63,6 @@ struct brcmf_bus_dcmd {
63 */ 63 */
64struct brcmf_bus_ops { 64struct brcmf_bus_ops {
65 int (*preinit)(struct device *dev); 65 int (*preinit)(struct device *dev);
66 int (*init)(struct device *dev);
67 void (*stop)(struct device *dev); 66 void (*stop)(struct device *dev);
68 int (*txdata)(struct device *dev, struct sk_buff *skb); 67 int (*txdata)(struct device *dev, struct sk_buff *skb);
69 int (*txctl)(struct device *dev, unsigned char *msg, uint len); 68 int (*txctl)(struct device *dev, unsigned char *msg, uint len);
@@ -99,6 +98,7 @@ struct brcmf_bus {
99 unsigned long tx_realloc; 98 unsigned long tx_realloc;
100 u32 chip; 99 u32 chip;
101 u32 chiprev; 100 u32 chiprev;
101 bool always_use_fws_queue;
102 102
103 struct brcmf_bus_ops *ops; 103 struct brcmf_bus_ops *ops;
104}; 104};
@@ -113,11 +113,6 @@ static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
113 return bus->ops->preinit(bus->dev); 113 return bus->ops->preinit(bus->dev);
114} 114}
115 115
116static inline int brcmf_bus_init(struct brcmf_bus *bus)
117{
118 return bus->ops->init(bus->dev);
119}
120
121static inline void brcmf_bus_stop(struct brcmf_bus *bus) 116static inline void brcmf_bus_stop(struct brcmf_bus *bus)
122{ 117{
123 bus->ops->stop(bus->dev); 118 bus->ops->stop(bus->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6a8983a1fb9c..ed3e32ce8c23 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,6 +32,9 @@
32#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 32#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
33#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00" 33#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
34 34
35/* boost value for RSSI_DELTA in preferred join selection */
36#define BRCMF_JOIN_PREF_RSSI_BOOST 8
37
35 38
36bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, 39bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
37 struct sk_buff *pkt, int prec) 40 struct sk_buff *pkt, int prec)
@@ -246,6 +249,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
246{ 249{
247 s8 eventmask[BRCMF_EVENTING_MASK_LEN]; 250 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
248 u8 buf[BRCMF_DCMD_SMLEN]; 251 u8 buf[BRCMF_DCMD_SMLEN];
252 struct brcmf_join_pref_params join_pref_params[2];
249 char *ptr; 253 char *ptr;
250 s32 err; 254 s32 err;
251 255
@@ -298,6 +302,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
298 goto done; 302 goto done;
299 } 303 }
300 304
 305	/* Setup join_pref to select target by RSSI (with boost on 5GHz) */
306 join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
307 join_pref_params[0].len = 2;
308 join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
309 join_pref_params[0].band = WLC_BAND_5G;
310 join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
311 join_pref_params[1].len = 2;
312 join_pref_params[1].rssi_gain = 0;
313 join_pref_params[1].band = 0;
314 err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
315 sizeof(join_pref_params));
316 if (err)
317 brcmf_err("Set join_pref error (%d)\n", err);
318
301 /* Setup event_msgs, enable E_IF */ 319 /* Setup event_msgs, enable E_IF */
302 err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask, 320 err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
303 BRCMF_EVENTING_MASK_LEN); 321 BRCMF_EVENTING_MASK_LEN);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 7d28cd385092..09dd8c13d844 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -190,7 +190,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
190 int ret; 190 int ret;
191 struct brcmf_if *ifp = netdev_priv(ndev); 191 struct brcmf_if *ifp = netdev_priv(ndev);
192 struct brcmf_pub *drvr = ifp->drvr; 192 struct brcmf_pub *drvr = ifp->drvr;
193 struct ethhdr *eh; 193 struct ethhdr *eh = (struct ethhdr *)(skb->data);
194 194
195 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); 195 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
196 196
@@ -236,6 +236,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
236 goto done; 236 goto done;
237 } 237 }
238 238
239 if (eh->h_proto == htons(ETH_P_PAE))
240 atomic_inc(&ifp->pend_8021x_cnt);
241
239 ret = brcmf_fws_process_skb(ifp, skb); 242 ret = brcmf_fws_process_skb(ifp, skb);
240 243
241done: 244done:
@@ -538,31 +541,26 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
538 brcmf_netif_rx(ifp, skb); 541 brcmf_netif_rx(ifp, skb);
539} 542}
540 543
541void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, 544void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
542 bool success) 545 bool success)
543{ 546{
544 struct brcmf_if *ifp; 547 struct brcmf_if *ifp;
545 struct ethhdr *eh; 548 struct ethhdr *eh;
546 u8 ifidx;
547 u16 type; 549 u16 type;
548 int res;
549
550 res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
551 550
552 ifp = drvr->iflist[ifidx]; 551 ifp = drvr->iflist[ifidx];
553 if (!ifp) 552 if (!ifp)
554 goto done; 553 goto done;
555 554
556 if (res == 0) { 555 eh = (struct ethhdr *)(txp->data);
557 eh = (struct ethhdr *)(txp->data); 556 type = ntohs(eh->h_proto);
558 type = ntohs(eh->h_proto);
559 557
560 if (type == ETH_P_PAE) { 558 if (type == ETH_P_PAE) {
561 atomic_dec(&ifp->pend_8021x_cnt); 559 atomic_dec(&ifp->pend_8021x_cnt);
562 if (waitqueue_active(&ifp->pend_8021x_wait)) 560 if (waitqueue_active(&ifp->pend_8021x_wait))
563 wake_up(&ifp->pend_8021x_wait); 561 wake_up(&ifp->pend_8021x_wait);
564 }
565 } 562 }
563
566 if (!success) 564 if (!success)
567 ifp->stats.tx_errors++; 565 ifp->stats.tx_errors++;
568done: 566done:
@@ -573,13 +571,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
573{ 571{
574 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 572 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
575 struct brcmf_pub *drvr = bus_if->drvr; 573 struct brcmf_pub *drvr = bus_if->drvr;
574 u8 ifidx;
576 575
577 /* await txstatus signal for firmware if active */ 576 /* await txstatus signal for firmware if active */
578 if (brcmf_fws_fc_active(drvr->fws)) { 577 if (brcmf_fws_fc_active(drvr->fws)) {
579 if (!success) 578 if (!success)
580 brcmf_fws_bustxfail(drvr->fws, txp); 579 brcmf_fws_bustxfail(drvr->fws, txp);
581 } else { 580 } else {
582 brcmf_txfinalize(drvr, txp, success); 581 if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
582 brcmu_pkt_buf_free_skb(txp);
583 else
584 brcmf_txfinalize(drvr, txp, ifidx, success);
583 } 585 }
584} 586}
585 587
@@ -914,13 +916,6 @@ int brcmf_bus_start(struct device *dev)
914 916
915 brcmf_dbg(TRACE, "\n"); 917 brcmf_dbg(TRACE, "\n");
916 918
917 /* Bring up the bus */
918 ret = brcmf_bus_init(bus_if);
919 if (ret != 0) {
920 brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
921 return ret;
922 }
923
924 /* add primary networking interface */ 919 /* add primary networking interface */
925 ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL); 920 ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
926 if (IS_ERR(ifp)) 921 if (IS_ERR(ifp))
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 13c89a0c4ba7..8fa0dbbbda72 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -42,7 +42,7 @@
42#include <soc.h> 42#include <soc.h>
43#include "sdio_host.h" 43#include "sdio_host.h"
44#include "chip.h" 44#include "chip.h"
45#include "nvram.h" 45#include "firmware.h"
46 46
47#define DCMD_RESP_TIMEOUT 2000 /* In milli second */ 47#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
48 48
@@ -632,43 +632,28 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
632 { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) } 632 { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
633}; 633};
634 634
635 635static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
636static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus, 636 enum brcmf_firmware_type type)
637 enum brcmf_firmware_type type)
638{ 637{
639 const struct firmware *fw; 638 int i;
640 const char *name;
641 int err, i;
642 639
643 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) { 640 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
644 if (brcmf_fwname_data[i].chipid == bus->ci->chip && 641 if (brcmf_fwname_data[i].chipid == ci->chip &&
645 brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) { 642 brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
646 switch (type) { 643 switch (type) {
647 case BRCMF_FIRMWARE_BIN: 644 case BRCMF_FIRMWARE_BIN:
648 name = brcmf_fwname_data[i].bin; 645 return brcmf_fwname_data[i].bin;
649 break;
650 case BRCMF_FIRMWARE_NVRAM: 646 case BRCMF_FIRMWARE_NVRAM:
651 name = brcmf_fwname_data[i].nv; 647 return brcmf_fwname_data[i].nv;
652 break;
653 default: 648 default:
654 brcmf_err("invalid firmware type (%d)\n", type); 649 brcmf_err("invalid firmware type (%d)\n", type);
655 return NULL; 650 return NULL;
656 } 651 }
657 goto found;
658 } 652 }
659 } 653 }
660 brcmf_err("Unknown chipid %d [%d]\n", 654 brcmf_err("Unknown chipid %d [%d]\n",
661 bus->ci->chip, bus->ci->chiprev); 655 ci->chip, ci->chiprev);
662 return NULL; 656 return NULL;
663
664found:
665 err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
666 if ((err) || (!fw)) {
667 brcmf_err("fail to request firmware %s (%d)\n", name, err);
668 return NULL;
669 }
670
671 return fw;
672} 657}
673 658
674static void pkt_align(struct sk_buff *p, int len, int align) 659static void pkt_align(struct sk_buff *p, int len, int align)
@@ -3278,20 +3263,13 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3278} 3263}
3279 3264
3280static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus, 3265static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
3281 const struct firmware *nv) 3266 void *vars, u32 varsz)
3282{ 3267{
3283 void *vars;
3284 u32 varsz;
3285 int address; 3268 int address;
3286 int err; 3269 int err;
3287 3270
3288 brcmf_dbg(TRACE, "Enter\n"); 3271 brcmf_dbg(TRACE, "Enter\n");
3289 3272
3290 vars = brcmf_nvram_strip(nv, &varsz);
3291
3292 if (vars == NULL)
3293 return -EINVAL;
3294
3295 address = bus->ci->ramsize - varsz + bus->ci->rambase; 3273 address = bus->ci->ramsize - varsz + bus->ci->rambase;
3296 err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz); 3274 err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
3297 if (err) 3275 if (err)
@@ -3300,15 +3278,14 @@ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
3300 else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz)) 3278 else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
3301 err = -EIO; 3279 err = -EIO;
3302 3280
3303 brcmf_nvram_free(vars);
3304
3305 return err; 3281 return err;
3306} 3282}
3307 3283
3308static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus) 3284static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3285 const struct firmware *fw,
3286 void *nvram, u32 nvlen)
3309{ 3287{
3310 int bcmerror = -EFAULT; 3288 int bcmerror = -EFAULT;
3311 const struct firmware *fw;
3312 u32 rstvec; 3289 u32 rstvec;
3313 3290
3314 sdio_claim_host(bus->sdiodev->func[1]); 3291 sdio_claim_host(bus->sdiodev->func[1]);
@@ -3317,12 +3294,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
3317 /* Keep arm in reset */ 3294 /* Keep arm in reset */
3318 brcmf_chip_enter_download(bus->ci); 3295 brcmf_chip_enter_download(bus->ci);
3319 3296
3320 fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
3321 if (fw == NULL) {
3322 bcmerror = -ENOENT;
3323 goto err;
3324 }
3325
3326 rstvec = get_unaligned_le32(fw->data); 3297 rstvec = get_unaligned_le32(fw->data);
3327 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec); 3298 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
3328 3299
@@ -3330,17 +3301,12 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
3330 release_firmware(fw); 3301 release_firmware(fw);
3331 if (bcmerror) { 3302 if (bcmerror) {
3332 brcmf_err("dongle image file download failed\n"); 3303 brcmf_err("dongle image file download failed\n");
3304 brcmf_fw_nvram_free(nvram);
3333 goto err; 3305 goto err;
3334 } 3306 }
3335 3307
3336 fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM); 3308 bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
3337 if (fw == NULL) { 3309 brcmf_fw_nvram_free(nvram);
3338 bcmerror = -ENOENT;
3339 goto err;
3340 }
3341
3342 bcmerror = brcmf_sdio_download_nvram(bus, fw);
3343 release_firmware(fw);
3344 if (bcmerror) { 3310 if (bcmerror) {
3345 brcmf_err("dongle nvram file download failed\n"); 3311 brcmf_err("dongle nvram file download failed\n");
3346 goto err; 3312 goto err;
@@ -3490,97 +3456,6 @@ done:
3490 return err; 3456 return err;
3491} 3457}
3492 3458
3493static int brcmf_sdio_bus_init(struct device *dev)
3494{
3495 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3496 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3497 struct brcmf_sdio *bus = sdiodev->bus;
3498 int err, ret = 0;
3499 u8 saveclk;
3500
3501 brcmf_dbg(TRACE, "Enter\n");
3502
3503 /* try to download image and nvram to the dongle */
3504 if (bus_if->state == BRCMF_BUS_DOWN) {
3505 bus->alp_only = true;
3506 err = brcmf_sdio_download_firmware(bus);
3507 if (err)
3508 return err;
3509 bus->alp_only = false;
3510 }
3511
3512 if (!bus->sdiodev->bus_if->drvr)
3513 return 0;
3514
3515 /* Start the watchdog timer */
3516 bus->sdcnt.tickcnt = 0;
3517 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
3518
3519 sdio_claim_host(bus->sdiodev->func[1]);
3520
3521 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3522 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3523 if (bus->clkstate != CLK_AVAIL)
3524 goto exit;
3525
3526 /* Force clocks on backplane to be sure F2 interrupt propagates */
3527 saveclk = brcmf_sdiod_regrb(bus->sdiodev,
3528 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3529 if (!err) {
3530 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3531 (saveclk | SBSDIO_FORCE_HT), &err);
3532 }
3533 if (err) {
3534 brcmf_err("Failed to force clock for F2: err %d\n", err);
3535 goto exit;
3536 }
3537
3538 /* Enable function 2 (frame transfers) */
3539 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3540 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3541 err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3542
3543
3544 brcmf_dbg(INFO, "enable F2: err=%d\n", err);
3545
3546 /* If F2 successfully enabled, set core and enable interrupts */
3547 if (!err) {
3548 /* Set up the interrupt mask and enable interrupts */
3549 bus->hostintmask = HOSTINTMASK;
3550 w_sdreg32(bus, bus->hostintmask,
3551 offsetof(struct sdpcmd_regs, hostintmask));
3552
3553 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3554 } else {
3555 /* Disable F2 again */
3556 sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3557 ret = -ENODEV;
3558 }
3559
3560 if (brcmf_chip_sr_capable(bus->ci)) {
3561 brcmf_sdio_sr_init(bus);
3562 } else {
3563 /* Restore previous clock setting */
3564 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3565 saveclk, &err);
3566 }
3567
3568 if (ret == 0) {
3569 ret = brcmf_sdiod_intr_register(bus->sdiodev);
3570 if (ret != 0)
3571 brcmf_err("intr register failed:%d\n", ret);
3572 }
3573
3574 /* If we didn't come up, turn off backplane clock */
3575 if (ret != 0)
3576 brcmf_sdio_clkctl(bus, CLK_NONE, false);
3577
3578exit:
3579 sdio_release_host(bus->sdiodev->func[1]);
3580
3581 return ret;
3582}
3583
3584void brcmf_sdio_isr(struct brcmf_sdio *bus) 3459void brcmf_sdio_isr(struct brcmf_sdio *bus)
3585{ 3460{
3586 brcmf_dbg(TRACE, "Enter\n"); 3461 brcmf_dbg(TRACE, "Enter\n");
@@ -4020,13 +3895,114 @@ brcmf_sdio_watchdog(unsigned long data)
4020static struct brcmf_bus_ops brcmf_sdio_bus_ops = { 3895static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
4021 .stop = brcmf_sdio_bus_stop, 3896 .stop = brcmf_sdio_bus_stop,
4022 .preinit = brcmf_sdio_bus_preinit, 3897 .preinit = brcmf_sdio_bus_preinit,
4023 .init = brcmf_sdio_bus_init,
4024 .txdata = brcmf_sdio_bus_txdata, 3898 .txdata = brcmf_sdio_bus_txdata,
4025 .txctl = brcmf_sdio_bus_txctl, 3899 .txctl = brcmf_sdio_bus_txctl,
4026 .rxctl = brcmf_sdio_bus_rxctl, 3900 .rxctl = brcmf_sdio_bus_rxctl,
4027 .gettxq = brcmf_sdio_bus_gettxq, 3901 .gettxq = brcmf_sdio_bus_gettxq,
4028}; 3902};
4029 3903
3904static void brcmf_sdio_firmware_callback(struct device *dev,
3905 const struct firmware *code,
3906 void *nvram, u32 nvram_len)
3907{
3908 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3909 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3910 struct brcmf_sdio *bus = sdiodev->bus;
3911 int err = 0;
3912 u8 saveclk;
3913
3914 brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
3915
3916 /* try to download image and nvram to the dongle */
3917 if (bus_if->state == BRCMF_BUS_DOWN) {
3918 bus->alp_only = true;
3919 err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
3920 if (err)
3921 goto fail;
3922 bus->alp_only = false;
3923 }
3924
3925 if (!bus_if->drvr)
3926 return;
3927
3928 /* Start the watchdog timer */
3929 bus->sdcnt.tickcnt = 0;
3930 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
3931
3932 sdio_claim_host(sdiodev->func[1]);
3933
3934 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3935 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3936 if (bus->clkstate != CLK_AVAIL)
3937 goto release;
3938
3939 /* Force clocks on backplane to be sure F2 interrupt propagates */
3940 saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
3941 if (!err) {
3942 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3943 (saveclk | SBSDIO_FORCE_HT), &err);
3944 }
3945 if (err) {
3946 brcmf_err("Failed to force clock for F2: err %d\n", err);
3947 goto release;
3948 }
3949
3950 /* Enable function 2 (frame transfers) */
3951 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3952 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3953 err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
3954
3955
3956 brcmf_dbg(INFO, "enable F2: err=%d\n", err);
3957
3958 /* If F2 successfully enabled, set core and enable interrupts */
3959 if (!err) {
3960 /* Set up the interrupt mask and enable interrupts */
3961 bus->hostintmask = HOSTINTMASK;
3962 w_sdreg32(bus, bus->hostintmask,
3963 offsetof(struct sdpcmd_regs, hostintmask));
3964
3965 brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
3966 } else {
3967 /* Disable F2 again */
3968 sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
3969 goto release;
3970 }
3971
3972 if (brcmf_chip_sr_capable(bus->ci)) {
3973 brcmf_sdio_sr_init(bus);
3974 } else {
3975 /* Restore previous clock setting */
3976 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3977 saveclk, &err);
3978 }
3979
3980 if (err == 0) {
3981 err = brcmf_sdiod_intr_register(sdiodev);
3982 if (err != 0)
3983 brcmf_err("intr register failed:%d\n", err);
3984 }
3985
3986 /* If we didn't come up, turn off backplane clock */
3987 if (err != 0)
3988 brcmf_sdio_clkctl(bus, CLK_NONE, false);
3989
3990 sdio_release_host(sdiodev->func[1]);
3991
3992 err = brcmf_bus_start(dev);
3993 if (err != 0) {
3994 brcmf_err("dongle is not responding\n");
3995 goto fail;
3996 }
3997 return;
3998
3999release:
4000 sdio_release_host(sdiodev->func[1]);
4001fail:
4002 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4003 device_release_driver(dev);
4004}
4005
4030struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4006struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4031{ 4007{
4032 int ret; 4008 int ret;
@@ -4110,8 +4086,13 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4110 goto fail; 4086 goto fail;
4111 } 4087 }
4112 4088
4089 /* Query the F2 block size, set roundup accordingly */
4090 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4091 bus->roundup = min(max_roundup, bus->blocksize);
4092
4113 /* Allocate buffers */ 4093 /* Allocate buffers */
4114 if (bus->sdiodev->bus_if->maxctl) { 4094 if (bus->sdiodev->bus_if->maxctl) {
4095 bus->sdiodev->bus_if->maxctl += bus->roundup;
4115 bus->rxblen = 4096 bus->rxblen =
4116 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN), 4097 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
4117 ALIGNMENT) + bus->head_align; 4098 ALIGNMENT) + bus->head_align;
@@ -4139,10 +4120,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4139 bus->idletime = BRCMF_IDLE_INTERVAL; 4120 bus->idletime = BRCMF_IDLE_INTERVAL;
4140 bus->idleclock = BRCMF_IDLE_ACTIVE; 4121 bus->idleclock = BRCMF_IDLE_ACTIVE;
4141 4122
4142 /* Query the F2 block size, set roundup accordingly */
4143 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4144 bus->roundup = min(max_roundup, bus->blocksize);
4145
4146 /* SR state */ 4123 /* SR state */
4147 bus->sleeping = false; 4124 bus->sleeping = false;
4148 bus->sr_enabled = false; 4125 bus->sr_enabled = false;
@@ -4150,10 +4127,14 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4150 brcmf_sdio_debugfs_create(bus); 4127 brcmf_sdio_debugfs_create(bus);
4151 brcmf_dbg(INFO, "completed!!\n"); 4128 brcmf_dbg(INFO, "completed!!\n");
4152 4129
4153 /* if firmware path present try to download and bring up bus */ 4130 ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
4154 ret = brcmf_bus_start(bus->sdiodev->dev); 4131 brcmf_sdio_get_fwname(bus->ci,
4132 BRCMF_FIRMWARE_BIN),
4133 brcmf_sdio_get_fwname(bus->ci,
4134 BRCMF_FIRMWARE_NVRAM),
4135 brcmf_sdio_firmware_callback);
4155 if (ret != 0) { 4136 if (ret != 0) {
4156 brcmf_err("dongle is not responding\n"); 4137 brcmf_err("async firmware request failed: %d\n", ret);
4157 goto fail; 4138 goto fail;
4158 } 4139 }
4159 4140
@@ -4173,9 +4154,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4173 /* De-register interrupt handler */ 4154 /* De-register interrupt handler */
4174 brcmf_sdiod_intr_unregister(bus->sdiodev); 4155 brcmf_sdiod_intr_unregister(bus->sdiodev);
4175 4156
4176 if (bus->sdiodev->bus_if->drvr) { 4157 brcmf_detach(bus->sdiodev->dev);
4177 brcmf_detach(bus->sdiodev->dev);
4178 }
4179 4158
4180 cancel_work_sync(&bus->datawork); 4159 cancel_work_sync(&bus->datawork);
4181 if (bus->brcmf_wq) 4160 if (bus->brcmf_wq)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
new file mode 100644
index 000000000000..7b7d237c1ddb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/device.h>
20#include <linux/firmware.h>
21
22#include "dhd_dbg.h"
23#include "firmware.h"
24
25enum nvram_parser_state {
26 IDLE,
27 KEY,
28 VALUE,
29 COMMENT,
30 END
31};
32
33/**
34 * struct nvram_parser - internal info for parser.
35 *
36 * @state: current parser state.
37 * @fwnv: input buffer being parsed.
38 * @nvram: output buffer with parse result.
 39 * @nvram_len: length of parse result.
40 * @line: current line.
41 * @column: current column in line.
42 * @pos: byte offset in input buffer.
43 * @entry: start position of key,value entry.
44 */
45struct nvram_parser {
46 enum nvram_parser_state state;
47 const struct firmware *fwnv;
48 u8 *nvram;
49 u32 nvram_len;
50 u32 line;
51 u32 column;
52 u32 pos;
53 u32 entry;
54};
55
56static bool is_nvram_char(char c)
57{
58 /* comment marker excluded */
59 if (c == '#')
60 return false;
61
62 /* key and value may have any other readable character */
63 return (c > 0x20 && c < 0x7f);
64}
65
66static bool is_whitespace(char c)
67{
68 return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
69}
70
71static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
72{
73 char c;
74
75 c = nvp->fwnv->data[nvp->pos];
76 if (c == '\n')
77 return COMMENT;
78 if (is_whitespace(c))
79 goto proceed;
80 if (c == '#')
81 return COMMENT;
82 if (is_nvram_char(c)) {
83 nvp->entry = nvp->pos;
84 return KEY;
85 }
86 brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
87 nvp->line, nvp->column);
88proceed:
89 nvp->column++;
90 nvp->pos++;
91 return IDLE;
92}
93
94static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
95{
96 enum nvram_parser_state st = nvp->state;
97 char c;
98
99 c = nvp->fwnv->data[nvp->pos];
100 if (c == '=') {
101 st = VALUE;
102 } else if (!is_nvram_char(c)) {
103 brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
104 nvp->line, nvp->column);
105 return COMMENT;
106 }
107
108 nvp->column++;
109 nvp->pos++;
110 return st;
111}
112
113static enum nvram_parser_state
114brcmf_nvram_handle_value(struct nvram_parser *nvp)
115{
116 char c;
117 char *skv;
118 char *ekv;
119 u32 cplen;
120
121 c = nvp->fwnv->data[nvp->pos];
122 if (!is_nvram_char(c)) {
123 /* key,value pair complete */
 124		ekv = (char *)&nvp->fwnv->data[nvp->pos];
 125		skv = (char *)&nvp->fwnv->data[nvp->entry];
126 cplen = ekv - skv;
127 /* copy to output buffer */
128 memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
129 nvp->nvram_len += cplen;
130 nvp->nvram[nvp->nvram_len] = '\0';
131 nvp->nvram_len++;
132 return IDLE;
133 }
134 nvp->pos++;
135 nvp->column++;
136 return VALUE;
137}
138
139static enum nvram_parser_state
140brcmf_nvram_handle_comment(struct nvram_parser *nvp)
141{
142 char *eol, *sol;
143
144 sol = (char *)&nvp->fwnv->data[nvp->pos];
145 eol = strchr(sol, '\n');
146 if (eol == NULL)
147 return END;
148
 149	/* consume the rest of the line, moving to the next one */
150 nvp->line++;
151 nvp->column = 1;
152 nvp->pos += (eol - sol) + 1;
153 return IDLE;
154}
155
156static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
157{
158 /* final state */
159 return END;
160}
161
162static enum nvram_parser_state
163(*nv_parser_states[])(struct nvram_parser *nvp) = {
164 brcmf_nvram_handle_idle,
165 brcmf_nvram_handle_key,
166 brcmf_nvram_handle_value,
167 brcmf_nvram_handle_comment,
168 brcmf_nvram_handle_end
169};
170
171static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
172 const struct firmware *nv)
173{
174 memset(nvp, 0, sizeof(*nvp));
175 nvp->fwnv = nv;
176 /* Alloc for extra 0 byte + roundup by 4 + length field */
177 nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
178 if (!nvp->nvram)
179 return -ENOMEM;
180
181 nvp->line = 1;
182 nvp->column = 1;
183 return 0;
184}
185
 186/* brcmf_fw_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
 187 * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
 188 * and converts newlines to NULs. Shortens the buffer as needed and pads with NULs.
 189 * The end of the buffer is completed with a token identifying its length.
 190 */
191static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
192{
193 struct nvram_parser nvp;
194 u32 pad;
195 u32 token;
196 __le32 token_le;
197
198 if (brcmf_init_nvram_parser(&nvp, nv) < 0)
199 return NULL;
200
201 while (nvp.pos < nv->size) {
202 nvp.state = nv_parser_states[nvp.state](&nvp);
203 if (nvp.state == END)
204 break;
205 }
206 pad = nvp.nvram_len;
207 *new_length = roundup(nvp.nvram_len + 1, 4);
208 while (pad != *new_length) {
209 nvp.nvram[pad] = 0;
210 pad++;
211 }
212
213 token = *new_length / 4;
214 token = (~token << 16) | (token & 0x0000FFFF);
215 token_le = cpu_to_le32(token);
216
217 memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
218 *new_length += sizeof(token_le);
219
220 return nvp.nvram;
221}
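For reference, the length token appended above packs the padded NVRAM size in
32-bit words into the low halfword and its one's complement into the high
halfword. A minimal stand-alone sketch of that arithmetic (illustrative only,
not part of this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t new_length = 12;		/* padded NVRAM length in bytes */
	uint32_t token = new_length / 4;	/* length in 32-bit words: 3 */

	/* low 16 bits: word count; high 16 bits: its one's complement */
	token = (~token << 16) | (token & 0x0000FFFF);
	printf("token = 0x%08x\n", token);	/* prints 0xfffc0003 */
	return 0;
}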
222
223void brcmf_fw_nvram_free(void *nvram)
224{
225 kfree(nvram);
226}
227
228struct brcmf_fw {
229 struct device *dev;
230 u16 flags;
231 const struct firmware *code;
232 const char *nvram_name;
233 void (*done)(struct device *dev, const struct firmware *fw,
234 void *nvram_image, u32 nvram_len);
235};
236
237static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
238{
239 struct brcmf_fw *fwctx = ctx;
240 u32 nvram_length = 0;
241 void *nvram = NULL;
242
243 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
244 if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
245 goto fail;
246
247 if (fw) {
248 nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
249 release_firmware(fw);
250 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
251 goto fail;
252 }
253
254 fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
255 kfree(fwctx);
256 return;
257
258fail:
259 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
260 if (fwctx->code)
261 release_firmware(fwctx->code);
262 device_release_driver(fwctx->dev);
263 kfree(fwctx);
264}
265
266static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
267{
268 struct brcmf_fw *fwctx = ctx;
269 int ret;
270
271 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
272 if (!fw)
273 goto fail;
274
 275	/* only the code was requested, so we are done here */
276 if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
277 fwctx->done(fwctx->dev, fw, NULL, 0);
278 kfree(fwctx);
279 return;
280 }
281 fwctx->code = fw;
282 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
283 fwctx->dev, GFP_KERNEL, fwctx,
284 brcmf_fw_request_nvram_done);
285
286 if (!ret)
287 return;
288
 289	/* when nvram is optional, call the .done() callback here */
290 if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
291 fwctx->done(fwctx->dev, fw, NULL, 0);
292 kfree(fwctx);
293 return;
294 }
295
296 /* failed nvram request */
297 release_firmware(fw);
298fail:
299 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
300 device_release_driver(fwctx->dev);
301 kfree(fwctx);
302}
303
304int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
305 const char *code, const char *nvram,
306 void (*fw_cb)(struct device *dev,
307 const struct firmware *fw,
308 void *nvram_image, u32 nvram_len))
309{
310 struct brcmf_fw *fwctx;
311
312 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
313 if (!fw_cb || !code)
314 return -EINVAL;
315
316 if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
317 return -EINVAL;
318
319 fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
320 if (!fwctx)
321 return -ENOMEM;
322
323 fwctx->dev = dev;
324 fwctx->flags = flags;
325 fwctx->done = fw_cb;
326 if (flags & BRCMF_FW_REQUEST_NVRAM)
327 fwctx->nvram_name = nvram;
328
329 return request_firmware_nowait(THIS_MODULE, true, code, dev,
330 GFP_KERNEL, fwctx,
331 brcmf_fw_request_code_done);
332}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index d454580928c9..6431bfd7afff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -13,12 +13,24 @@
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16#ifndef BRCMFMAC_NVRAM_H 16#ifndef BRCMFMAC_FIRMWARE_H
17#define BRCMFMAC_NVRAM_H 17#define BRCMFMAC_FIRMWARE_H
18 18
19#define BRCMF_FW_REQUEST 0x000F
20#define BRCMF_FW_REQUEST_NVRAM 0x0001
21#define BRCMF_FW_REQ_FLAGS 0x00F0
22#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
19 23
20void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length); 24void brcmf_fw_nvram_free(void *nvram);
21void brcmf_nvram_free(void *nvram); 25/*
22 26 * Request firmware(s) asynchronously. When the asynchronous request
27 * fails it will not use the callback, but call device_release_driver()
28 * instead which will call the driver .remove() callback.
29 */
30int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
31 const char *code, const char *nvram,
32 void (*fw_cb)(struct device *dev,
33 const struct firmware *fw,
34 void *nvram_image, u32 nvram_len));
23 35
24#endif /* BRCMFMAC_NVRAM_H */ 36#endif /* BRCMFMAC_FIRMWARE_H */
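As a usage sketch of the new interface (a hypothetical caller modeled on the
dhd_sdio.c hunk above; the my_ names and firmware paths are placeholders, not
part of this patch):

#include <linux/device.h>
#include <linux/firmware.h>
#include "firmware.h"

static void my_fw_callback(struct device *dev, const struct firmware *code,
			   void *nvram, u32 nvram_len)
{
	/* Download 'code' and the stripped 'nvram' image to the dongle and
	 * bring the bus up. The callback owns both buffers and releases
	 * them via release_firmware() and brcmf_fw_nvram_free().
	 */
}

static int my_bus_setup(struct device *dev)
{
	/* Request firmware and NVRAM together. If the asynchronous request
	 * fails, the core calls device_release_driver() instead of the
	 * callback.
	 */
	return brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM,
				      "brcm/my_chip.bin", "brcm/my_chip.txt",
				      my_fw_callback);
}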
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 614e4888504f..2bc68a2137fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -53,6 +53,14 @@
53#define BRCMF_OBSS_COEX_OFF 0 53#define BRCMF_OBSS_COEX_OFF 0
54#define BRCMF_OBSS_COEX_ON 1 54#define BRCMF_OBSS_COEX_ON 1
55 55
56/* join preference types for join_pref iovar */
57enum brcmf_join_pref_types {
58 BRCMF_JOIN_PREF_RSSI = 1,
59 BRCMF_JOIN_PREF_WPA,
60 BRCMF_JOIN_PREF_BAND,
61 BRCMF_JOIN_PREF_RSSI_DELTA,
62};
63
56enum brcmf_fil_p2p_if_types { 64enum brcmf_fil_p2p_if_types {
57 BRCMF_FIL_P2P_IF_CLIENT, 65 BRCMF_FIL_P2P_IF_CLIENT,
58 BRCMF_FIL_P2P_IF_GO, 66 BRCMF_FIL_P2P_IF_GO,
@@ -282,6 +290,22 @@ struct brcmf_assoc_params_le {
282 __le16 chanspec_list[1]; 290 __le16 chanspec_list[1];
283}; 291};
284 292
293/**
 294 * struct brcmf_join_pref_params - parameters for preferred join selection.
295 *
296 * @type: preference type (see enum brcmf_join_pref_types).
297 * @len: length of bytes following (currently always 2).
298 * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
299 * @band: band to which selection preference applies.
300 * This is used if @type is BAND or RSSI_DELTA.
301 */
302struct brcmf_join_pref_params {
303 u8 type;
304 u8 len;
305 u8 rssi_gain;
306 u8 band;
307};
308
285/* used for join with or without a specific bssid and channel list */ 309/* used for join with or without a specific bssid and channel list */
286struct brcmf_join_params { 310struct brcmf_join_params {
287 struct brcmf_ssid_le ssid_le; 311 struct brcmf_ssid_le ssid_le;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index c3e7d76dbf35..699908de314a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -476,6 +476,7 @@ struct brcmf_fws_info {
476 bool bus_flow_blocked; 476 bool bus_flow_blocked;
477 bool creditmap_received; 477 bool creditmap_received;
478 u8 mode; 478 u8 mode;
479 bool avoid_queueing;
479}; 480};
480 481
481/* 482/*
@@ -1369,13 +1370,12 @@ done:
1369} 1370}
1370 1371
1371static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, 1372static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1372 struct sk_buff *skb, u32 genbit, 1373 struct sk_buff *skb, u8 ifidx,
1373 u16 seq) 1374 u32 genbit, u16 seq)
1374{ 1375{
1375 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; 1376 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1376 u32 hslot; 1377 u32 hslot;
1377 int ret; 1378 int ret;
1378 u8 ifidx;
1379 1379
1380 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); 1380 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1381 1381
@@ -1389,29 +1389,21 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1389 1389
1390 entry->generation = genbit; 1390 entry->generation = genbit;
1391 1391
1392 ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); 1392 brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
1393 if (ret == 0) { 1393 brcmf_skbcb(skb)->htod_seq = seq;
1394 brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit); 1394 if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
1395 brcmf_skbcb(skb)->htod_seq = seq; 1395 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
1396 if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) { 1396 brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
1397 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1); 1397 } else {
1398 brcmf_skb_htod_seq_set_field(skb, FROMFW, 0); 1398 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
1399 } else {
1400 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
1401 }
1402 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
1403 skb);
1404 } 1399 }
1400 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
1405 1401
1406 if (ret != 0) { 1402 if (ret != 0) {
1407		/* suppress q is full or hdrpull failed, drop this packet */	1403		/* suppress q is full, drop this packet */
1408 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, 1404 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
1409 true);
1410 } else { 1405 } else {
1411 /* 1406 /* Mark suppressed to avoid a double free during wlfc cleanup */
1412 * Mark suppressed to avoid a double free during
1413 * wlfc cleanup
1414 */
1415 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot); 1407 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
1416 } 1408 }
1417 1409
@@ -1428,6 +1420,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1428 struct sk_buff *skb; 1420 struct sk_buff *skb;
1429 struct brcmf_skbuff_cb *skcb; 1421 struct brcmf_skbuff_cb *skcb;
1430 struct brcmf_fws_mac_descriptor *entry = NULL; 1422 struct brcmf_fws_mac_descriptor *entry = NULL;
1423 u8 ifidx;
1431 1424
1432 brcmf_dbg(DATA, "flags %d\n", flags); 1425 brcmf_dbg(DATA, "flags %d\n", flags);
1433 1426
@@ -1476,12 +1469,15 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1476 } 1469 }
1477 brcmf_fws_macdesc_return_req_credit(skb); 1470 brcmf_fws_macdesc_return_req_credit(skb);
1478 1471
1472 if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
1473 brcmu_pkt_buf_free_skb(skb);
1474 return -EINVAL;
1475 }
1479 if (!remove_from_hanger) 1476 if (!remove_from_hanger)
1480 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit, 1477 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
1481 seq); 1478 genbit, seq);
1482
1483 if (remove_from_hanger || ret) 1479 if (remove_from_hanger || ret)
1484 brcmf_txfinalize(fws->drvr, skb, true); 1480 brcmf_txfinalize(fws->drvr, skb, ifidx, true);
1485 1481
1486 return 0; 1482 return 0;
1487} 1483}
@@ -1868,7 +1864,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1868 struct ethhdr *eh = (struct ethhdr *)(skb->data); 1864 struct ethhdr *eh = (struct ethhdr *)(skb->data);
1869 int fifo = BRCMF_FWS_FIFO_BCMC; 1865 int fifo = BRCMF_FWS_FIFO_BCMC;
1870 bool multicast = is_multicast_ether_addr(eh->h_dest); 1866 bool multicast = is_multicast_ether_addr(eh->h_dest);
1871 bool pae = eh->h_proto == htons(ETH_P_PAE); 1867 int rc = 0;
1872 1868
1873 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); 1869 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
1874 /* determine the priority */ 1870 /* determine the priority */
@@ -1876,8 +1872,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1876 skb->priority = cfg80211_classify8021d(skb, NULL); 1872 skb->priority = cfg80211_classify8021d(skb, NULL);
1877 1873
1878 drvr->tx_multicast += !!multicast; 1874 drvr->tx_multicast += !!multicast;
1879 if (pae) 1875
1880 atomic_inc(&ifp->pend_8021x_cnt); 1876 if (fws->avoid_queueing) {
1877 rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
1878 if (rc < 0)
1879 brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
1880 return rc;
1881 }
1881 1882
1882 /* set control buffer information */ 1883 /* set control buffer information */
1883 skcb->if_flags = 0; 1884 skcb->if_flags = 0;
@@ -1899,15 +1900,12 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1899 brcmf_fws_schedule_deq(fws); 1900 brcmf_fws_schedule_deq(fws);
1900 } else { 1901 } else {
1901 brcmf_err("drop skb: no hanger slot\n"); 1902 brcmf_err("drop skb: no hanger slot\n");
1902 if (pae) { 1903 brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
1903 atomic_dec(&ifp->pend_8021x_cnt); 1904 rc = -ENOMEM;
1904 if (waitqueue_active(&ifp->pend_8021x_wait))
1905 wake_up(&ifp->pend_8021x_wait);
1906 }
1907 brcmu_pkt_buf_free_skb(skb);
1908 } 1905 }
1909 brcmf_fws_unlock(fws); 1906 brcmf_fws_unlock(fws);
1910 return 0; 1907
1908 return rc;
1911} 1909}
1912 1910
1913void brcmf_fws_reset_interface(struct brcmf_if *ifp) 1911void brcmf_fws_reset_interface(struct brcmf_if *ifp)
@@ -1982,7 +1980,8 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
1982 ret = brcmf_proto_txdata(drvr, ifidx, 0, skb); 1980 ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
1983 brcmf_fws_lock(fws); 1981 brcmf_fws_lock(fws);
1984 if (ret < 0) 1982 if (ret < 0)
1985 brcmf_txfinalize(drvr, skb, false); 1983 brcmf_txfinalize(drvr, skb, ifidx,
1984 false);
1986 if (fws->bus_flow_blocked) 1985 if (fws->bus_flow_blocked)
1987 break; 1986 break;
1988 } 1987 }
@@ -2039,6 +2038,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
2039 fws->drvr = drvr; 2038 fws->drvr = drvr;
2040 fws->fcmode = fcmode; 2039 fws->fcmode = fcmode;
2041 2040
2041 if (!drvr->bus_if->always_use_fws_queue &&
2042 (fcmode == BRCMF_FWS_FCMODE_NONE)) {
2043 fws->avoid_queueing = true;
2044 brcmf_dbg(INFO, "FWS queueing will be avoided\n");
2045 return 0;
2046 }
2047
2042 fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); 2048 fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
2043 if (fws->fws_wq == NULL) { 2049 if (fws->fws_wq == NULL) {
2044 brcmf_err("workqueue creation failed\n"); 2050 brcmf_err("workqueue creation failed\n");
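The avoid_queueing flag added above short-circuits the whole firmware-signalling machinery when the firmware reports fcmode NONE and the bus does not insist on the FWS queue. A minimal sketch of the resulting transmit fast path (a restatement of the brcmf_fws_process_skb() hunk, assuming the driver types used there):

/* Fast path taken when FWS queueing is avoided; mirrors the hunk above. */
static int fws_tx_fastpath_sketch(struct brcmf_fws_info *fws,
				  struct brcmf_if *ifp, struct sk_buff *skb)
{
	int rc;

	if (!fws->avoid_queueing)
		return -EAGAIN;		/* caller continues with normal queueing */

	rc = brcmf_proto_txdata(fws->drvr, ifp->ifidx, 0, skb);
	if (rc < 0)			/* bus rejected it: finalize as failed */
		brcmf_txfinalize(fws->drvr, skb, ifp->ifidx, false);
	return rc;
}

Skipping the queue also removes the per-packet 802.1X bookkeeping that the old pae branch carried, which is why brcmf_txfinalize() now takes the ifidx and absorbs that accounting.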
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
deleted file mode 100644
index d5ef86db631b..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/firmware.h>
20
21#include "nvram.h"
22
23/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
24 * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
25 * and converts newlines to NULs. Shortens the buffer as needed and pads with NULs.
26 * The end of the buffer is completed with a token identifying its length.
27 */
28void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
29{
30 u8 *nvram;
31 u32 i;
32 u32 len;
33 u32 column;
34 u8 val;
35 bool comment;
36 u32 token;
37 __le32 token_le;
38
39 /* Alloc for extra 0 byte + roundup by 4 + length field */
40 nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
41 if (!nvram)
42 return NULL;
43
44 len = 0;
45 column = 0;
46 comment = false;
47 for (i = 0; i < nv->size; i++) {
48 val = nv->data[i];
49 if (val == 0)
50 break;
51 if (val == '\r')
52 continue;
53 if (comment && (val != '\n'))
54 continue;
55 comment = false;
56 if (val == '#') {
57 comment = true;
58 continue;
59 }
60 if (val == '\n') {
61 if (column == 0)
62 continue;
63 nvram[len] = 0;
64 len++;
65 column = 0;
66 continue;
67 }
68 nvram[len] = val;
69 len++;
70 column++;
71 }
72 column = len;
73 *new_length = roundup(len + 1, 4);
74 while (column != *new_length) {
75 nvram[column] = 0;
76 column++;
77 }
78
79 token = *new_length / 4;
80 token = (~token << 16) | (token & 0x0000FFFF);
81 token_le = cpu_to_le32(token);
82
83 memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
84 *new_length += sizeof(token_le);
85
86 return nvram;
87}
88
89void brcmf_nvram_free(void *nvram)
90{
91 kfree(nvram);
92}
93
94
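The length token the removed helper appended is easy to sanity-check in isolation: the upper halfword is the one's complement of the 32-bit word count and the lower halfword is the count itself. A standalone arithmetic check (plain userspace C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t new_length = 1024;		/* stripped, NUL-padded size in bytes */
	uint32_t token = new_length / 4;	/* length in 32-bit words */

	/* upper half = one's complement of the word count, lower half = count */
	token = (~token << 16) | (token & 0x0000FFFF);
	printf("0x%08X\n", token);		/* prints 0xFEFF0100 for 1024 bytes */
	return 0;
}

The complemented copy lets the firmware detect a corrupt or truncated NVRAM image from the trailing word alone.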
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 24f65cd53859..6db51a666f61 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -25,6 +25,7 @@
25#include <dhd_bus.h> 25#include <dhd_bus.h>
26#include <dhd_dbg.h> 26#include <dhd_dbg.h>
27 27
28#include "firmware.h"
28#include "usb_rdl.h" 29#include "usb_rdl.h"
29#include "usb.h" 30#include "usb.h"
30 31
@@ -61,12 +62,6 @@ struct brcmf_usb_image {
61 u8 *image; 62 u8 *image;
62 int image_len; 63 int image_len;
63}; 64};
64static struct list_head fw_image_list;
65
66struct intr_transfer_buf {
67 u32 notification;
68 u32 reserved;
69};
70 65
71struct brcmf_usbdev_info { 66struct brcmf_usbdev_info {
72 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */ 67 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
@@ -75,7 +70,7 @@ struct brcmf_usbdev_info {
75 struct list_head rx_postq; 70 struct list_head rx_postq;
76 struct list_head tx_freeq; 71 struct list_head tx_freeq;
77 struct list_head tx_postq; 72 struct list_head tx_postq;
78 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; 73 uint rx_pipe, tx_pipe, rx_pipe2;
79 74
80 int rx_low_watermark; 75 int rx_low_watermark;
81 int tx_low_watermark; 76 int tx_low_watermark;
@@ -87,7 +82,7 @@ struct brcmf_usbdev_info {
87 struct brcmf_usbreq *tx_reqs; 82 struct brcmf_usbreq *tx_reqs;
88 struct brcmf_usbreq *rx_reqs; 83 struct brcmf_usbreq *rx_reqs;
89 84
90 u8 *image; /* buffer for combined fw and nvram */ 85 const u8 *image; /* buffer for combined fw and nvram */
91 int image_len; 86 int image_len;
92 87
93 struct usb_device *usbdev; 88 struct usb_device *usbdev;
@@ -104,10 +99,6 @@ struct brcmf_usbdev_info {
104 ulong ctl_op; 99 ulong ctl_op;
105 100
106 struct urb *bulk_urb; /* used for FW download */ 101 struct urb *bulk_urb; /* used for FW download */
107 struct urb *intr_urb; /* URB for interrupt endpoint */
108 int intr_size; /* Size of interrupt message */
109 int interval; /* Interrupt polling interval */
110 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
111}; 102};
112 103
113static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 104static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -531,39 +522,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
531 } 522 }
532} 523}
533 524
534static void
535brcmf_usb_intr_complete(struct urb *urb)
536{
537 struct brcmf_usbdev_info *devinfo =
538 (struct brcmf_usbdev_info *)urb->context;
539 int err;
540
541 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
542
543 if (devinfo == NULL)
544 return;
545
546 if (unlikely(urb->status)) {
547 if (urb->status == -ENOENT ||
548 urb->status == -ESHUTDOWN ||
549 urb->status == -ENODEV) {
550 brcmf_usb_state_change(devinfo,
551 BRCMFMAC_USB_STATE_DOWN);
552 }
553 }
554
555 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
556 brcmf_err("intr cb when DBUS down, ignoring\n");
557 return;
558 }
559
560 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
561 err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
562 if (err)
563 brcmf_err("usb_submit_urb, err=%d\n", err);
564 }
565}
566
567static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) 525static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
568{ 526{
569 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 527 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -619,7 +577,6 @@ static int brcmf_usb_up(struct device *dev)
619{ 577{
620 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 578 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
621 u16 ifnum; 579 u16 ifnum;
622 int ret;
623 580
624 brcmf_dbg(USB, "Enter\n"); 581 brcmf_dbg(USB, "Enter\n");
625 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) 582 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
@@ -628,23 +585,6 @@ static int brcmf_usb_up(struct device *dev)
628 /* Success, indicate devinfo is fully up */ 585 /* Success, indicate devinfo is fully up */
629 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP); 586 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
630 587
631 if (devinfo->intr_urb) {
632 usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
633 devinfo->intr_pipe,
634 &devinfo->intr,
635 devinfo->intr_size,
636 (usb_complete_t)brcmf_usb_intr_complete,
637 devinfo,
638 devinfo->interval);
639
640 ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
641 if (ret) {
642 brcmf_err("USB_SUBMIT_URB failed with status %d\n",
643 ret);
644 return -EINVAL;
645 }
646 }
647
648 if (devinfo->ctl_urb) { 588 if (devinfo->ctl_urb) {
649 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0); 589 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
650 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0); 590 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
@@ -681,8 +621,6 @@ static void brcmf_usb_down(struct device *dev)
681 return; 621 return;
682 622
683 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN); 623 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
684 if (devinfo->intr_urb)
685 usb_kill_urb(devinfo->intr_urb);
686 624
687 if (devinfo->ctl_urb) 625 if (devinfo->ctl_urb)
688 usb_kill_urb(devinfo->ctl_urb); 626 usb_kill_urb(devinfo->ctl_urb);
@@ -1021,7 +959,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1021 } 959 }
1022 960
1023 err = brcmf_usb_dlstart(devinfo, 961 err = brcmf_usb_dlstart(devinfo,
1024 devinfo->image, devinfo->image_len); 962 (u8 *)devinfo->image, devinfo->image_len);
1025 if (err == 0) 963 if (err == 0)
1026 err = brcmf_usb_dlrun(devinfo); 964 err = brcmf_usb_dlrun(devinfo);
1027 return err; 965 return err;
@@ -1036,7 +974,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
1036 brcmf_usb_free_q(&devinfo->rx_freeq, false); 974 brcmf_usb_free_q(&devinfo->rx_freeq, false);
1037 brcmf_usb_free_q(&devinfo->tx_freeq, false); 975 brcmf_usb_free_q(&devinfo->tx_freeq, false);
1038 976
1039 usb_free_urb(devinfo->intr_urb);
1040 usb_free_urb(devinfo->ctl_urb); 977 usb_free_urb(devinfo->ctl_urb);
1041 usb_free_urb(devinfo->bulk_urb); 978 usb_free_urb(devinfo->bulk_urb);
1042 979
@@ -1080,68 +1017,20 @@ static int check_file(const u8 *headers)
1080 return -1; 1017 return -1;
1081} 1018}
1082 1019
1083static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo) 1020static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
1084{ 1021{
1085 s8 *fwname;
1086 const struct firmware *fw;
1087 struct brcmf_usb_image *fw_image;
1088 int err;
1089
1090 brcmf_dbg(USB, "Enter\n");
1091 switch (devinfo->bus_pub.devid) { 1022 switch (devinfo->bus_pub.devid) {
1092 case 43143: 1023 case 43143:
1093 fwname = BRCMF_USB_43143_FW_NAME; 1024 return BRCMF_USB_43143_FW_NAME;
1094 break;
1095 case 43235: 1025 case 43235:
1096 case 43236: 1026 case 43236:
1097 case 43238: 1027 case 43238:
1098 fwname = BRCMF_USB_43236_FW_NAME; 1028 return BRCMF_USB_43236_FW_NAME;
1099 break;
1100 case 43242: 1029 case 43242:
1101 fwname = BRCMF_USB_43242_FW_NAME; 1030 return BRCMF_USB_43242_FW_NAME;
1102 break;
1103 default: 1031 default:
1104 return -EINVAL; 1032 return NULL;
1105 break;
1106 }
1107 brcmf_dbg(USB, "Loading FW %s\n", fwname);
1108 list_for_each_entry(fw_image, &fw_image_list, list) {
1109 if (fw_image->fwname == fwname) {
1110 devinfo->image = fw_image->image;
1111 devinfo->image_len = fw_image->image_len;
1112 return 0;
1113 }
1114 }
1115 /* fw image not yet loaded. Load it now and add to list */
1116 err = request_firmware(&fw, fwname, devinfo->dev);
1117 if (!fw) {
1118 brcmf_err("fail to request firmware %s\n", fwname);
1119 return err;
1120 }
1121 if (check_file(fw->data) < 0) {
1122 brcmf_err("invalid firmware %s\n", fwname);
1123 return -EINVAL;
1124 } 1033 }
1125
1126 fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
1127 if (!fw_image)
1128 return -ENOMEM;
1129 INIT_LIST_HEAD(&fw_image->list);
1130 list_add_tail(&fw_image->list, &fw_image_list);
1131 fw_image->fwname = fwname;
1132 fw_image->image = vmalloc(fw->size);
1133 if (!fw_image->image)
1134 return -ENOMEM;
1135
1136 memcpy(fw_image->image, fw->data, fw->size);
1137 fw_image->image_len = fw->size;
1138
1139 release_firmware(fw);
1140
1141 devinfo->image = fw_image->image;
1142 devinfo->image_len = fw_image->image_len;
1143
1144 return 0;
1145} 1034}
1146 1035
1147 1036
@@ -1186,11 +1075,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1186 goto error; 1075 goto error;
1187 devinfo->tx_freecount = ntxq; 1076 devinfo->tx_freecount = ntxq;
1188 1077
1189 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
1190 if (!devinfo->intr_urb) {
1191 brcmf_err("usb_alloc_urb (intr) failed\n");
1192 goto error;
1193 }
1194 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC); 1078 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
1195 if (!devinfo->ctl_urb) { 1079 if (!devinfo->ctl_urb) {
1196 brcmf_err("usb_alloc_urb (ctl) failed\n"); 1080 brcmf_err("usb_alloc_urb (ctl) failed\n");
@@ -1202,16 +1086,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1202 goto error; 1086 goto error;
1203 } 1087 }
1204 1088
1205 if (!brcmf_usb_dlneeded(devinfo))
1206 return &devinfo->bus_pub;
1207
1208 brcmf_dbg(USB, "Start fw downloading\n");
1209 if (brcmf_usb_get_fw(devinfo))
1210 goto error;
1211
1212 if (brcmf_usb_fw_download(devinfo))
1213 goto error;
1214
1215 return &devinfo->bus_pub; 1089 return &devinfo->bus_pub;
1216 1090
1217error: 1091error:
@@ -1222,18 +1096,77 @@ error:
1222 1096
1223static struct brcmf_bus_ops brcmf_usb_bus_ops = { 1097static struct brcmf_bus_ops brcmf_usb_bus_ops = {
1224 .txdata = brcmf_usb_tx, 1098 .txdata = brcmf_usb_tx,
1225 .init = brcmf_usb_up,
1226 .stop = brcmf_usb_down, 1099 .stop = brcmf_usb_down,
1227 .txctl = brcmf_usb_tx_ctlpkt, 1100 .txctl = brcmf_usb_tx_ctlpkt,
1228 .rxctl = brcmf_usb_rx_ctlpkt, 1101 .rxctl = brcmf_usb_rx_ctlpkt,
1229}; 1102};
1230 1103
1104static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
1105{
1106 int ret;
1107
1108 /* Attach to the common driver interface */
1109 ret = brcmf_attach(devinfo->dev);
1110 if (ret) {
1111 brcmf_err("brcmf_attach failed\n");
1112 return ret;
1113 }
1114
1115 ret = brcmf_usb_up(devinfo->dev);
1116 if (ret)
1117 goto fail;
1118
1119 ret = brcmf_bus_start(devinfo->dev);
1120 if (ret)
1121 goto fail;
1122
1123 return 0;
1124fail:
1125 brcmf_detach(devinfo->dev);
1126 return ret;
1127}
1128
1129static void brcmf_usb_probe_phase2(struct device *dev,
1130 const struct firmware *fw,
1131 void *nvram, u32 nvlen)
1132{
1133 struct brcmf_bus *bus = dev_get_drvdata(dev);
1134 struct brcmf_usbdev_info *devinfo;
1135 int ret;
1136
1137 brcmf_dbg(USB, "Start fw downloading\n");
1138 ret = check_file(fw->data);
1139 if (ret < 0) {
1140 brcmf_err("invalid firmware\n");
1141 release_firmware(fw);
1142 goto error;
1143 }
1144
1145 devinfo = bus->bus_priv.usb->devinfo;
1146 devinfo->image = fw->data;
1147 devinfo->image_len = fw->size;
1148
1149 ret = brcmf_usb_fw_download(devinfo);
1150 release_firmware(fw);
1151 if (ret)
1152 goto error;
1153
1154 ret = brcmf_usb_bus_setup(devinfo);
1155 if (ret)
1156 goto error;
1157
1158 return;
1159error:
1160 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
1161 device_release_driver(dev);
1162}
1163
1231static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) 1164static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1232{ 1165{
1233 struct brcmf_bus *bus = NULL; 1166 struct brcmf_bus *bus = NULL;
1234 struct brcmf_usbdev *bus_pub = NULL; 1167 struct brcmf_usbdev *bus_pub = NULL;
1235 int ret;
1236 struct device *dev = devinfo->dev; 1168 struct device *dev = devinfo->dev;
1169 int ret;
1237 1170
1238 brcmf_dbg(USB, "Enter\n"); 1171 brcmf_dbg(USB, "Enter\n");
1239 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ); 1172 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
@@ -1254,22 +1187,18 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1254 bus->chip = bus_pub->devid; 1187 bus->chip = bus_pub->devid;
1255 bus->chiprev = bus_pub->chiprev; 1188 bus->chiprev = bus_pub->chiprev;
1256 bus->proto_type = BRCMF_PROTO_BCDC; 1189 bus->proto_type = BRCMF_PROTO_BCDC;
1190 bus->always_use_fws_queue = true;
1257 1191
1258 /* Attach to the common driver interface */ 1192 if (!brcmf_usb_dlneeded(devinfo)) {
1259 ret = brcmf_attach(dev); 1193 ret = brcmf_usb_bus_setup(devinfo);
1260 if (ret) { 1194 if (ret)
1261 brcmf_err("brcmf_attach failed\n"); 1195 goto fail;
1262 goto fail;
1263 }
1264
1265 ret = brcmf_bus_start(dev);
1266 if (ret) {
1267 brcmf_err("dongle is not responding\n");
1268 brcmf_detach(dev);
1269 goto fail;
1270 } 1196 }
1271 1197 /* request firmware here */
1198 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1199 brcmf_usb_probe_phase2);
1272 return 0; 1200 return 0;
1201
1273fail: 1202fail:
1274 /* Release resources in reverse order */ 1203 /* Release resources in reverse order */
1275 kfree(bus); 1204 kfree(bus);
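Probe now defers firmware loading: brcmf_fw_get_firmwares() returns immediately and brcmf_usb_probe_phase2() finishes bus setup from the completion callback. A minimal sketch of the underlying pattern, assuming the new helper wraps the stock request_firmware_nowait() API (the helper itself lives in the new firmware.c, not shown in this hunk; demo_* names are placeholders):

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>

/* Hypothetical illustration of deferred firmware loading. */
static void demo_fw_ready(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware not found\n");
		device_release_driver(dev);	/* as probe_phase2 does on error */
		return;
	}
	/* ... download fw->data / fw->size, then bring the bus up ... */
	release_firmware(fw);
}

static int demo_request_fw(struct device *dev, const char *name)
{
	/* returns at once; demo_fw_ready() runs when the image is available */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, name,
				       dev, GFP_KERNEL, dev, demo_fw_ready);
}

Deferring the request keeps probe off the firmware loader's blocking path and lets the per-device image replace the old global fw_image_list cache.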
@@ -1357,9 +1286,6 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1357 goto fail; 1286 goto fail;
1358 } 1287 }
1359 1288
1360 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1361 devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
1362
1363 devinfo->rx_pipe = 0; 1289 devinfo->rx_pipe = 0;
1364 devinfo->rx_pipe2 = 0; 1290 devinfo->rx_pipe2 = 0;
1365 devinfo->tx_pipe = 0; 1291 devinfo->tx_pipe = 0;
@@ -1391,16 +1317,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1391 } 1317 }
1392 } 1318 }
1393 1319
1394 /* Allocate interrupt URB and data buffer */ 1320 if (usb->speed == USB_SPEED_SUPER)
1395 /* RNDIS says 8-byte intr, our old drivers used 4-byte */ 1321 brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
1396 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16)) 1322 else if (usb->speed == USB_SPEED_HIGH)
1397 devinfo->intr_size = 8;
1398 else
1399 devinfo->intr_size = 4;
1400
1401 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
1402
1403 if (usb->speed == USB_SPEED_HIGH)
1404 brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n"); 1323 brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
1405 else 1324 else
1406 brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n"); 1325 brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
@@ -1455,23 +1374,18 @@ static int brcmf_usb_resume(struct usb_interface *intf)
1455 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1374 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1456 1375
1457 brcmf_dbg(USB, "Enter\n"); 1376 brcmf_dbg(USB, "Enter\n");
1458 if (!brcmf_attach(devinfo->dev)) 1377 return brcmf_usb_bus_setup(devinfo);
1459 return brcmf_bus_start(&usb->dev);
1460
1461 return 0;
1462} 1378}
1463 1379
1464static int brcmf_usb_reset_resume(struct usb_interface *intf) 1380static int brcmf_usb_reset_resume(struct usb_interface *intf)
1465{ 1381{
1466 struct usb_device *usb = interface_to_usbdev(intf); 1382 struct usb_device *usb = interface_to_usbdev(intf);
1467 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1383 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1468
1469 brcmf_dbg(USB, "Enter\n"); 1384 brcmf_dbg(USB, "Enter\n");
1470 1385
1471 if (!brcmf_usb_fw_download(devinfo)) 1386 return brcmf_fw_get_firmwares(&usb->dev, 0,
1472 return brcmf_usb_resume(intf); 1387 brcmf_usb_get_fwname(devinfo), NULL,
1473 1388 brcmf_usb_probe_phase2);
1474 return -EIO;
1475} 1389}
1476 1390
1477#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c 1391#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
@@ -1506,16 +1420,6 @@ static struct usb_driver brcmf_usbdrvr = {
1506 .disable_hub_initiated_lpm = 1, 1420 .disable_hub_initiated_lpm = 1,
1507}; 1421};
1508 1422
1509static void brcmf_release_fw(struct list_head *q)
1510{
1511 struct brcmf_usb_image *fw_image, *next;
1512
1513 list_for_each_entry_safe(fw_image, next, q, list) {
1514 vfree(fw_image->image);
1515 list_del_init(&fw_image->list);
1516 }
1517}
1518
1519static int brcmf_usb_reset_device(struct device *dev, void *notused) 1423static int brcmf_usb_reset_device(struct device *dev, void *notused)
1520{ 1424{
1521 /* device passed is the usb interface so we 1425
@@ -1534,12 +1438,10 @@ void brcmf_usb_exit(void)
1534 ret = driver_for_each_device(drv, NULL, NULL, 1438 ret = driver_for_each_device(drv, NULL, NULL,
1535 brcmf_usb_reset_device); 1439 brcmf_usb_reset_device);
1536 usb_deregister(&brcmf_usbdrvr); 1440 usb_deregister(&brcmf_usbdrvr);
1537 brcmf_release_fw(&fw_image_list);
1538} 1441}
1539 1442
1540void brcmf_usb_register(void) 1443void brcmf_usb_register(void)
1541{ 1444{
1542 brcmf_dbg(USB, "Enter\n"); 1445 brcmf_dbg(USB, "Enter\n");
1543 INIT_LIST_HEAD(&fw_image_list);
1544 usb_register(&brcmf_usbdrvr); 1446 usb_register(&brcmf_usbdrvr);
1545} 1447}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index be1985296bdc..d8fa276e368b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -221,9 +221,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
221 */ 221 */
222 REG_RULE(2484-10, 2484+10, 20, 6, 20, 0), 222 REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
223 /* IEEE 802.11a, channel 36..64 */ 223 /* IEEE 802.11a, channel 36..64 */
224 REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), 224 REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
225 /* IEEE 802.11a, channel 100..165 */ 225 /* IEEE 802.11a, channel 100..165 */
226 REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } 226 REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
227}; 227};
228 228
229static const u32 __wl_cipher_suites[] = { 229static const u32 __wl_cipher_suites[] = {
@@ -341,6 +341,60 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
341 return qdbm; 341 return qdbm;
342} 342}
343 343
344static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
345 struct cfg80211_chan_def *ch)
346{
347 struct brcmu_chan ch_inf;
348 s32 primary_offset;
349
350 brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
351 ch->chan->center_freq, ch->center_freq1, ch->width);
352 ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
353 primary_offset = ch->center_freq1 - ch->chan->center_freq;
354 switch (ch->width) {
355 case NL80211_CHAN_WIDTH_20:
356 ch_inf.bw = BRCMU_CHAN_BW_20;
357 WARN_ON(primary_offset != 0);
358 break;
359 case NL80211_CHAN_WIDTH_40:
360 ch_inf.bw = BRCMU_CHAN_BW_40;
361 if (primary_offset < 0)
362 ch_inf.sb = BRCMU_CHAN_SB_U;
363 else
364 ch_inf.sb = BRCMU_CHAN_SB_L;
365 break;
366 case NL80211_CHAN_WIDTH_80:
367 ch_inf.bw = BRCMU_CHAN_BW_80;
368 if (primary_offset < 0) {
369 if (primary_offset < -CH_10MHZ_APART)
370 ch_inf.sb = BRCMU_CHAN_SB_UU;
371 else
372 ch_inf.sb = BRCMU_CHAN_SB_UL;
373 } else {
374 if (primary_offset > CH_10MHZ_APART)
375 ch_inf.sb = BRCMU_CHAN_SB_LL;
376 else
377 ch_inf.sb = BRCMU_CHAN_SB_LU;
378 }
379 break;
380 default:
381 WARN_ON_ONCE(1);
382 }
383 switch (ch->chan->band) {
384 case IEEE80211_BAND_2GHZ:
385 ch_inf.band = BRCMU_CHAN_BAND_2G;
386 break;
387 case IEEE80211_BAND_5GHZ:
388 ch_inf.band = BRCMU_CHAN_BAND_5G;
389 break;
390 default:
391 WARN_ON_ONCE(1);
392 }
393 d11inf->encchspec(&ch_inf);
394
395 return ch_inf.chspec;
396}
397
344u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, 398u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
345 struct ieee80211_channel *ch) 399 struct ieee80211_channel *ch)
346{ 400{
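A quick standalone trace of the 40 MHz sideband rule in chandef_to_chanspec(), using assumed example frequencies: with center_freq1 = 5190 MHz, control channel 36 (5180 MHz) gives primary_offset = +10 and the lower sideband (BRCMU_CHAN_SB_L), while control channel 40 (5200 MHz) gives -10 and the upper sideband (BRCMU_CHAN_SB_U).

/* Self-check of the 40 MHz sideband rule: offset < 0 means the primary
 * 20 MHz channel lies above the 40 MHz center, i.e. the upper sideband. */
#include <stdio.h>

int main(void)
{
	int center_freq1 = 5190;		/* 40 MHz center (MHz) */
	int control[2] = { 5180, 5200 };	/* channels 36 and 40 */
	int i;

	for (i = 0; i < 2; i++) {
		int primary_offset = center_freq1 - control[i];

		printf("control %d MHz -> %s sideband\n", control[i],
		       primary_offset < 0 ? "upper" : "lower");
	}
	return 0;
}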
@@ -586,6 +640,9 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
586 if (err) 640 if (err)
587 brcmf_err("Scan abort failed\n"); 641 brcmf_err("Scan abort failed\n");
588 } 642 }
643
644 brcmf_set_mpc(ifp, 1);
645
589 /* 646 /*
590 * e-scan can be initiated by scheduled scan 647 * e-scan can be initiated by scheduled scan
591 * which takes precedence. 648 * which takes precedence.
@@ -595,12 +652,10 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
595 cfg->sched_escan = false; 652 cfg->sched_escan = false;
596 if (!aborted) 653 if (!aborted)
597 cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); 654 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
598 brcmf_set_mpc(ifp, 1);
599 } else if (scan_request) { 655 } else if (scan_request) {
600 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", 656 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
601 aborted ? "Aborted" : "Done"); 657 aborted ? "Aborted" : "Done");
602 cfg80211_scan_done(scan_request, aborted); 658 cfg80211_scan_done(scan_request, aborted);
603 brcmf_set_mpc(ifp, 1);
604 } 659 }
605 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) 660 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
606 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); 661 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -1236,8 +1291,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1236 params->chandef.chan->center_freq); 1291 params->chandef.chan->center_freq);
1237 if (params->channel_fixed) { 1292 if (params->channel_fixed) {
1238 /* adding chanspec */ 1293 /* adding chanspec */
1239 chanspec = channel_to_chanspec(&cfg->d11inf, 1294 chanspec = chandef_to_chanspec(&cfg->d11inf,
1240 params->chandef.chan); 1295 &params->chandef);
1241 join_params.params_le.chanspec_list[0] = 1296 join_params.params_le.chanspec_list[0] =
1242 cpu_to_le16(chanspec); 1297 cpu_to_le16(chanspec);
1243 join_params.params_le.chanspec_num = cpu_to_le32(1); 1298 join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -2182,7 +2237,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
2182 2237
2183static s32 2238static s32
2184brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 2239brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2185 u8 *mac, struct station_info *sinfo) 2240 const u8 *mac, struct station_info *sinfo)
2186{ 2241{
2187 struct brcmf_if *ifp = netdev_priv(ndev); 2242 struct brcmf_if *ifp = netdev_priv(ndev);
2188 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 2243 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
@@ -3124,7 +3179,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3124 } 3179 }
3125 3180
3126 if (!request->n_ssids || !request->n_match_sets) { 3181 if (!request->n_ssids || !request->n_match_sets) {
3127 brcmf_err("Invalid sched scan req!! n_ssids:%d\n", 3182 brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
3128 request->n_ssids); 3183 request->n_ssids);
3129 return -EINVAL; 3184 return -EINVAL;
3130 } 3185 }
@@ -3734,23 +3789,6 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
3734} 3789}
3735 3790
3736static s32 3791static s32
3737brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
3738 struct brcmf_if *ifp,
3739 struct ieee80211_channel *channel)
3740{
3741 u16 chanspec;
3742 s32 err;
3743
3744 brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
3745 channel->center_freq);
3746
3747 chanspec = channel_to_chanspec(&cfg->d11inf, channel);
3748 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
3749
3750 return err;
3751}
3752
3753static s32
3754brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, 3792brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3755 struct cfg80211_ap_settings *settings) 3793 struct cfg80211_ap_settings *settings)
3756{ 3794{
@@ -3765,11 +3803,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3765 struct brcmf_join_params join_params; 3803 struct brcmf_join_params join_params;
3766 enum nl80211_iftype dev_role; 3804 enum nl80211_iftype dev_role;
3767 struct brcmf_fil_bss_enable_le bss_enable; 3805 struct brcmf_fil_bss_enable_le bss_enable;
3806 u16 chanspec;
3768 3807
3769 brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n", 3808 brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
3770 cfg80211_get_chandef_type(&settings->chandef), 3809 settings->chandef.chan->hw_value,
3771 settings->beacon_interval, 3810 settings->chandef.center_freq1, settings->chandef.width,
3772 settings->dtim_period); 3811 settings->beacon_interval, settings->dtim_period);
3773 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n", 3812 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
3774 settings->ssid, settings->ssid_len, settings->auth_type, 3813 settings->ssid, settings->ssid_len, settings->auth_type,
3775 settings->inactivity_timeout); 3814 settings->inactivity_timeout);
@@ -3826,9 +3865,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3826 3865
3827 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); 3866 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
3828 3867
3829 err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan); 3868 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
3869 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
3830 if (err < 0) { 3870 if (err < 0) {
3831 brcmf_err("Set Channel failed, %d\n", err); 3871 brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
3832 goto exit; 3872 goto exit;
3833 } 3873 }
3834 3874
@@ -3975,7 +4015,7 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
3975 4015
3976static int 4016static int
3977brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, 4017brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
3978 u8 *mac) 4018 const u8 *mac)
3979{ 4019{
3980 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 4020 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3981 struct brcmf_scb_val_le scbval; 4021 struct brcmf_scb_val_le scbval;
@@ -4203,7 +4243,7 @@ static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
4203} 4243}
4204 4244
4205static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, 4245static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
4206 struct net_device *ndev, u8 *peer, 4246 struct net_device *ndev, const u8 *peer,
4207 enum nl80211_tdls_operation oper) 4247 enum nl80211_tdls_operation oper)
4208{ 4248{
4209 struct brcmf_if *ifp; 4249 struct brcmf_if *ifp;
@@ -4364,6 +4404,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4364 WIPHY_FLAG_OFFCHAN_TX | 4404 WIPHY_FLAG_OFFCHAN_TX |
4365 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 4405 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
4366 WIPHY_FLAG_SUPPORTS_TDLS; 4406 WIPHY_FLAG_SUPPORTS_TDLS;
4407 if (!brcmf_roamoff)
4408 wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
4367 wiphy->mgmt_stypes = brcmf_txrx_stypes; 4409 wiphy->mgmt_stypes = brcmf_txrx_stypes;
4368 wiphy->max_remain_on_channel_duration = 5000; 4410 wiphy->max_remain_on_channel_duration = 5000;
4369 brcmf_wiphy_pno_params(wiphy); 4411 brcmf_wiphy_pno_params(wiphy);
@@ -4685,7 +4727,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4685 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 4727 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4686 struct ieee80211_channel *chan; 4728 struct ieee80211_channel *chan;
4687 s32 err = 0; 4729 s32 err = 0;
4688 u16 reason;
4689 4730
4690 if (brcmf_is_apmode(ifp->vif)) { 4731 if (brcmf_is_apmode(ifp->vif)) {
4691 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data); 4732 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
@@ -4706,16 +4747,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4706 brcmf_dbg(CONN, "Linkdown\n"); 4747 brcmf_dbg(CONN, "Linkdown\n");
4707 if (!brcmf_is_ibssmode(ifp->vif)) { 4748 if (!brcmf_is_ibssmode(ifp->vif)) {
4708 brcmf_bss_connect_done(cfg, ndev, e, false); 4749 brcmf_bss_connect_done(cfg, ndev, e, false);
4709 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
4710 &ifp->vif->sme_state)) {
4711 reason = 0;
4712 if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
4713 (e->event_code == BRCMF_E_DISASSOC_IND)) &&
4714 (e->reason != WLAN_REASON_UNSPECIFIED))
4715 reason = e->reason;
4716 cfg80211_disconnected(ndev, reason, NULL, 0,
4717 GFP_KERNEL);
4718 }
4719 } 4750 }
4720 brcmf_link_down(ifp->vif); 4751 brcmf_link_down(ifp->vif);
4721 brcmf_init_prof(ndev_to_prof(ndev)); 4752 brcmf_init_prof(ndev_to_prof(ndev));
@@ -5215,6 +5246,9 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5215 if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) && 5246 if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
5216 ch.bw == BRCMU_CHAN_BW_40) 5247 ch.bw == BRCMU_CHAN_BW_40)
5217 continue; 5248 continue;
5249 if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
5250 ch.bw == BRCMU_CHAN_BW_80)
5251 continue;
5218 update = false; 5252 update = false;
5219 for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { 5253 for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
5220 if (band_chan_arr[j].hw_value == ch.chnum) { 5254 if (band_chan_arr[j].hw_value == ch.chnum) {
@@ -5231,10 +5265,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5231 ieee80211_channel_to_frequency(ch.chnum, band); 5265 ieee80211_channel_to_frequency(ch.chnum, band);
5232 band_chan_arr[index].hw_value = ch.chnum; 5266 band_chan_arr[index].hw_value = ch.chnum;
5233 5267
5234 if (ch.bw == BRCMU_CHAN_BW_40) { 5268 /* assuming the chanspecs order is HT20,
5235 /* assuming the order is HT20, HT40 Upper, 5269 * HT40 upper, HT40 lower, and VHT80.
5236 * HT40 lower from chanspecs 5270 */
5237 */ 5271 if (ch.bw == BRCMU_CHAN_BW_80) {
5272 band_chan_arr[index].flags &=
5273 ~IEEE80211_CHAN_NO_80MHZ;
5274 } else if (ch.bw == BRCMU_CHAN_BW_40) {
5238 ht40_flag = band_chan_arr[index].flags & 5275 ht40_flag = band_chan_arr[index].flags &
5239 IEEE80211_CHAN_NO_HT40; 5276 IEEE80211_CHAN_NO_HT40;
5240 if (ch.sb == BRCMU_CHAN_SB_U) { 5277 if (ch.sb == BRCMU_CHAN_SB_U) {
@@ -5255,8 +5292,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5255 IEEE80211_CHAN_NO_HT40MINUS; 5292 IEEE80211_CHAN_NO_HT40MINUS;
5256 } 5293 }
5257 } else { 5294 } else {
5295 /* disable other bandwidths for now; the order
5296 * mentioned above assures they are enabled by
5297 * subsequent chanspecs.
5298 */
5258 band_chan_arr[index].flags = 5299 band_chan_arr[index].flags =
5259 IEEE80211_CHAN_NO_HT40; 5300 IEEE80211_CHAN_NO_HT40 |
5301 IEEE80211_CHAN_NO_80MHZ;
5260 ch.bw = BRCMU_CHAN_BW_20; 5302 ch.bw = BRCMU_CHAN_BW_20;
5261 cfg->d11inf.encchspec(&ch); 5303 cfg->d11inf.encchspec(&ch);
5262 channel = ch.chspec; 5304 channel = ch.chspec;
@@ -5323,13 +5365,63 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
5323 } 5365 }
5324} 5366}
5325 5367
5368static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
5369 u32 bw_cap[2], u32 nchain)
5370{
5371 band->ht_cap.ht_supported = true;
5372 if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
5373 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
5374 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
5375 }
5376 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
5377 band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
5378 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
5379 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
5380 memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
5381 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5382}
5383
5384static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
5385{
5386 u16 mcs_map;
5387 int i;
5388
5389 for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
5390 mcs_map = (mcs_map << 2) | supp;
5391
5392 return cpu_to_le16(mcs_map);
5393}
5394
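As a sanity check on the 2-bit-per-stream packing in brcmf_get_mcs_map() (standalone arithmetic, not driver code): with nchain = 2 and IEEE80211_VHT_MCS_SUPPORT_0_9 (value 2), the loop yields 0xFFFA, i.e. streams 1-2 advertise MCS 0-9 and the remaining six stay at 3, "not supported".

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mcs_map = 0xFFFF;	/* all streams start as "not supported" */
	int i, nchain = 2, supp = 2;	/* assumed: 2 chains, MCS 0-9 support */

	for (i = 0; i < nchain; i++)
		mcs_map = (mcs_map << 2) | supp;
	printf("0x%04X\n", mcs_map);	/* prints 0xFFFA */
	return 0;
}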
5395static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
5396 u32 bw_cap[2], u32 nchain)
5397{
5398 __le16 mcs_map;
5399
5400 /* not allowed in 2.4G band */
5401 if (band->band == IEEE80211_BAND_2GHZ)
5402 return;
5403
5404 band->vht_cap.vht_supported = true;
5405 /* 80MHz is mandatory */
5406 band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
5407 if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
5408 band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
5409 band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
5410 }
5411 /* all streams support 256-QAM (MCS 0-9) */
5412 mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
5413 band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
5414 band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
5415}
5416
5326static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg) 5417static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5327{ 5418{
5328 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 5419 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
5329 struct wiphy *wiphy; 5420 struct wiphy *wiphy;
5330 s32 phy_list; 5421 s32 phy_list;
5331 u32 band_list[3]; 5422 u32 band_list[3];
5332 u32 nmode; 5423 u32 nmode = 0;
5424 u32 vhtmode = 0;
5333 u32 bw_cap[2] = { 0, 0 }; 5425 u32 bw_cap[2] = { 0, 0 };
5334 u32 rxchain; 5426 u32 rxchain;
5335 u32 nchain; 5427 u32 nchain;
@@ -5360,14 +5452,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5360 brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n", 5452 brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
5361 band_list[0], band_list[1], band_list[2]); 5453 band_list[0], band_list[1], band_list[2]);
5362 5454
5455 (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
5363 err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode); 5456 err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
5364 if (err) { 5457 if (err) {
5365 brcmf_err("nmode error (%d)\n", err); 5458 brcmf_err("nmode error (%d)\n", err);
5366 } else { 5459 } else {
5367 brcmf_get_bwcap(ifp, bw_cap); 5460 brcmf_get_bwcap(ifp, bw_cap);
5368 } 5461 }
5369 brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode, 5462 brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
5370 bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]); 5463 nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
5464 bw_cap[IEEE80211_BAND_5GHZ]);
5371 5465
5372 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain); 5466 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
5373 if (err) { 5467 if (err) {
@@ -5398,17 +5492,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5398 else 5492 else
5399 continue; 5493 continue;
5400 5494
5401 if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) { 5495 if (nmode)
5402 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 5496 brcmf_update_ht_cap(band, bw_cap, nchain);
5403 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 5497 if (vhtmode)
5404 } 5498 brcmf_update_vht_cap(band, bw_cap, nchain);
5405 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
5406 band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
5407 band->ht_cap.ht_supported = true;
5408 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
5409 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
5410 memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
5411 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5412 bands[band->band] = band; 5499 bands[band->band] = band;
5413 } 5500 }
5414 5501
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8c5fa4e58139..43c71bfaa474 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
897 return result; 897 return result;
898} 898}
899 899
900static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 900static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
901 u32 queues, bool drop)
901{ 902{
902 struct brcms_info *wl = hw->priv; 903 struct brcms_info *wl = hw->priv;
903 int ret; 904 int ret;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9417cb5a2553..af8ba64ace39 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4870,14 +4870,11 @@ static void brcms_c_detach_module(struct brcms_c_info *wlc)
4870/* 4870/*
4871 * low level detach 4871 * low level detach
4872 */ 4872 */
4873static int brcms_b_detach(struct brcms_c_info *wlc) 4873static void brcms_b_detach(struct brcms_c_info *wlc)
4874{ 4874{
4875 uint i; 4875 uint i;
4876 struct brcms_hw_band *band; 4876 struct brcms_hw_band *band;
4877 struct brcms_hardware *wlc_hw = wlc->hw; 4877 struct brcms_hardware *wlc_hw = wlc->hw;
4878 int callbacks;
4879
4880 callbacks = 0;
4881 4878
4882 brcms_b_detach_dmapio(wlc_hw); 4879 brcms_b_detach_dmapio(wlc_hw);
4883 4880
@@ -4900,9 +4897,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
4900 ai_detach(wlc_hw->sih); 4897 ai_detach(wlc_hw->sih);
4901 wlc_hw->sih = NULL; 4898 wlc_hw->sih = NULL;
4902 } 4899 }
4903
4904 return callbacks;
4905
4906} 4900}
4907 4901
4908/* 4902/*
@@ -4917,14 +4911,15 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
4917 */ 4911 */
4918uint brcms_c_detach(struct brcms_c_info *wlc) 4912uint brcms_c_detach(struct brcms_c_info *wlc)
4919{ 4913{
4920 uint callbacks = 0; 4914 uint callbacks;
4921 4915
4922 if (wlc == NULL) 4916 if (wlc == NULL)
4923 return 0; 4917 return 0;
4924 4918
4925 callbacks += brcms_b_detach(wlc); 4919 brcms_b_detach(wlc);
4926 4920
4927 /* delete software timers */ 4921 /* delete software timers */
4922 callbacks = 0;
4928 if (!brcms_c_radio_monitor_stop(wlc)) 4923 if (!brcms_c_radio_monitor_stop(wlc))
4929 callbacks++; 4924 callbacks++;
4930 4925
diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/brcm80211/brcmutil/d11.c
index 30e54e2c6c9b..2b2522bdd8eb 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/d11.c
@@ -21,19 +21,46 @@
21#include <brcmu_wifi.h> 21#include <brcmu_wifi.h>
22#include <brcmu_d11.h> 22#include <brcmu_d11.h>
23 23
24static void brcmu_d11n_encchspec(struct brcmu_chan *ch) 24static u16 d11n_sb(enum brcmu_chan_sb sb)
25{ 25{
26 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; 26 switch (sb) {
27 case BRCMU_CHAN_SB_NONE:
28 return BRCMU_CHSPEC_D11N_SB_N;
29 case BRCMU_CHAN_SB_L:
30 return BRCMU_CHSPEC_D11N_SB_L;
31 case BRCMU_CHAN_SB_U:
32 return BRCMU_CHSPEC_D11N_SB_U;
33 default:
34 WARN_ON(1);
35 }
36 return 0;
37}
27 38
28 switch (ch->bw) { 39static u16 d11n_bw(enum brcmu_chan_bw bw)
40{
41 switch (bw) {
29 case BRCMU_CHAN_BW_20: 42 case BRCMU_CHAN_BW_20:
30 ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N; 43 return BRCMU_CHSPEC_D11N_BW_20;
31 break;
32 case BRCMU_CHAN_BW_40: 44 case BRCMU_CHAN_BW_40:
45 return BRCMU_CHSPEC_D11N_BW_40;
33 default: 46 default:
34 WARN_ON_ONCE(1); 47 WARN_ON(1);
35 break;
36 } 48 }
49 return 0;
50}
51
52static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
53{
54 if (ch->bw == BRCMU_CHAN_BW_20)
55 ch->sb = BRCMU_CHAN_SB_NONE;
56
57 ch->chspec = 0;
58 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
59 BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
60 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
61 0, d11n_sb(ch->sb));
62 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
63 0, d11n_bw(ch->bw));
37 64
38 if (ch->chnum <= CH_MAX_2G_CHANNEL) 65 if (ch->chnum <= CH_MAX_2G_CHANNEL)
39 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G; 66 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
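The encode helpers are being rewritten around brcmu_maskset16() instead of raw ORs. For readers without the tree at hand, a sketch of its assumed behaviour (modelled on the brcmu_utils.h inline and kernel u16/u8 types; shown for reference, not copied from the tree):

/* Assumed shape of the mask helper used above: clear the field named by
 * mask, then OR in the shifted value, keeping all other bits intact. */
static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
{
	value = (value << shift) & mask;
	*var = (*var & ~mask) | value;
}

Building the chanspec field-by-field this way also makes the zero-initialisation of ch->chspec explicit, which the old OR-only version silently relied on.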
@@ -41,23 +68,34 @@ static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
41 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G; 68 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
42} 69}
43 70
44static void brcmu_d11ac_encchspec(struct brcmu_chan *ch) 71static u16 d11ac_bw(enum brcmu_chan_bw bw)
45{ 72{
46 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; 73 switch (bw) {
47
48 switch (ch->bw) {
49 case BRCMU_CHAN_BW_20: 74 case BRCMU_CHAN_BW_20:
50 ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20; 75 return BRCMU_CHSPEC_D11AC_BW_20;
51 break;
52 case BRCMU_CHAN_BW_40: 76 case BRCMU_CHAN_BW_40:
77 return BRCMU_CHSPEC_D11AC_BW_40;
53 case BRCMU_CHAN_BW_80: 78 case BRCMU_CHAN_BW_80:
54 case BRCMU_CHAN_BW_80P80: 79 return BRCMU_CHSPEC_D11AC_BW_80;
55 case BRCMU_CHAN_BW_160:
56 default: 80 default:
57 WARN_ON_ONCE(1); 81 WARN_ON(1);
58 break;
59 } 82 }
83 return 0;
84}
60 85
86static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
87{
88 if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
89 ch->sb = BRCMU_CHAN_SB_L;
90
91 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
92 BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
93 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
94 BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
95 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
96 0, d11ac_bw(ch->bw));
97
98 ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
61 if (ch->chnum <= CH_MAX_2G_CHANNEL) 99 if (ch->chnum <= CH_MAX_2G_CHANNEL)
62 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G; 100 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
63 else 101 else
@@ -73,6 +111,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
73 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { 111 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
74 case BRCMU_CHSPEC_D11N_BW_20: 112 case BRCMU_CHSPEC_D11N_BW_20:
75 ch->bw = BRCMU_CHAN_BW_20; 113 ch->bw = BRCMU_CHAN_BW_20;
114 ch->sb = BRCMU_CHAN_SB_NONE;
76 break; 115 break;
77 case BRCMU_CHSPEC_D11N_BW_40: 116 case BRCMU_CHSPEC_D11N_BW_40:
78 ch->bw = BRCMU_CHAN_BW_40; 117 ch->bw = BRCMU_CHAN_BW_40;
@@ -112,6 +151,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
112 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { 151 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
113 case BRCMU_CHSPEC_D11AC_BW_20: 152 case BRCMU_CHSPEC_D11AC_BW_20:
114 ch->bw = BRCMU_CHAN_BW_20; 153 ch->bw = BRCMU_CHAN_BW_20;
154 ch->sb = BRCMU_CHAN_SB_NONE;
115 break; 155 break;
116 case BRCMU_CHSPEC_D11AC_BW_40: 156 case BRCMU_CHSPEC_D11AC_BW_40:
117 ch->bw = BRCMU_CHAN_BW_40; 157 ch->bw = BRCMU_CHAN_BW_40;
@@ -128,6 +168,25 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
128 break; 168 break;
129 case BRCMU_CHSPEC_D11AC_BW_80: 169 case BRCMU_CHSPEC_D11AC_BW_80:
130 ch->bw = BRCMU_CHAN_BW_80; 170 ch->bw = BRCMU_CHAN_BW_80;
171 ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
172 BRCMU_CHSPEC_D11AC_SB_SHIFT);
173 switch (ch->sb) {
174 case BRCMU_CHAN_SB_LL:
175 ch->chnum -= CH_30MHZ_APART;
176 break;
177 case BRCMU_CHAN_SB_LU:
178 ch->chnum -= CH_10MHZ_APART;
179 break;
180 case BRCMU_CHAN_SB_UL:
181 ch->chnum += CH_10MHZ_APART;
182 break;
183 case BRCMU_CHAN_SB_UU:
184 ch->chnum += CH_30MHZ_APART;
185 break;
186 default:
187 WARN_ON_ONCE(1);
188 break;
189 }
131 break; 190 break;
132 case BRCMU_CHSPEC_D11AC_BW_8080: 191 case BRCMU_CHSPEC_D11AC_BW_8080:
133 case BRCMU_CHSPEC_D11AC_BW_160: 192 case BRCMU_CHSPEC_D11AC_BW_160:
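A worked trace of the new 80 MHz decode, relying on the constants this series defines (CH_30MHZ_APART = 6, CH_10MHZ_APART = 2; channel numbers are 5 MHz apart): for a chanspec centered on channel 42, the four sidebands recover the control channel as LL → 36, LU → 40, UL → 44, UU → 48, the four 20 MHz subchannels of the 80 MHz block.

/* Standalone check of the sideband-to-control-channel arithmetic above. */
#include <stdio.h>

int main(void)
{
	int center = 42;			/* 80 MHz center channel */
	int offs[4] = { -6, -2, +2, +6 };	/* LL, LU, UL, UU */
	const char *name[4] = { "LL", "LU", "UL", "UU" };
	int i;

	for (i = 0; i < 4; i++)
		printf("SB_%s -> control channel %d\n", name[i],
		       center + offs[i]);
	return 0;
}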
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 8660a2cba098..f9745ea8b3e0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -108,13 +108,7 @@ enum brcmu_chan_bw {
108}; 108};
109 109
110enum brcmu_chan_sb { 110enum brcmu_chan_sb {
111 BRCMU_CHAN_SB_NONE = 0, 111 BRCMU_CHAN_SB_NONE = -1,
112 BRCMU_CHAN_SB_L,
113 BRCMU_CHAN_SB_U,
114 BRCMU_CHAN_SB_LL,
115 BRCMU_CHAN_SB_LU,
116 BRCMU_CHAN_SB_UL,
117 BRCMU_CHAN_SB_UU,
118 BRCMU_CHAN_SB_LLL, 112 BRCMU_CHAN_SB_LLL,
119 BRCMU_CHAN_SB_LLU, 113 BRCMU_CHAN_SB_LLU,
120 BRCMU_CHAN_SB_LUL, 114 BRCMU_CHAN_SB_LUL,
@@ -123,6 +117,12 @@ enum brcmu_chan_sb {
123 BRCMU_CHAN_SB_ULU, 117 BRCMU_CHAN_SB_ULU,
124 BRCMU_CHAN_SB_UUL, 118 BRCMU_CHAN_SB_UUL,
125 BRCMU_CHAN_SB_UUU, 119 BRCMU_CHAN_SB_UUU,
120 BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
121 BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
122 BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
123 BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
124 BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
125 BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
126}; 126};
127 127
128struct brcmu_chan { 128struct brcmu_chan {
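The reshaped enum makes the 40 and 80 MHz sideband names aliases into the 80+80/160 MHz three-letter namespace, so every bandwidth shares one small field. A standalone check of the values it implies (hypothetical enum names, mirroring the layout above):

/* SB_NONE = -1, then LLL..UUU counting from 0; short names are aliases. */
#include <stdio.h>

enum sb { NONE = -1, LLL, LLU, LUL, LUU, ULL, ULU, UUL, UUU,
	  L = LLL, U = LLU, LL = LLL, LU = LLU, UL = LUL, UU = LUU };

int main(void)
{
	/* 40 MHz and 80 MHz sideband names collapse onto 0..3, matching
	 * the width of the D11AC chanspec sideband field. */
	printf("L=%d U=%d LL=%d LU=%d UL=%d UU=%d\n", L, U, LL, LU, UL, UU);
	return 0;	/* prints L=0 U=1 LL=0 LU=1 UL=2 UU=3 */
}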
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 74419d4bd123..76b5d3a86294 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -29,6 +29,7 @@
29#define CH_UPPER_SB 0x01 29#define CH_UPPER_SB 0x01
30#define CH_LOWER_SB 0x02 30#define CH_LOWER_SB 0x02
31#define CH_EWA_VALID 0x04 31#define CH_EWA_VALID 0x04
32#define CH_30MHZ_APART 6
32#define CH_20MHZ_APART 4 33#define CH_20MHZ_APART 4
33#define CH_10MHZ_APART 2 34#define CH_10MHZ_APART 2
34#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ 35#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 103f7bce8932..cd0cad7f7759 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
936 return ret; 936 return ret;
937} 937}
938 938
939void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 939void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
940 u32 queues, bool drop)
940{ 941{
941 struct cw1200_common *priv = hw->priv; 942 struct cw1200_common *priv = hw->priv;
942 943
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index 35babb62cc6a..b7e386b7662b 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
40 40
41int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); 41int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
42 42
43void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); 43void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
44 u32 queues, bool drop);
44 45
45u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, 46u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
46 struct netdev_hw_addr_list *mc_list); 47 struct netdev_hw_addr_list *mc_list);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 67db34e56d7e..52919ad42726 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
882 dev->mtu = local->mtu; 882 dev->mtu = local->mtu;
883 883
884 884
885 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops); 885 dev->ethtool_ops = &prism2_ethtool_ops;
886 886
887} 887}
888 888
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index d37a6fd90d40..b598e2803500 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
573 rx_status.flag |= RX_FLAG_SHORTPRE; 573 rx_status.flag |= RX_FLAG_SHORTPRE;
574 574
575 if ((unlikely(rx_stats->phy_count > 20))) { 575 if ((unlikely(rx_stats->phy_count > 20))) {
576 D_DROP("dsp size out of range [0,20]: %d/n", 576 D_DROP("dsp size out of range [0,20]: %d\n",
577 rx_stats->phy_count); 577 rx_stats->phy_count);
578 return; 578 return;
579 } 579 }
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 888ad5c74639..c159c05db6ef 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
670 } 670 }
671 671
672 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 672 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
673 D_DROP("dsp size out of range [0,20]: %d/n", 673 D_DROP("dsp size out of range [0,20]: %d\n",
674 phy_res->cfg_phy_cnt); 674 phy_res->cfg_phy_cnt);
675 return; 675 return;
676 } 676 }
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 4f42174d9994..ecc674627e6e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4755,7 +4755,8 @@ out:
4755} 4755}
4756EXPORT_SYMBOL(il_mac_change_interface); 4756EXPORT_SYMBOL(il_mac_change_interface);
4757 4757
4758void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 4758void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4759 u32 queues, bool drop)
4759{ 4760{
4760 struct il_priv *il = hw->priv; 4761 struct il_priv *il = hw->priv;
4761 unsigned long timeout = jiffies + msecs_to_jiffies(500); 4762 unsigned long timeout = jiffies + msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index dfb13c70efe8..ea5c0f863c4e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
1723 struct ieee80211_vif *vif); 1723 struct ieee80211_vif *vif);
1724int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1724int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1725 enum nl80211_iftype newtype, bool newp2p); 1725 enum nl80211_iftype newtype, bool newp2p);
1726void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop); 1726void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1727 u32 queues, bool drop);
1727int il_alloc_txq_mem(struct il_priv *il); 1728int il_alloc_txq_mem(struct il_priv *il);
1728void il_free_txq_mem(struct il_priv *il); 1729void il_free_txq_mem(struct il_priv *il);
1729 1730
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 74b3b4de7bb7..7fd50428b934 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,10 +2,6 @@ config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && MAC80211 && HAS_IOMEM 3 depends on PCI && MAC80211 && HAS_IOMEM
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9 ---help--- 5 ---help---
10 Select to build the driver supporting the: 6 Select to build the driver supporting the:
11 7
@@ -43,6 +39,14 @@ config IWLWIFI
43 say M here and read <file:Documentation/kbuild/modules.txt>. The 39 say M here and read <file:Documentation/kbuild/modules.txt>. The
44 module will be called iwlwifi. 40 module will be called iwlwifi.
45 41
42config IWLWIFI_LEDS
43 bool
44 depends on IWLWIFI
45 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
46 select LEDS_TRIGGERS
47 select MAC80211_LEDS
48 default y
49
46config IWLDVM 50config IWLDVM
47 tristate "Intel Wireless WiFi DVM Firmware support" 51 tristate "Intel Wireless WiFi DVM Firmware support"
48 depends on IWLWIFI 52 depends on IWLWIFI
@@ -124,7 +128,6 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
124 Enable use of experimental ucode for testing and debugging. 128 Enable use of experimental ucode for testing and debugging.
125 129
126config IWLWIFI_DEVICE_TRACING 130config IWLWIFI_DEVICE_TRACING
127
128 bool "iwlwifi device access tracing" 131 bool "iwlwifi device access tracing"
129 depends on IWLWIFI 132 depends on IWLWIFI
130 depends on EVENT_TRACING 133 depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index dce7ab2e0c4b..4d19685f31c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -4,9 +4,10 @@ iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o 4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5 5
6iwldvm-objs += power.o 6iwldvm-objs += power.o
7iwldvm-objs += scan.o led.o 7iwldvm-objs += scan.o
8iwldvm-objs += rxon.o devices.o 8iwldvm-objs += rxon.o devices.o
9 9
10iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 11iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11 12
12ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ 13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index be1086c87157..20e6aa910700 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -94,7 +94,6 @@ int iwl_send_calib_results(struct iwl_priv *priv)
94{ 94{
95 struct iwl_host_cmd hcmd = { 95 struct iwl_host_cmd hcmd = {
96 .id = REPLY_PHY_CALIBRATION_CMD, 96 .id = REPLY_PHY_CALIBRATION_CMD,
97 .flags = CMD_SYNC,
98 }; 97 };
99 struct iwl_calib_result *res; 98 struct iwl_calib_result *res;
100 99
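
[Note: a pattern worth calling out once, since it recurs throughout this series: every ".flags = CMD_SYNC" initializer is deleted and every CMD_SYNC argument becomes a literal 0. That is behaviour-preserving on the assumption that CMD_SYNC is defined as 0, synchronous being the default command mode. A minimal user-space sketch of that assumption; the struct and macro names are stand-ins, not the iwlwifi API:]

#include <stdio.h>

#define CMD_SYNC 0	/* assumed value; spelling it out adds nothing */

struct host_cmd {
	int id;
	unsigned int flags;
};

int main(void)
{
	struct host_cmd a = { .id = 1, .flags = CMD_SYNC };
	struct host_cmd b = { .id = 1 };	/* unnamed members are zeroed */

	printf("a.flags = %u, b.flags = %u\n", a.flags, b.flags);
	return 0;
}
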
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d2fe2596d54e..0ffb6ff1a255 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -1481,7 +1481,7 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1481 1481
1482 /* make request to uCode to retrieve statistics information */ 1482 /* make request to uCode to retrieve statistics information */
1483 mutex_lock(&priv->mutex); 1483 mutex_lock(&priv->mutex);
1484 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 1484 ret = iwl_send_statistics_request(priv, 0, false);
1485 mutex_unlock(&priv->mutex); 1485 mutex_unlock(&priv->mutex);
1486 1486
1487 if (ret) 1487 if (ret)
@@ -1868,7 +1868,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1868 1868
1869 /* make request to uCode to retrieve statistics information */ 1869 /* make request to uCode to retrieve statistics information */
1870 mutex_lock(&priv->mutex); 1870 mutex_lock(&priv->mutex);
1871 iwl_send_statistics_request(priv, CMD_SYNC, true); 1871 iwl_send_statistics_request(priv, 0, true);
1872 mutex_unlock(&priv->mutex); 1872 mutex_unlock(&priv->mutex);
1873 1873
1874 return count; 1874 return count;
@@ -2188,7 +2188,6 @@ static int iwl_cmd_echo_test(struct iwl_priv *priv)
2188 struct iwl_host_cmd cmd = { 2188 struct iwl_host_cmd cmd = {
2189 .id = REPLY_ECHO, 2189 .id = REPLY_ECHO,
2190 .len = { 0 }, 2190 .len = { 0 },
2191 .flags = CMD_SYNC,
2192 }; 2191 };
2193 2192
2194 ret = iwl_dvm_send_cmd(priv, &cmd); 2193 ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -2320,7 +2319,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2320 mutex_lock(&priv->mutex); 2319 mutex_lock(&priv->mutex);
2321 2320
2322 /* take the return value to make compiler happy - it will fail anyway */ 2321 /* take the return value to make compiler happy - it will fail anyway */
2323 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL); 2322 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
2324 2323
2325 mutex_unlock(&priv->mutex); 2324 mutex_unlock(&priv->mutex);
2326 2325
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3441f70d0ff9..a6f22c32a279 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -888,9 +888,11 @@ struct iwl_priv {
888 888
889 struct iwl_event_log event_log; 889 struct iwl_event_log event_log;
890 890
891#ifdef CONFIG_IWLWIFI_LEDS
891 struct led_classdev led; 892 struct led_classdev led;
892 unsigned long blink_on, blink_off; 893 unsigned long blink_on, blink_off;
893 bool led_registered; 894 bool led_registered;
895#endif
894 896
895 /* WoWLAN GTK rekey data */ 897 /* WoWLAN GTK rekey data */
896 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; 898 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 758c54eeb206..34b41e5f7cfc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -417,7 +417,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
417 struct iwl_host_cmd hcmd = { 417 struct iwl_host_cmd hcmd = {
418 .id = REPLY_CHANNEL_SWITCH, 418 .id = REPLY_CHANNEL_SWITCH,
419 .len = { sizeof(cmd), }, 419 .len = { sizeof(cmd), },
420 .flags = CMD_SYNC,
421 .data = { &cmd, }, 420 .data = { &cmd, },
422 }; 421 };
423 422
@@ -579,7 +578,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
579 struct iwl_host_cmd hcmd = { 578 struct iwl_host_cmd hcmd = {
580 .id = REPLY_CHANNEL_SWITCH, 579 .id = REPLY_CHANNEL_SWITCH,
581 .len = { sizeof(*cmd), }, 580 .len = { sizeof(*cmd), },
582 .flags = CMD_SYNC,
583 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 581 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
584 }; 582 };
585 int err; 583 int err;
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 6a0817d9c4fa..1c6b2252d0f2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -36,8 +36,20 @@ struct iwl_priv;
36#define IWL_LED_ACTIVITY (0<<1) 36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1) 37#define IWL_LED_LINK (1<<1)
38 38
39#ifdef CONFIG_IWLWIFI_LEDS
39void iwlagn_led_enable(struct iwl_priv *priv); 40void iwlagn_led_enable(struct iwl_priv *priv);
40void iwl_leds_init(struct iwl_priv *priv); 41void iwl_leds_init(struct iwl_priv *priv);
41void iwl_leds_exit(struct iwl_priv *priv); 42void iwl_leds_exit(struct iwl_priv *priv);
43#else
44static inline void iwlagn_led_enable(struct iwl_priv *priv)
45{
46}
47static inline void iwl_leds_init(struct iwl_priv *priv)
48{
49}
50static inline void iwl_leds_exit(struct iwl_priv *priv)
51{
52}
53#endif
42 54
43#endif /* __iwl_leds_h__ */ 55#endif /* __iwl_leds_h__ */
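
[Note: the header change above uses the standard kernel pattern for optional features: when CONFIG_IWLWIFI_LEDS is off, empty static inline stubs replace the real declarations, so every caller compiles unchanged and the compiler discards the calls entirely. A user-space miniature of the pattern; FEATURE_LEDS is a hypothetical stand-in for the Kconfig symbol:]

#include <stdio.h>

/* #define FEATURE_LEDS */	/* toggle to mimic CONFIG_IWLWIFI_LEDS=y */

#ifdef FEATURE_LEDS
static void leds_init(void)
{
	printf("LED support initialised\n");
}
#else
static inline void leds_init(void)
{
}
#endif

int main(void)
{
	leds_init();	/* compiles either way; a no-op when configured out */
	return 0;
}
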
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 576f7ee38ca5..2191621d69c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -81,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
81 else 81 else
82 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 82 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
83 83
84 return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC, 84 return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
85 sizeof(tx_power_cmd), &tx_power_cmd); 85 sizeof(tx_power_cmd), &tx_power_cmd);
86} 86}
87 87
@@ -141,7 +141,6 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
141 struct iwl_host_cmd cmd = { 141 struct iwl_host_cmd cmd = {
142 .id = REPLY_TXFIFO_FLUSH, 142 .id = REPLY_TXFIFO_FLUSH,
143 .len = { sizeof(struct iwl_txfifo_flush_cmd), }, 143 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
144 .flags = CMD_SYNC,
145 .data = { &flush_cmd, }, 144 .data = { &flush_cmd, },
146 }; 145 };
147 146
@@ -180,7 +179,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
180 goto done; 179 goto done;
181 } 180 }
182 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 181 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
183 iwl_trans_wait_tx_queue_empty(priv->trans); 182 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
184done: 183done:
185 ieee80211_wake_queues(priv->hw); 184 ieee80211_wake_queues(priv->hw);
186 mutex_unlock(&priv->mutex); 185 mutex_unlock(&priv->mutex);
@@ -333,12 +332,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
333 memcpy(&bt_cmd_v2.basic, &basic, 332 memcpy(&bt_cmd_v2.basic, &basic,
334 sizeof(basic)); 333 sizeof(basic));
335 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 334 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
336 CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2); 335 0, sizeof(bt_cmd_v2), &bt_cmd_v2);
337 } else { 336 } else {
338 memcpy(&bt_cmd_v1.basic, &basic, 337 memcpy(&bt_cmd_v1.basic, &basic,
339 sizeof(basic)); 338 sizeof(basic));
340 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 339 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
341 CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1); 340 0, sizeof(bt_cmd_v1), &bt_cmd_v1);
342 } 341 }
343 if (ret) 342 if (ret)
344 IWL_ERR(priv, "failed to send BT Coex Config\n"); 343 IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -1044,7 +1043,6 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
1044 struct iwl_host_cmd cmd = { 1043 struct iwl_host_cmd cmd = {
1045 .id = REPLY_WOWLAN_PATTERNS, 1044 .id = REPLY_WOWLAN_PATTERNS,
1046 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1045 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1047 .flags = CMD_SYNC,
1048 }; 1046 };
1049 int i, err; 1047 int i, err;
1050 1048
@@ -1201,7 +1199,6 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1201 if (key_data.use_rsc_tsc) { 1199 if (key_data.use_rsc_tsc) {
1202 struct iwl_host_cmd rsc_tsc_cmd = { 1200 struct iwl_host_cmd rsc_tsc_cmd = {
1203 .id = REPLY_WOWLAN_TSC_RSC_PARAMS, 1201 .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
1204 .flags = CMD_SYNC,
1205 .data[0] = key_data.rsc_tsc, 1202 .data[0] = key_data.rsc_tsc,
1206 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1203 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1207 .len[0] = sizeof(*key_data.rsc_tsc), 1204 .len[0] = sizeof(*key_data.rsc_tsc),
@@ -1215,7 +1212,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1215 if (key_data.use_tkip) { 1212 if (key_data.use_tkip) {
1216 ret = iwl_dvm_send_cmd_pdu(priv, 1213 ret = iwl_dvm_send_cmd_pdu(priv,
1217 REPLY_WOWLAN_TKIP_PARAMS, 1214 REPLY_WOWLAN_TKIP_PARAMS,
1218 CMD_SYNC, sizeof(tkip_cmd), 1215 0, sizeof(tkip_cmd),
1219 &tkip_cmd); 1216 &tkip_cmd);
1220 if (ret) 1217 if (ret)
1221 goto out; 1218 goto out;
@@ -1231,20 +1228,20 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1231 1228
1232 ret = iwl_dvm_send_cmd_pdu(priv, 1229 ret = iwl_dvm_send_cmd_pdu(priv,
1233 REPLY_WOWLAN_KEK_KCK_MATERIAL, 1230 REPLY_WOWLAN_KEK_KCK_MATERIAL,
1234 CMD_SYNC, sizeof(kek_kck_cmd), 1231 0, sizeof(kek_kck_cmd),
1235 &kek_kck_cmd); 1232 &kek_kck_cmd);
1236 if (ret) 1233 if (ret)
1237 goto out; 1234 goto out;
1238 } 1235 }
1239 } 1236 }
1240 1237
1241 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC, 1238 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
1242 sizeof(d3_cfg_cmd), &d3_cfg_cmd); 1239 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1243 if (ret) 1240 if (ret)
1244 goto out; 1241 goto out;
1245 1242
1246 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER, 1243 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
1247 CMD_SYNC, sizeof(wakeup_filter_cmd), 1244 0, sizeof(wakeup_filter_cmd),
1248 &wakeup_filter_cmd); 1245 &wakeup_filter_cmd);
1249 if (ret) 1246 if (ret)
1250 goto out; 1247 goto out;
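
[Note: iwl_trans_wait_tx_queue_empty() grows a second argument in this series: a bitmap selecting which TX queues to wait on. Passing 0xffffffff, as the DVM call sites above and below now do, keeps the old wait-for-everything behaviour. A trivial sketch of the bitmap convention; the queue number is illustrative:]

#include <stdio.h>

typedef unsigned int u32;

#define BIT(n) (1U << (n))

int main(void)
{
	u32 all_queues = 0xffffffffU;	/* what the DVM call sites pass */
	u32 one_queue = BIT(4);		/* hypothetical single queue */

	printf("all=0x%08x one=0x%08x\n", all_queues, one_queue);
	return 0;
}
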
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index dd55c9cf7ba8..29af7b51e370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
1091 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 1091 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1092} 1092}
1093 1093
1094static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1094static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1095 u32 queues, bool drop)
1095{ 1096{
1096 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1097 1098
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1119 } 1120 }
1120 } 1121 }
1121 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1122 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
1122 iwl_trans_wait_tx_queue_empty(priv->trans); 1123 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
1123done: 1124done:
1124 mutex_unlock(&priv->mutex); 1125 mutex_unlock(&priv->mutex);
1125 IWL_DEBUG_MAC80211(priv, "leave\n"); 1126 IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 6a6df71af1d7..0b7f46f0b079 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -128,7 +128,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
128 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 128 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
129 struct iwl_host_cmd cmd = { 129 struct iwl_host_cmd cmd = {
130 .id = REPLY_TX_BEACON, 130 .id = REPLY_TX_BEACON,
131 .flags = CMD_SYNC,
132 }; 131 };
133 struct ieee80211_tx_info *info; 132 struct ieee80211_tx_info *info;
134 u32 frame_size; 133 u32 frame_size;
@@ -311,8 +310,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
311 sizeof(struct iwl_statistics_cmd), 310 sizeof(struct iwl_statistics_cmd),
312 &statistics_cmd); 311 &statistics_cmd);
313 else 312 else
314 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 313 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
315 CMD_SYNC,
316 sizeof(struct iwl_statistics_cmd), 314 sizeof(struct iwl_statistics_cmd),
317 &statistics_cmd); 315 &statistics_cmd);
318} 316}
@@ -622,7 +620,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
622 620
623 ret = iwl_dvm_send_cmd_pdu(priv, 621 ret = iwl_dvm_send_cmd_pdu(priv,
624 REPLY_CT_KILL_CONFIG_CMD, 622 REPLY_CT_KILL_CONFIG_CMD,
625 CMD_SYNC, sizeof(adv_cmd), &adv_cmd); 623 0, sizeof(adv_cmd), &adv_cmd);
626 if (ret) 624 if (ret)
627 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 625 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
628 else 626 else
@@ -637,7 +635,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
637 635
638 ret = iwl_dvm_send_cmd_pdu(priv, 636 ret = iwl_dvm_send_cmd_pdu(priv,
639 REPLY_CT_KILL_CONFIG_CMD, 637 REPLY_CT_KILL_CONFIG_CMD,
640 CMD_SYNC, sizeof(cmd), &cmd); 638 0, sizeof(cmd), &cmd);
641 if (ret) 639 if (ret)
642 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 640 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
643 else 641 else
@@ -673,9 +671,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
673 671
674 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) { 672 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
675 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); 673 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
676 return iwl_dvm_send_cmd_pdu(priv, 674 return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
677 TX_ANT_CONFIGURATION_CMD,
678 CMD_SYNC,
679 sizeof(struct iwl_tx_ant_config_cmd), 675 sizeof(struct iwl_tx_ant_config_cmd),
680 &tx_ant_cmd); 676 &tx_ant_cmd);
681 } else { 677 } else {
@@ -703,7 +699,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
703 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 699 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
704 700
705 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 701 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
706 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) 702 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
707 IWL_ERR(priv, "failed to send BT Coex Config\n"); 703 IWL_ERR(priv, "failed to send BT Coex Config\n");
708} 704}
709 705
@@ -987,7 +983,7 @@ static void iwl_bg_restart(struct work_struct *data)
987 ieee80211_restart_hw(priv->hw); 983 ieee80211_restart_hw(priv->hw);
988 else 984 else
989 IWL_ERR(priv, 985 IWL_ERR(priv,
990 "Cannot request restart before registrating with mac80211"); 986 "Cannot request restart before registrating with mac80211\n");
991 } else { 987 } else {
992 WARN_ON(1); 988 WARN_ON(1);
993 } 989 }
@@ -1127,7 +1123,6 @@ static void iwl_option_config(struct iwl_priv *priv)
1127static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 1123static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1128{ 1124{
1129 struct iwl_nvm_data *data = priv->nvm_data; 1125 struct iwl_nvm_data *data = priv->nvm_data;
1130 char *debug_msg;
1131 1126
1132 if (data->sku_cap_11n_enable && 1127 if (data->sku_cap_11n_enable &&
1133 !priv->cfg->ht_params) { 1128 !priv->cfg->ht_params) {
@@ -1141,8 +1136,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1141 return -EINVAL; 1136 return -EINVAL;
1142 } 1137 }
1143 1138
1144 debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n"; 1139 IWL_DEBUG_INFO(priv,
1145 IWL_DEBUG_INFO(priv, debug_msg, 1140 "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
1146 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", 1141 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
1147 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", 1142 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
1148 data->sku_cap_11n_enable ? "" : "NOT", "enabled"); 1143 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
@@ -1350,7 +1345,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1350 iwl_set_hw_params(priv); 1345 iwl_set_hw_params(priv);
1351 1346
1352 if (!(priv->nvm_data->sku_cap_ipan_enable)) { 1347 if (!(priv->nvm_data->sku_cap_ipan_enable)) {
1353 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1348 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
1354 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1349 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1355 /* 1350 /*
1356 * if not PAN, then don't support P2P -- might be a uCode 1351 * if not PAN, then don't support P2P -- might be a uCode
@@ -2019,10 +2014,10 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2019 2014
2020 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) { 2015 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
2021 if (!test_bit(mq, &priv->transport_queue_stop)) { 2016 if (!test_bit(mq, &priv->transport_queue_stop)) {
2022 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq); 2017 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
2023 ieee80211_wake_queue(priv->hw, mq); 2018 ieee80211_wake_queue(priv->hw, mq);
2024 } else { 2019 } else {
2025 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq); 2020 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
2026 } 2021 }
2027 } 2022 }
2028 2023
@@ -2053,6 +2048,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2053 return false; 2048 return false;
2054} 2049}
2055 2050
2051static void iwl_napi_add(struct iwl_op_mode *op_mode,
2052 struct napi_struct *napi,
2053 struct net_device *napi_dev,
2054 int (*poll)(struct napi_struct *, int),
2055 int weight)
2056{
2057 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2058
2059 ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
2060}
2061
2056static const struct iwl_op_mode_ops iwl_dvm_ops = { 2062static const struct iwl_op_mode_ops iwl_dvm_ops = {
2057 .start = iwl_op_mode_dvm_start, 2063 .start = iwl_op_mode_dvm_start,
2058 .stop = iwl_op_mode_dvm_stop, 2064 .stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2071,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
2065 .cmd_queue_full = iwl_cmd_queue_full, 2071 .cmd_queue_full = iwl_cmd_queue_full,
2066 .nic_config = iwl_nic_config, 2072 .nic_config = iwl_nic_config,
2067 .wimax_active = iwl_wimax_active, 2073 .wimax_active = iwl_wimax_active,
2074 .napi_add = iwl_napi_add,
2068}; 2075};
2069 2076
2070/***************************************************************************** 2077/*****************************************************************************
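
[Note: the new .napi_add op lets the transport hand its NAPI instance to the op mode, which forwards it to mac80211 via ieee80211_napi_add(); this is what lets the RX and TX-status paths further down drop their _ni wrappers. A minimal user-space model of the optional-hook ops pattern; all names are stand-ins for the iwlwifi structures:]

#include <stdio.h>

struct op_mode_ops {
	void (*start)(void);
	void (*napi_add)(void);	/* optional hook */
};

static void dvm_start(void)
{
	puts("op mode started");
}

static void dvm_napi_add(void)
{
	puts("NAPI context registered with mac80211");
}

static const struct op_mode_ops dvm_ops = {
	.start = dvm_start,
	.napi_add = dvm_napi_add,
};

static void transport_init(const struct op_mode_ops *ops)
{
	ops->start();
	if (ops->napi_add)	/* tolerate op modes without NAPI support */
		ops->napi_add();
}

int main(void)
{
	transport_init(&dvm_ops);
	return 0;
}
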
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index b4e61417013a..f2c1439566b5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -278,7 +278,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
278 le32_to_cpu(cmd->sleep_interval[3]), 278 le32_to_cpu(cmd->sleep_interval[3]),
279 le32_to_cpu(cmd->sleep_interval[4])); 279 le32_to_cpu(cmd->sleep_interval[4]));
280 280
281 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC, 281 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
282 sizeof(struct iwl_powertable_cmd), cmd); 282 sizeof(struct iwl_powertable_cmd), cmd);
283} 283}
284 284
@@ -361,7 +361,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
361 361
362 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); 362 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
363 } else 363 } else
364 IWL_ERR(priv, "set power fail, ret = %d", ret); 364 IWL_ERR(priv, "set power fail, ret = %d\n", ret);
365 365
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index aa773a2da4ab..32b78a66536d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1453,7 +1453,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1453 tbl->action = IWL_LEGACY_SWITCH_SISO; 1453 tbl->action = IWL_LEGACY_SWITCH_SISO;
1454 break; 1454 break;
1455 default: 1455 default:
1456 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1456 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1457 break; 1457 break;
1458 } 1458 }
1459 1459
@@ -1628,7 +1628,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1628 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1628 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1629 break; 1629 break;
1630 default: 1630 default:
1631 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1631 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1632 break; 1632 break;
1633 } 1633 }
1634 1634
@@ -1799,7 +1799,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1799 tbl->action = IWL_MIMO2_SWITCH_SISO_A; 1799 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1800 break; 1800 break;
1801 default: 1801 default:
1802 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1802 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1803 break; 1803 break;
1804 } 1804 }
1805 1805
@@ -1969,7 +1969,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1969 tbl->action = IWL_MIMO3_SWITCH_SISO_A; 1969 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1970 break; 1970 break;
1971 default: 1971 default:
1972 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1972 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1973 break; 1973 break;
1974 } 1974 }
1975 1975
@@ -2709,7 +2709,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2709 rs_set_expected_tpt_table(lq_sta, tbl); 2709 rs_set_expected_tpt_table(lq_sta, tbl);
2710 rs_fill_link_cmd(NULL, lq_sta, rate); 2710 rs_fill_link_cmd(NULL, lq_sta, rate);
2711 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; 2711 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2712 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); 2712 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
2713} 2713}
2714 2714
2715static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, 2715static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cd8377346aff..debec963c610 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -786,7 +786,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
786 786
787 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 787 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
788 788
789 ieee80211_rx_ni(priv->hw, skb); 789 ieee80211_rx(priv->hw, skb);
790} 790}
791 791
792static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 792static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
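
[Note: dropping the _ni suffix above is a context change, not a cosmetic one: ieee80211_rx_ni() essentially wraps ieee80211_rx() with bottom-half disabling for process-context callers, and once RX is driven from a NAPI poll the code already runs in softirq context, so the plain variant is the correct and cheaper call. A toy model of the wrapper relationship; all function bodies are stand-ins:]

#include <stdio.h>

static void local_bh_disable(void) { puts("bottom halves off"); }
static void local_bh_enable(void)  { puts("bottom halves on"); }

static void deliver(void)
{
	puts("frame delivered to mac80211");
}

static void deliver_ni(void)	/* process-context wrapper */
{
	local_bh_disable();
	deliver();
	local_bh_enable();
}

int main(void)
{
	deliver_ni();	/* old path: called from process context */
	deliver();	/* new path: already in NAPI softirq context */
	return 0;
}
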
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 503a81e58185..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -104,7 +104,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
104 104
105 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 105 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
106 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 106 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
107 CMD_SYNC, sizeof(*send), send); 107 0, sizeof(*send), send);
108 108
109 send->filter_flags = old_filter; 109 send->filter_flags = old_filter;
110 110
@@ -134,7 +134,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
134 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 134 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
135 send->dev_type = RXON_DEV_TYPE_P2P; 135 send->dev_type = RXON_DEV_TYPE_P2P;
136 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 136 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
137 CMD_SYNC, sizeof(*send), send); 137 0, sizeof(*send), send);
138 138
139 send->filter_flags = old_filter; 139 send->filter_flags = old_filter;
140 send->dev_type = old_dev_type; 140 send->dev_type = old_dev_type;
@@ -160,7 +160,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
160 int ret; 160 int ret;
161 161
162 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 162 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
163 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC, 163 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
164 sizeof(*send), send); 164 sizeof(*send), send);
165 165
166 send->filter_flags = old_filter; 166 send->filter_flags = old_filter;
@@ -189,7 +189,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
189 ctx->qos_data.qos_active, 189 ctx->qos_data.qos_active,
190 ctx->qos_data.def_qos_parm.qos_flags); 190 ctx->qos_data.def_qos_parm.qos_flags);
191 191
192 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC, 192 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
193 sizeof(struct iwl_qosparam_cmd), 193 sizeof(struct iwl_qosparam_cmd),
194 &ctx->qos_data.def_qos_parm); 194 &ctx->qos_data.def_qos_parm);
195 if (ret) 195 if (ret)
@@ -353,7 +353,7 @@ static int iwl_send_rxon_timing(struct iwl_priv *priv,
353 le16_to_cpu(ctx->timing.atim_window)); 353 le16_to_cpu(ctx->timing.atim_window));
354 354
355 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 355 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
356 CMD_SYNC, sizeof(ctx->timing), &ctx->timing); 356 0, sizeof(ctx->timing), &ctx->timing);
357} 357}
358 358
359static int iwlagn_rxon_disconn(struct iwl_priv *priv, 359static int iwlagn_rxon_disconn(struct iwl_priv *priv,
@@ -495,7 +495,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
495 * Associated RXON doesn't clear the station table in uCode, 495 * Associated RXON doesn't clear the station table in uCode,
496 * so we don't need to restore stations etc. after this. 496 * so we don't need to restore stations etc. after this.
497 */ 497 */
498 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC, 498 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
499 sizeof(struct iwl_rxon_cmd), &ctx->staging); 499 sizeof(struct iwl_rxon_cmd), &ctx->staging);
500 if (ret) { 500 if (ret) {
501 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 501 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -610,7 +610,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
610 cmd.slots[0].width = cpu_to_le16(slot0); 610 cmd.slots[0].width = cpu_to_le16(slot0);
611 cmd.slots[1].width = cpu_to_le16(slot1); 611 cmd.slots[1].width = cpu_to_le16(slot1);
612 612
613 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC, 613 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
614 sizeof(cmd), &cmd); 614 sizeof(cmd), &cmd);
615 if (ret) 615 if (ret)
616 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); 616 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -823,7 +823,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
823 823
824 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) 824 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
825 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 825 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
826 IWL_WARN(priv, "CCK and auto detect"); 826 IWL_WARN(priv, "CCK and auto detect\n");
827 errors |= BIT(8); 827 errors |= BIT(8);
828 } 828 }
829 829
@@ -1395,7 +1395,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1395 priv->phy_calib_chain_noise_reset_cmd); 1395 priv->phy_calib_chain_noise_reset_cmd);
1396 ret = iwl_dvm_send_cmd_pdu(priv, 1396 ret = iwl_dvm_send_cmd_pdu(priv,
1397 REPLY_PHY_CALIBRATION_CMD, 1397 REPLY_PHY_CALIBRATION_CMD,
1398 CMD_SYNC, sizeof(cmd), &cmd); 1398 0, sizeof(cmd), &cmd);
1399 if (ret) 1399 if (ret)
1400 IWL_ERR(priv, 1400 IWL_ERR(priv,
1401 "Could not send REPLY_PHY_CALIBRATION_CMD\n"); 1401 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index be98b913ed58..43bef901e8f9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -59,7 +59,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
59 int ret; 59 int ret;
60 struct iwl_host_cmd cmd = { 60 struct iwl_host_cmd cmd = {
61 .id = REPLY_SCAN_ABORT_CMD, 61 .id = REPLY_SCAN_ABORT_CMD,
62 .flags = CMD_SYNC | CMD_WANT_SKB, 62 .flags = CMD_WANT_SKB,
63 }; 63 };
64 __le32 *status; 64 __le32 *status;
65 65
@@ -639,7 +639,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
639 struct iwl_host_cmd cmd = { 639 struct iwl_host_cmd cmd = {
640 .id = REPLY_SCAN_CMD, 640 .id = REPLY_SCAN_CMD,
641 .len = { sizeof(struct iwl_scan_cmd), }, 641 .len = { sizeof(struct iwl_scan_cmd), },
642 .flags = CMD_SYNC,
643 }; 642 };
644 struct iwl_scan_cmd *scan; 643 struct iwl_scan_cmd *scan;
645 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 644 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 9cdd91cdf661..6ec86adbe4a1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -39,7 +39,7 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
39 lockdep_assert_held(&priv->sta_lock); 39 lockdep_assert_held(&priv->sta_lock);
40 40
41 if (sta_id >= IWLAGN_STATION_COUNT) { 41 if (sta_id >= IWLAGN_STATION_COUNT) {
42 IWL_ERR(priv, "invalid sta_id %u", sta_id); 42 IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
43 return -EINVAL; 43 return -EINVAL;
44 } 44 }
45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
@@ -165,7 +165,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
165 iwl_free_resp(&cmd); 165 iwl_free_resp(&cmd);
166 166
167 if (cmd.handler_status) 167 if (cmd.handler_status)
168 IWL_ERR(priv, "%s - error in the CMD response %d", __func__, 168 IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
169 cmd.handler_status); 169 cmd.handler_status);
170 170
171 return cmd.handler_status; 171 return cmd.handler_status;
@@ -261,7 +261,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
261 cmd.station_flags = flags; 261 cmd.station_flags = flags;
262 cmd.sta.sta_id = sta_id; 262 cmd.sta.sta_id = sta_id;
263 263
264 return iwl_send_add_sta(priv, &cmd, CMD_SYNC); 264 return iwl_send_add_sta(priv, &cmd, 0);
265} 265}
266 266
267static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 267static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
@@ -413,7 +413,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
413 spin_unlock_bh(&priv->sta_lock); 413 spin_unlock_bh(&priv->sta_lock);
414 414
415 /* Add station to device's station table */ 415 /* Add station to device's station table */
416 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 416 ret = iwl_send_add_sta(priv, &sta_cmd, 0);
417 if (ret) { 417 if (ret) {
418 spin_lock_bh(&priv->sta_lock); 418 spin_lock_bh(&priv->sta_lock);
419 IWL_ERR(priv, "Adding station %pM failed.\n", 419 IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -456,7 +456,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
456 struct iwl_host_cmd cmd = { 456 struct iwl_host_cmd cmd = {
457 .id = REPLY_REMOVE_STA, 457 .id = REPLY_REMOVE_STA,
458 .len = { sizeof(struct iwl_rem_sta_cmd), }, 458 .len = { sizeof(struct iwl_rem_sta_cmd), },
459 .flags = CMD_SYNC,
460 .data = { &rm_sta_cmd, }, 459 .data = { &rm_sta_cmd, },
461 }; 460 };
462 461
@@ -740,7 +739,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
740 send_lq = true; 739 send_lq = true;
741 } 740 }
742 spin_unlock_bh(&priv->sta_lock); 741 spin_unlock_bh(&priv->sta_lock);
743 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 742 ret = iwl_send_add_sta(priv, &sta_cmd, 0);
744 if (ret) { 743 if (ret) {
745 spin_lock_bh(&priv->sta_lock); 744 spin_lock_bh(&priv->sta_lock);
746 IWL_ERR(priv, "Adding station %pM failed.\n", 745 IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -756,8 +755,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
756 * current LQ command 755 * current LQ command
757 */ 756 */
758 if (send_lq) 757 if (send_lq)
759 iwl_send_lq_cmd(priv, ctx, &lq, 758 iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
760 CMD_SYNC, true);
761 spin_lock_bh(&priv->sta_lock); 759 spin_lock_bh(&priv->sta_lock);
762 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 760 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
763 } 761 }
@@ -968,7 +966,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
968 return -ENOMEM; 966 return -ENOMEM;
969 } 967 }
970 968
971 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); 969 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
972 if (ret) 970 if (ret)
973 IWL_ERR(priv, "Link quality command failed (%d)\n", ret); 971 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
974 972
@@ -999,7 +997,6 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
999 struct iwl_host_cmd cmd = { 997 struct iwl_host_cmd cmd = {
1000 .id = ctx->wep_key_cmd, 998 .id = ctx->wep_key_cmd,
1001 .data = { wep_cmd, }, 999 .data = { wep_cmd, },
1002 .flags = CMD_SYNC,
1003 }; 1000 };
1004 1001
1005 might_sleep(); 1002 might_sleep();
@@ -1248,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1248 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1245 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1249 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1246 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1250 1247
1251 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1248 return iwl_send_add_sta(priv, &sta_cmd, 0);
1252} 1249}
1253 1250
1254int iwl_set_dynamic_key(struct iwl_priv *priv, 1251int iwl_set_dynamic_key(struct iwl_priv *priv,
@@ -1284,13 +1281,13 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1284 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1281 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1285 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1282 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1286 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1283 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
1287 seq.tkip.iv32, p1k, CMD_SYNC); 1284 seq.tkip.iv32, p1k, 0);
1288 break; 1285 break;
1289 case WLAN_CIPHER_SUITE_CCMP: 1286 case WLAN_CIPHER_SUITE_CCMP:
1290 case WLAN_CIPHER_SUITE_WEP40: 1287 case WLAN_CIPHER_SUITE_WEP40:
1291 case WLAN_CIPHER_SUITE_WEP104: 1288 case WLAN_CIPHER_SUITE_WEP104:
1292 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1289 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
1293 0, NULL, CMD_SYNC); 1290 0, NULL, 0);
1294 break; 1291 break;
1295 default: 1292 default:
1296 IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher); 1293 IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
@@ -1409,7 +1406,7 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1409 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1406 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1410 spin_unlock_bh(&priv->sta_lock); 1407 spin_unlock_bh(&priv->sta_lock);
1411 1408
1412 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1409 return iwl_send_add_sta(priv, &sta_cmd, 0);
1413} 1410}
1414 1411
1415int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, 1412int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1433,7 +1430,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1433 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1430 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1434 spin_unlock_bh(&priv->sta_lock); 1431 spin_unlock_bh(&priv->sta_lock);
1435 1432
1436 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1433 return iwl_send_add_sta(priv, &sta_cmd, 0);
1437} 1434}
1438 1435
1439int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 1436int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1458,7 +1455,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1458 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1455 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1459 spin_unlock_bh(&priv->sta_lock); 1456 spin_unlock_bh(&priv->sta_lock);
1460 1457
1461 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1458 return iwl_send_add_sta(priv, &sta_cmd, 0);
1462} 1459}
1463 1460
1464 1461
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 058c5892c427..acb981a0a0aa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -236,7 +236,7 @@ static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
236{ 236{
237 IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n"); 237 IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
238 /* make request to retrieve statistics information */ 238 /* make request to retrieve statistics information */
239 iwl_send_statistics_request(priv, CMD_SYNC, false); 239 iwl_send_statistics_request(priv, 0, false);
240 /* Reschedule the ct_kill wait timer */ 240 /* Reschedule the ct_kill wait timer */
241 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm, 241 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
242 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION)); 242 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 398dd096674c..3255a1723d17 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -402,10 +402,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
402 /* aggregation is on for this <sta,tid> */ 402 /* aggregation is on for this <sta,tid> */
403 if (info->flags & IEEE80211_TX_CTL_AMPDU && 403 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
404 tid_data->agg.state != IWL_AGG_ON) { 404 tid_data->agg.state != IWL_AGG_ON) {
405 IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:" 405 IWL_ERR(priv,
406 " Tx flags = 0x%08x, agg.state = %d", 406 "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
407 info->flags, tid_data->agg.state); 407 info->flags, tid_data->agg.state);
408 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", 408 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
409 sta_id, tid, 409 sta_id, tid,
410 IEEE80211_SEQ_TO_SN(tid_data->seq_number)); 410 IEEE80211_SEQ_TO_SN(tid_data->seq_number));
411 goto drop_unlock_sta; 411 goto drop_unlock_sta;
@@ -416,7 +416,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
416 */ 416 */
417 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && 417 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
418 tid_data->agg.state != IWL_AGG_OFF, 418 tid_data->agg.state != IWL_AGG_OFF,
419 "Tx while agg.state = %d", tid_data->agg.state)) 419 "Tx while agg.state = %d\n", tid_data->agg.state))
420 goto drop_unlock_sta; 420 goto drop_unlock_sta;
421 421
422 seq_number = tid_data->seq_number; 422 seq_number = tid_data->seq_number;
@@ -778,8 +778,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
778 /* There are no packets for this RA / TID in the HW any more */ 778 /* There are no packets for this RA / TID in the HW any more */
779 if (tid_data->agg.ssn == tid_data->next_reclaimed) { 779 if (tid_data->agg.ssn == tid_data->next_reclaimed) {
780 IWL_DEBUG_TX_QUEUES(priv, 780 IWL_DEBUG_TX_QUEUES(priv,
781 "Can continue DELBA flow ssn = next_recl =" 781 "Can continue DELBA flow ssn = next_recl = %d\n",
782 " %d", tid_data->next_reclaimed); 782 tid_data->next_reclaimed);
783 iwl_trans_txq_disable(priv->trans, 783 iwl_trans_txq_disable(priv->trans,
784 tid_data->agg.txq_id); 784 tid_data->agg.txq_id);
785 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); 785 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
@@ -791,8 +791,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
791 /* There are no packets for this RA / TID in the HW any more */ 791 /* There are no packets for this RA / TID in the HW any more */
792 if (tid_data->agg.ssn == tid_data->next_reclaimed) { 792 if (tid_data->agg.ssn == tid_data->next_reclaimed) {
793 IWL_DEBUG_TX_QUEUES(priv, 793 IWL_DEBUG_TX_QUEUES(priv,
794 "Can continue ADDBA flow ssn = next_recl =" 794 "Can continue ADDBA flow ssn = next_recl = %d\n",
795 " %d", tid_data->next_reclaimed); 795 tid_data->next_reclaimed);
796 tid_data->agg.state = IWL_AGG_STARTING; 796 tid_data->agg.state = IWL_AGG_STARTING;
797 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); 797 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
798 } 798 }
@@ -1216,8 +1216,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1216 ctx->vif->type == NL80211_IFTYPE_STATION) { 1216 ctx->vif->type == NL80211_IFTYPE_STATION) {
1217 /* block and stop all queues */ 1217 /* block and stop all queues */
1218 priv->passive_no_rx = true; 1218 priv->passive_no_rx = true;
1219 IWL_DEBUG_TX_QUEUES(priv, "stop all queues: " 1219 IWL_DEBUG_TX_QUEUES(priv,
1220 "passive channel"); 1220 "stop all queues: passive channel\n");
1221 ieee80211_stop_queues(priv->hw); 1221 ieee80211_stop_queues(priv->hw);
1222 1222
1223 IWL_DEBUG_TX_REPLY(priv, 1223 IWL_DEBUG_TX_REPLY(priv,
@@ -1271,7 +1271,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1271 1271
1272 while (!skb_queue_empty(&skbs)) { 1272 while (!skb_queue_empty(&skbs)) {
1273 skb = __skb_dequeue(&skbs); 1273 skb = __skb_dequeue(&skbs);
1274 ieee80211_tx_status_ni(priv->hw, skb); 1274 ieee80211_tx_status(priv->hw, skb);
1275 } 1275 }
1276 1276
1277 return 0; 1277 return 0;
@@ -1411,7 +1411,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1411 1411
1412 while (!skb_queue_empty(&reclaimed_skbs)) { 1412 while (!skb_queue_empty(&reclaimed_skbs)) {
1413 skb = __skb_dequeue(&reclaimed_skbs); 1413 skb = __skb_dequeue(&reclaimed_skbs);
1414 ieee80211_tx_status_ni(priv->hw, skb); 1414 ieee80211_tx_status(priv->hw, skb);
1415 } 1415 }
1416 1416
1417 return 0; 1417 return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index cf03ef5619d9..d5cee1530597 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -172,7 +172,7 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
172 memset(&coex_cmd, 0, sizeof(coex_cmd)); 172 memset(&coex_cmd, 0, sizeof(coex_cmd));
173 173
174 return iwl_dvm_send_cmd_pdu(priv, 174 return iwl_dvm_send_cmd_pdu(priv,
175 COEX_PRIORITY_TABLE_CMD, CMD_SYNC, 175 COEX_PRIORITY_TABLE_CMD, 0,
176 sizeof(coex_cmd), &coex_cmd); 176 sizeof(coex_cmd), &coex_cmd);
177} 177}
178 178
@@ -205,7 +205,7 @@ void iwl_send_prio_tbl(struct iwl_priv *priv)
205 memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl, 205 memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
206 sizeof(iwl_bt_prio_tbl)); 206 sizeof(iwl_bt_prio_tbl));
207 if (iwl_dvm_send_cmd_pdu(priv, 207 if (iwl_dvm_send_cmd_pdu(priv,
208 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC, 208 REPLY_BT_COEX_PRIO_TABLE, 0,
209 sizeof(prio_tbl_cmd), &prio_tbl_cmd)) 209 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
210 IWL_ERR(priv, "failed to send BT prio tbl command\n"); 210 IWL_ERR(priv, "failed to send BT prio tbl command\n");
211} 211}
@@ -218,7 +218,7 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
218 env_cmd.action = action; 218 env_cmd.action = action;
219 env_cmd.type = type; 219 env_cmd.type = type;
220 ret = iwl_dvm_send_cmd_pdu(priv, 220 ret = iwl_dvm_send_cmd_pdu(priv,
221 REPLY_BT_COEX_PROT_ENV, CMD_SYNC, 221 REPLY_BT_COEX_PROT_ENV, 0,
222 sizeof(env_cmd), &env_cmd); 222 sizeof(env_cmd), &env_cmd);
223 if (ret) 223 if (ret)
224 IWL_ERR(priv, "failed to send BT env command\n"); 224 IWL_ERR(priv, "failed to send BT env command\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 854ba84ccb73..c3817fae16c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
62 .led_compensation = 51, 62 .led_compensation = 51,
63 .wd_timeout = IWL_WATCHDOG_DISABLED, 63 .wd_timeout = IWL_WATCHDOG_DISABLED,
64 .max_event_log_size = 128, 64 .max_event_log_size = 128,
65 .scd_chain_ext_wa = true,
65}; 66};
66 67
67static const struct iwl_ht_params iwl1000_ht_params = { 68static const struct iwl_ht_params iwl1000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 3e63323637f3..21e5d0843a62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
75 .wd_timeout = IWL_DEF_WD_TIMEOUT, 75 .wd_timeout = IWL_DEF_WD_TIMEOUT,
76 .max_event_log_size = 512, 76 .max_event_log_size = 512,
77 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 77 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
78 .scd_chain_ext_wa = true,
78}; 79};
79 80
80 81
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
88 .wd_timeout = IWL_LONG_WD_TIMEOUT, 89 .wd_timeout = IWL_LONG_WD_TIMEOUT,
89 .max_event_log_size = 512, 90 .max_event_log_size = 512,
90 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 91 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
92 .scd_chain_ext_wa = true,
91}; 93};
92 94
93static const struct iwl_ht_params iwl2000_ht_params = { 95static const struct iwl_ht_params iwl2000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6674f2c4541c..332bbede39e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
61 .led_compensation = 51, 61 .led_compensation = 51,
62 .wd_timeout = IWL_WATCHDOG_DISABLED, 62 .wd_timeout = IWL_WATCHDOG_DISABLED,
63 .max_event_log_size = 512, 63 .max_event_log_size = 512,
64 .scd_chain_ext_wa = true,
64}; 65};
65 66
66static const struct iwl_ht_params iwl5000_ht_params = { 67static const struct iwl_ht_params iwl5000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8048de90233f..8f2c3c8c6b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
85 .wd_timeout = IWL_DEF_WD_TIMEOUT, 85 .wd_timeout = IWL_DEF_WD_TIMEOUT,
86 .max_event_log_size = 512, 86 .max_event_log_size = 512,
87 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 87 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
88 .scd_chain_ext_wa = true,
88}; 89};
89 90
90static const struct iwl_base_params iwl6050_base_params = { 91static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
97 .wd_timeout = IWL_DEF_WD_TIMEOUT, 98 .wd_timeout = IWL_DEF_WD_TIMEOUT,
98 .max_event_log_size = 1024, 99 .max_event_log_size = 1024,
99 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .scd_chain_ext_wa = true,
100}; 102};
101 103
102static const struct iwl_base_params iwl6000_g2_base_params = { 104static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
109 .wd_timeout = IWL_LONG_WD_TIMEOUT, 111 .wd_timeout = IWL_LONG_WD_TIMEOUT,
110 .max_event_log_size = 512, 112 .max_event_log_size = 512,
111 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ 113 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
114 .scd_chain_ext_wa = true,
112}; 115};
113 116
114static const struct iwl_ht_params iwl6000_ht_params = { 117static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 4c2d4ef28b22..48730064da73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,12 +71,12 @@
71#define IWL3160_UCODE_API_MAX 9 71#define IWL3160_UCODE_API_MAX 9
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 8 74#define IWL7260_UCODE_API_OK 9
75#define IWL3160_UCODE_API_OK 8 75#define IWL3160_UCODE_API_OK 9
76 76
77/* Lowest firmware API version supported */ 77/* Lowest firmware API version supported */
78#define IWL7260_UCODE_API_MIN 7 78#define IWL7260_UCODE_API_MIN 8
79#define IWL3160_UCODE_API_MIN 7 79#define IWL3160_UCODE_API_MIN 8
80 80
81/* NVM versions */ 81/* NVM versions */
82#define IWL7260_NVM_VERSION 0x0a1d 82#define IWL7260_NVM_VERSION 0x0a1d
@@ -98,7 +98,7 @@
98#define NVM_HW_SECTION_NUM_FAMILY_7000 0 98#define NVM_HW_SECTION_NUM_FAMILY_7000 0
99 99
100static const struct iwl_base_params iwl7000_base_params = { 100static const struct iwl_base_params iwl7000_base_params = {
101 .eeprom_size = OTP_LOW_IMAGE_SIZE, 101 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
102 .num_of_queues = IWLAGN_NUM_QUEUES, 102 .num_of_queues = IWLAGN_NUM_QUEUES,
103 .pll_cfg_val = 0, 103 .pll_cfg_val = 0,
104 .shadow_ram_support = true, 104 .shadow_ram_support = true,
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
107 .max_event_log_size = 512, 107 .max_event_log_size = 512,
108 .shadow_reg_enable = true, 108 .shadow_reg_enable = true,
109 .pcie_l1_allowed = true, 109 .pcie_l1_allowed = true,
110 .apmg_wake_up_wa = true,
110}; 111};
111 112
112static const struct iwl_ht_params iwl7000_ht_params = { 113static const struct iwl_ht_params iwl7000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index f5bd82b88592..51c41531d81d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -83,9 +83,10 @@
83#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode" 83#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
84 84
85#define NVM_HW_SECTION_NUM_FAMILY_8000 10 85#define NVM_HW_SECTION_NUM_FAMILY_8000 10
86#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
86 87
87static const struct iwl_base_params iwl8000_base_params = { 88static const struct iwl_base_params iwl8000_base_params = {
88 .eeprom_size = OTP_LOW_IMAGE_SIZE, 89 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
89 .num_of_queues = IWLAGN_NUM_QUEUES, 90 .num_of_queues = IWLAGN_NUM_QUEUES,
90 .pll_cfg_val = 0, 91 .pll_cfg_val = 0,
91 .shadow_ram_support = true, 92 .shadow_ram_support = true,
@@ -118,6 +119,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
118 .ht_params = &iwl8000_ht_params, 119 .ht_params = &iwl8000_ht_params,
119 .nvm_ver = IWL8000_NVM_VERSION, 120 .nvm_ver = IWL8000_NVM_VERSION,
120 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 121 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
122 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
121}; 123};
122 124
123const struct iwl_cfg iwl8260_n_cfg = { 125const struct iwl_cfg iwl8260_n_cfg = {
@@ -127,6 +129,7 @@ const struct iwl_cfg iwl8260_n_cfg = {
127 .ht_params = &iwl8000_ht_params, 129 .ht_params = &iwl8000_ht_params,
128 .nvm_ver = IWL8000_NVM_VERSION, 130 .nvm_ver = IWL8000_NVM_VERSION,
129 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 131 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
132 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
130}; 133};
131 134
132MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 135MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7f37fb86837b..04a483d38659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,9 +102,7 @@
102 102
103/* EEPROM */ 103/* EEPROM */
104#define IWLAGN_EEPROM_IMG_SIZE 2048 104#define IWLAGN_EEPROM_IMG_SIZE 2048
105/* OTP */ 105
106/* lower blocks contain EEPROM image and calibration data */
107#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
108/* high blocks contain PAPD data */ 106/* high blocks contain PAPD data */
109#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ 107#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
110#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ 108#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f17dc3f2c8a..b7047905f41a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
146 * @wd_timeout: TX queues watchdog timeout 146 * @wd_timeout: TX queues watchdog timeout
147 * @max_event_log_size: size of event log buffer size for ucode event logging 147 * @max_event_log_size: size of event log buffer size for ucode event logging
148 * @shadow_reg_enable: HW shadow register support 148 * @shadow_reg_enable: HW shadow register support
149 * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
150 * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
151 * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
149 */ 152 */
150struct iwl_base_params { 153struct iwl_base_params {
151 int eeprom_size; 154 int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
160 u32 max_event_log_size; 163 u32 max_event_log_size;
161 const bool shadow_reg_enable; 164 const bool shadow_reg_enable;
162 const bool pcie_l1_allowed; 165 const bool pcie_l1_allowed;
166 const bool apmg_wake_up_wa;
167 const bool scd_chain_ext_wa;
163}; 168};
164 169
165/* 170/*
@@ -188,6 +193,11 @@ struct iwl_ht_params {
188#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 193#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
189#define EEPROM_REGULATORY_BAND_NO_HT40 0 194#define EEPROM_REGULATORY_BAND_NO_HT40 0
190 195
196/* lower blocks contain EEPROM image and calibration data */
197#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
198#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
199#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
200
191struct iwl_eeprom_params { 201struct iwl_eeprom_params {
192 const u8 regulatory_bands[7]; 202 const u8 regulatory_bands[7];
193 bool enhanced_txpower; 203 bool enhanced_txpower;
@@ -264,6 +274,8 @@ struct iwl_cfg {
 	u8 nvm_hw_section_num;
 	bool lp_xtal_workaround;
 	const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+	bool no_power_up_nic_in_init;
+	const char *default_nvm_file;
 };
 
 /*
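
The OTP_LOW_IMAGE_SIZE macros added here count 512-entry blocks of 16-bit words, so the byte values do match the sizes named in the trailing comments. A minimal stand-alone C check of the arithmetic (editor's illustration, not part of the commit):

/* Editor's sketch, not part of the commit: verify the OTP size macros. */
#include <stdint.h>

#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(uint16_t))
#define OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(uint16_t))
#define OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(uint16_t))

_Static_assert(OTP_LOW_IMAGE_SIZE == 2 * 1024, "2 KB");
_Static_assert(OTP_LOW_IMAGE_SIZE_FAMILY_7000 == 16 * 1024, "16 KB");
_Static_assert(OTP_LOW_IMAGE_SIZE_FAMILY_8000 == 32 * 1024, "32 KB");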
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 8a44f594528d..09feff4fa226 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,8 +61,6 @@
  *
  *****************************************************************************/
 
-#define DEBUG
-
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
@@ -128,8 +126,8 @@ void __iwl_dbg(struct device *dev,
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_have_debug_level(level) &&
 	    (!limit || net_ratelimit()))
-		dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
-			function, &vaf);
+		dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+			   in_interrupt() ? 'I' : 'U', function, &vaf);
 #endif
 	trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
 	va_end(args);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c8cbdbe15924..295083510e72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,12 +47,32 @@ void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
 
+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
 /* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
-#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
+#define IWL_ERR_DEV(d, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_err((d), false, false, f, ## a);		\
+	} while (0)
+#define IWL_ERR(m, f, a...)					\
+	IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_warn((m)->dev, f, ## a);			\
+	} while (0)
+#define IWL_INFO(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_info((m)->dev, f, ## a);			\
+	} while (0)
+#define IWL_CRIT(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_crit((m)->dev, f, ## a);			\
+	} while (0)
 
 #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
 void __iwl_dbg(struct device *dev,
@@ -72,12 +92,17 @@ do { \
 			   DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
 } while (0)
 
+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...)		\
+	do {								\
+		CHECK_FOR_NEWLINE(fmt);					\
+		__iwl_dbg(dev, level, limit, __func__, fmt, ##args);	\
+	} while (0)
 #define IWL_DEBUG(m, level, fmt, args...)				\
-	__iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
 #define IWL_DEBUG_DEV(dev, level, fmt, args...)				\
-	__iwl_dbg((dev), level, false, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)				\
-	__iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 #define iwl_print_hex_dump(m, level, p, len)	\
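
The CHECK_FOR_NEWLINE() trick above relies on sizeof() of a string literal including the terminating NUL, so f[sizeof(f) - 2] is the last visible character; BUILD_BUG_ON() then breaks the build when that character is not '\n'. A small user-space sketch of the same indexing (editor's illustration; the kernel variant fails at compile time rather than at run time):

#include <assert.h>
#include <stdio.h>

/* Mirror of the driver's check: last visible character of a literal. */
#define LAST_CHAR(f) ((f)[sizeof(f) - 2])

int main(void)
{
	assert(LAST_CHAR("looks good\n") == '\n');  /* well-formed format */
	assert(LAST_CHAR("missing") != '\n');       /* the case the macro rejects */
	printf("sizeof(\"abc\\n\") = %zu\n", sizeof("abc\n")); /* prints 5 */
	return 0;
}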
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0a3e841b44a9..f2a5c12269a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1243,6 +1243,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
 	.bt_coex_active = true,
 	.power_level = IWL_POWER_INDEX_1,
 	.wd_disable = true,
+	.uapsd_disable = false,
 	/* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1356,6 +1357,10 @@ MODULE_PARM_DESC(wd_disable,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+		   bool, S_IRUGO);
+MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
+
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
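
module_param_named() binds the new iwlwifi_mod_params.uapsd_disable field to a parameter that can be set at load time and read back through sysfs (S_IRUGO makes it world-readable, read-only). A stripped-down sketch of the same pattern as a stand-alone module - all names here are illustrative, not part of iwlwifi:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_disable;	/* false by default, like uapsd_disable */

module_param_named(demo_disable, demo_disable, bool, S_IRUGO);
MODULE_PARM_DESC(demo_disable, "disable the demo feature (default: N)");

static int __init demo_init(void)
{
	pr_info("demo_disable = %d\n", demo_disable);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

Assuming this builds as demo.ko, loading it with "modprobe demo demo_disable=1" flips the default, and the live value then appears under /sys/module/demo/parameters/demo_disable.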
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 58c8941c0d95..2953ffceda38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,10 +71,15 @@
  * enum iwl_fw_error_dump_type - types of data in the dump file
  * @IWL_FW_ERROR_DUMP_SRAM:
  * @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ *	&struct iwl_fw_error_dump_txcmd packets
  */
 enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_SRAM = 0,
 	IWL_FW_ERROR_DUMP_REG = 1,
+	IWL_FW_ERROR_DUMP_RXF = 2,
+	IWL_FW_ERROR_DUMP_TXCMD = 3,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -89,7 +94,7 @@ struct iwl_fw_error_dump_data {
 	__le32 type;
 	__le32 len;
 	__u8 data[];
-} __packed __aligned(4);
+} __packed;
 
 /**
  * struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +106,29 @@ struct iwl_fw_error_dump_file {
 	__le32 barker;
 	__le32 file_len;
 	u8 data[0];
-} __packed __aligned(4);
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+	__le32 cmdlen;
+	__le32 caplen;
+	u8 data[];
+} __packed;
+
+/**
+ * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+	return (void *)(data->data + le32_to_cpu(data->len));
+}
 
 #endif /* __fw_error_dump_h__ */
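
iwl_mvm_fw_error_next_data() advances by the variable-length payload of each block, so a complete dump can be walked as a flat sequence of (type, len, data) records. A hypothetical consumer (editor's sketch; walk_dump() is not a driver function, and file_len is assumed to cover the whole in-memory file):

/* Editor's sketch: iterate the TLV-like blocks of a validated dump. */
static void walk_dump(struct iwl_fw_error_dump_file *file)
{
	u8 *end = (u8 *)file + le32_to_cpu(file->file_len);
	struct iwl_fw_error_dump_data *data =
		(struct iwl_fw_error_dump_data *)file->data;

	while ((u8 *)data < end) {
		pr_info("block type %u, len %u\n",
			le32_to_cpu(data->type), le32_to_cpu(data->len));
		data = iwl_mvm_fw_error_next_data(data);
	}
}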
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d14f19339d61..0aa7c0085c9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,29 +74,24 @@
  * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
  * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
  *	offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *	(rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
  * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
  *	from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- *	connection when going back to D0
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- *	containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
  *	P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ *	P2P client interfaces simultaneously if they are in same bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
@@ -104,22 +99,16 @@ enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_MFP			= BIT(2),
 	IWL_UCODE_TLV_FLAGS_P2P			= BIT(3),
 	IWL_UCODE_TLV_FLAGS_DW_BC_TABLE		= BIT(4),
-	IWL_UCODE_TLV_FLAGS_NEWBT_COEX		= BIT(5),
-	IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT	= BIT(6),
 	IWL_UCODE_TLV_FLAGS_SHORT_BL		= BIT(7),
-	IWL_UCODE_TLV_FLAGS_RX_ENERGY_API	= BIT(8),
-	IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2	= BIT(9),
 	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS	= BIT(10),
-	IWL_UCODE_TLV_FLAGS_BF_UPDATED		= BIT(11),
 	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID	= BIT(12),
-	IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API	= BIT(14),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL	= BIT(15),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE	= BIT(16),
-	IWL_UCODE_TLV_FLAGS_SCHED_SCAN		= BIT(17),
-	IWL_UCODE_TLV_FLAGS_STA_KEY_CMD		= BIT(19),
-	IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD	= BIT(20),
+	IWL_UCODE_TLV_FLAGS_P2P_PM		= BIT(21),
 	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM	= BIT(22),
+	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM	= BIT(23),
 	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
+	IWL_UCODE_TLV_FLAGS_EBS_SUPPORT		= BIT(25),
 	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
 	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
 	IWL_UCODE_TLV_FLAGS_GO_UAPSD		= BIT(30),
@@ -128,9 +117,11 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0),
+	IWL_UCODE_TLV_API_CSA_FLOW		= BIT(4),
 };
 
 /**
@@ -183,6 +174,7 @@ enum iwl_ucode_sec {
 #define IWL_UCODE_SECTION_MAX 12
 #define IWL_API_ARRAY_SIZE 1
 #define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
 
 struct iwl_ucode_capabilities {
 	u32 max_probe_length;
@@ -205,6 +197,11 @@ struct fw_img {
 	bool is_dual_cpus;
 };
 
+struct iwl_sf_region {
+	u32 addr;
+	u32 size;
+};
+
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
 #define IWL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
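
Since each TLV flag is a single bit, a firmware feature test reduces to a mask check against ucode_capa.flags - the same pattern this patch uses elsewhere (e.g. the NEWBT_COEX checks being removed in coex.c). A hedged sketch combining the renamed UAPSD_SUPPORT bit with the new uapsd_disable module parameter; the helper name is invented for illustration:

/* Editor's sketch: fw_supports_uapsd() is hypothetical, not in the tree. */
static bool fw_supports_uapsd(const struct iwl_ucode_capabilities *capa)
{
	return (capa->flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) &&
	       !iwlwifi_mod_params.uapsd_disable;
}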
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 44cc3cf45762..5eef4ae7333b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
33#include "iwl-io.h" 33#include "iwl-io.h"
34#include "iwl-csr.h" 34#include "iwl-csr.h"
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-prph.h"
36#include "iwl-fh.h" 37#include "iwl-fh.h"
37 38
38#define IWL_POLL_INTERVAL 10 /* microseconds */ 39#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -183,6 +184,23 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 }
 IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 
+void iwl_force_nmi(struct iwl_trans *trans)
+{
+	/*
+	 * In HW previous to the 8000 HW family, and in the 8000 HW family
+	 * itself when the revision step==0, the DEVICE_SET_NMI_REG is used
+	 * to force an NMI. Otherwise, a different register -
+	 * DEVICE_SET_NMI_8000B_REG - is used.
+	 */
+	if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
+	    ((trans->hw_rev & 0xc) == 0x0))
+		iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
+	else
+		iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
+			       DEVICE_SET_NMI_8000B_VAL);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
 static const char *get_fh_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
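
iwl_force_nmi() hides the register choice (DEVICE_SET_NMI_REG vs DEVICE_SET_NMI_8000B_REG, keyed on device family and on the revision step the code reads out of bits 2-3 of hw_rev) behind a single call, so callers stay family-agnostic. A hypothetical recovery path (editor's sketch; demo_recover_stuck_fw() is not a function from this commit):

static void demo_recover_stuck_fw(struct iwl_trans *trans)
{
	/* Ask the firmware to NMI itself; its exception handler records
	 * state that a subsequent error dump can pick up. */
	iwl_force_nmi(trans);
}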
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 665ddd9dbbc4..705d12c079e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -80,6 +80,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 			    u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);
 
 /* Error handling */
 int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d994317db85b..d051857729ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -119,6 +119,7 @@ struct iwl_mod_params {
 #endif
 	int ant_coupling;
 	char *nvm_file;
+	bool uapsd_disable;
 };
 
 #endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6be30c698506..85eee79c495c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -62,6 +62,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
@@ -127,19 +128,20 @@ static const u8 iwl_nvm_channels[] = {
 
 static const u8 iwl_nvm_channels_family_8000[] = {
 	/* 2.4 GHz */
-	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
 	/* 5 GHz */
 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
 	149, 153, 157, 161, 165, 169, 173, 177, 181
 };
 
 #define IWL_NUM_CHANNELS		ARRAY_SIZE(iwl_nvm_channels)
 #define IWL_NUM_CHANNELS_FAMILY_8000	ARRAY_SIZE(iwl_nvm_channels_family_8000)
 #define NUM_2GHZ_CHANNELS		14
-#define FIRST_2GHZ_HT_MINUS		5
-#define LAST_2GHZ_HT_PLUS		9
-#define LAST_5GHZ_HT			161
+#define NUM_2GHZ_CHANNELS_FAMILY_8000	14
+#define FIRST_2GHZ_HT_MINUS		5
+#define LAST_2GHZ_HT_PLUS		9
+#define LAST_5GHZ_HT			161
 
 #define DEFAULT_MAX_TX_POWER 16
 
@@ -202,21 +204,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 	struct ieee80211_channel *channel;
 	u16 ch_flags;
 	bool is_5ghz;
-	int num_of_ch;
+	int num_of_ch, num_2ghz_channels;
 	const u8 *nvm_chan;
 
 	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
 		num_of_ch = IWL_NUM_CHANNELS;
 		nvm_chan = &iwl_nvm_channels[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS;
 	} else {
 		num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
 		nvm_chan = &iwl_nvm_channels_family_8000[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
 	}
 
 	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
 		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
 
-		if (ch_idx >= NUM_2GHZ_CHANNELS &&
+		if (ch_idx >= num_2ghz_channels &&
 		    !data->sku_cap_band_52GHz_enable)
 			ch_flags &= ~NVM_CHANNEL_VALID;
 
@@ -225,7 +229,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
225 "Ch. %d Flags %x [%sGHz] - No traffic\n", 229 "Ch. %d Flags %x [%sGHz] - No traffic\n",
226 nvm_chan[ch_idx], 230 nvm_chan[ch_idx],
227 ch_flags, 231 ch_flags,
228 (ch_idx >= NUM_2GHZ_CHANNELS) ? 232 (ch_idx >= num_2ghz_channels) ?
229 "5.2" : "2.4"); 233 "5.2" : "2.4");
230 continue; 234 continue;
231 } 235 }
@@ -234,7 +238,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 		n_channels++;
 
 		channel->hw_value = nvm_chan[ch_idx];
-		channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+		channel->band = (ch_idx < num_2ghz_channels) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 		channel->center_freq =
 			ieee80211_channel_to_frequency(
@@ -242,7 +246,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
 		/* TODO: Need to be dependent to the NVM */
 		channel->flags = IEEE80211_CHAN_NO_HT40;
-		if (ch_idx < NUM_2GHZ_CHANNELS &&
+		if (ch_idx < num_2ghz_channels &&
 		    (ch_flags & NVM_CHANNEL_40MHZ)) {
 			if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +254,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
 		} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
 			   (ch_flags & NVM_CHANNEL_40MHZ)) {
-			if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+			if ((ch_idx - num_2ghz_channels) % 2 == 0)
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
 			else
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
@@ -447,13 +451,7 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
 			       struct iwl_nvm_data *data,
 			       const __le16 *nvm_sec)
 {
-	u8 hw_addr[ETH_ALEN];
-
-	if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
-		memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
-	else
-		memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
-		       ETH_ALEN);
+	const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
 
 	/* The byte order is little endian 16 bit, meaning 214365 */
 	data->hw_addr[0] = hw_addr[1];
@@ -464,6 +462,41 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
 	data->hw_addr[5] = hw_addr[4];
 }
 
+static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+					   struct iwl_nvm_data *data,
+					   const __le16 *mac_override,
+					   const __le16 *nvm_hw)
+{
+	const u8 *hw_addr;
+
+	if (mac_override) {
+		hw_addr = (const u8 *)(mac_override +
+				       MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+
+		/* The byte order is little endian 16 bit, meaning 214365 */
+		data->hw_addr[0] = hw_addr[1];
+		data->hw_addr[1] = hw_addr[0];
+		data->hw_addr[2] = hw_addr[3];
+		data->hw_addr[3] = hw_addr[2];
+		data->hw_addr[4] = hw_addr[5];
+		data->hw_addr[5] = hw_addr[4];
+
+		if (is_valid_ether_addr(hw_addr))
+			return;
+	}
+
+	/* take the MAC address from the OTP */
+	hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
+	data->hw_addr[0] = hw_addr[3];
+	data->hw_addr[1] = hw_addr[2];
+	data->hw_addr[2] = hw_addr[1];
+	data->hw_addr[3] = hw_addr[0];
+
+	hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
+	data->hw_addr[4] = hw_addr[1];
+	data->hw_addr[5] = hw_addr[0];
+}
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 		   const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -523,7 +556,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 				rx_chains);
 	} else {
 		/* MAC address in family 8000 */
-		iwl_set_hw_address(cfg, data, mac_override);
+		iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);
 
 		iwl_init_sbands(dev, cfg, data, regulatory,
 				sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
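
The "214365" comment describes how the OTP stores the MAC address as three little-endian 16-bit words, so consecutive byte pairs arrive swapped. A stand-alone demonstration with a made-up address (editor's sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bytes as they would sit in NVM for the address 00:11:22:33:44:55 */
	const uint8_t nvm[6] = { 0x11, 0x00, 0x33, 0x22, 0x55, 0x44 };
	uint8_t mac[6];

	mac[0] = nvm[1]; mac[1] = nvm[0];	/* swap each 16-bit pair */
	mac[2] = nvm[3]; mac[3] = nvm[2];
	mac[4] = nvm[5]; mac[5] = nvm[4];

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:11:22:33:44:55 */
	return 0;
}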
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ea29504ac617..99785c892f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -63,6 +63,7 @@
 #ifndef __iwl_op_mode_h__
 #define __iwl_op_mode_h__
 
+#include <linux/netdevice.h>
 #include <linux/debugfs.h>
 
 struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
  * @stop: stop the op_mode. Must free all the memory allocated.
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- *	HCMD this Rx responds to.
- *	This callback may sleep, it is called from a threaded IRQ handler.
+ *	HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ *	but the higher layers need to know about it (in particular mac80211 to
+ *	to able to call the right NAPI RX functions); this function is needed
+ *	to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
+	void (*napi_add)(struct iwl_op_mode *op_mode,
+			 struct napi_struct *napi,
+			 struct net_device *napi_dev,
+			 int (*poll)(struct napi_struct *, int),
+			 int weight);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
 					struct iwl_rx_cmd_buffer *rxb,
 					struct iwl_device_cmd *cmd)
 {
-	might_sleep();
 	return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
 	return op_mode->ops->exit_d0i3(op_mode);
 }
 
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+					struct napi_struct *napi,
+					struct net_device *napi_dev,
+					int (*poll)(struct napi_struct *, int),
+					int weight)
+{
+	if (!op_mode->ops->napi_add)
+		return;
+	op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
 #endif /* __iwl_op_mode_h__ */
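
An op_mode that wants NAPI would implement the new hook by forwarding to netif_napi_add(), which is what the @napi_add kerneldoc above anticipates. A hedged sketch of such an implementation (demo_op_mode_napi_add() is hypothetical; the signature of netif_napi_add() is the one from kernels of this era):

static void demo_op_mode_napi_add(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct net_device *napi_dev,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	/* Register the transport's NAPI context with the net core. */
	netif_napi_add(napi_dev, napi, poll, weight);
}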
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index b761ac4822a3..d4fb5cad07ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -345,7 +345,6 @@ static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
 	struct iwl_phy_db_cmd phy_db_cmd;
 	struct iwl_host_cmd cmd = {
 		.id = PHY_DB_CMD,
-		.flags = CMD_SYNC,
 	};
 
 	IWL_DEBUG_INFO(phy_db->trans,
@@ -393,13 +392,13 @@ static int iwl_phy_db_send_all_channel_groups(
 						      entry->data);
 		if (err) {
 			IWL_ERR(phy_db->trans,
-				"Can't SEND phy_db section %d (%d), err %d",
+				"Can't SEND phy_db section %d (%d), err %d\n",
 				type, i, err);
 			return err;
 		}
 
 		IWL_DEBUG_INFO(phy_db->trans,
-			       "Sent PHY_DB HCMD, type = %d num = %d",
+			       "Sent PHY_DB HCMD, type = %d num = %d\n",
 			       type, i);
 	}
 
@@ -451,7 +450,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
 						IWL_NUM_PAPD_CH_GROUPS);
 	if (err) {
 		IWL_ERR(phy_db->trans,
-			"Cannot send channel specific PAPD groups");
+			"Cannot send channel specific PAPD groups\n");
 		return err;
 	}
 
@@ -461,7 +460,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
 						IWL_NUM_TXP_CH_GROUPS);
 	if (err) {
 		IWL_ERR(phy_db->trans,
-			"Cannot send channel specific TX power groups");
+			"Cannot send channel specific TX power groups\n");
 		return err;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5f657c501406..4997e27672b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,9 @@
 
 /* Device NMI register */
 #define DEVICE_SET_NMI_REG		0x00a01c30
+#define DEVICE_SET_NMI_VAL		0x1
+#define DEVICE_SET_NMI_8000B_REG	0x00a01c24
+#define DEVICE_SET_NMI_8000B_VAL	0x1000000
 
 /* Shared registers (0x0..0x3ff, via target indirect or periphery */
 #define SHR_BASE			0x00a10000
@@ -348,4 +351,12 @@ enum secure_load_status_reg {
 
 #define LMPM_SECURE_TIME_OUT	(100)
 
+/* Rx FIFO */
+#define RXF_SIZE_ADDR			(0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS		(7)
+#define RXF_SIZE_BYTE_CNT_MSK		(0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR	(0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR		(0xa00c0c)
+
 #endif /* __iwl_prph_h__ */
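
The RXF byte-count field occupies bits 7..16 of the register at RXF_SIZE_ADDR, which is what the _POS/_MSK pair encodes (0x3ff is a 10-bit field). Extracting it would presumably look like this (editor's sketch, not a function from the patch):

/* Editor's sketch: demo_rxf_byte_count() is hypothetical. */
static u32 demo_rxf_byte_count(struct iwl_trans *trans)
{
	u32 val = iwl_read_prph(trans, RXF_SIZE_ADDR);

	return (val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
}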
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8cdb0dd618a6..34d49e171fb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -189,10 +189,9 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 /**
  * enum CMD_MODE - how to send the host commands ?
  *
- * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't wait for the response
- * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *	response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
+ *	the response. The caller needs to call iwl_free_resp when done.
  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
  *	command queue, but after other high priority commands. valid only
  *	with CMD_ASYNC.
@@ -202,7 +201,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  *	(i.e. mark it as non-idle).
  */
 enum CMD_MODE {
-	CMD_SYNC		= 0,
 	CMD_ASYNC		= BIT(0),
 	CMD_WANT_SKB		= BIT(1),
 	CMD_SEND_IN_RFKILL	= BIT(2),
@@ -427,7 +425,7 @@ struct iwl_trans;
  * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
  *	If RFkill is asserted in the middle of a SYNC host command, it must
  *	return -ERFKILL straight away.
- *	May sleep only if CMD_SYNC is set
+ *	May sleep only if CMD_ASYNC is not set
  * @tx: send an skb
  *	Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
@@ -437,8 +435,7 @@ struct iwl_trans;
  *	this one. The op_mode must not configure the HCMD queue. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *	Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- *	May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *	automatically deleted.
  * @write8: write a u8 to a register at offset ofs from the BAR
@@ -464,6 +461,11 @@ struct iwl_trans;
  * @unref: release a reference previously taken with @ref. Note that
  *	initially the reference count is 1, making an initial @unref
  *	necessary to allow low power states.
+ * @dump_data: fill a data dump with debug data, maybe containing last
+ *	TX'ed commands and similar. When called with a NULL buffer and
+ *	zero buffer length, provide only the (estimated) required buffer
+ *	length. Return the used buffer length.
+ *	Note that the transport must fill in the proper file headers.
  */
 struct iwl_trans_ops {
 
@@ -471,6 +473,8 @@ struct iwl_trans_ops {
 	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
 			bool run_in_rfkill);
+	int (*update_sf)(struct iwl_trans *trans,
+			 struct iwl_sf_region *st_fwrd_space);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);
 
@@ -490,7 +494,7 @@ struct iwl_trans_ops {
 	void (*txq_disable)(struct iwl_trans *trans, int queue);
 
 	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
-	int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 
 	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
 	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -512,6 +516,10 @@ struct iwl_trans_ops {
 			      u32 value);
 	void (*ref)(struct iwl_trans *trans);
 	void (*unref)(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+#endif
 };
 
 /**
@@ -630,6 +638,17 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
+static inline int iwl_trans_update_sf(struct iwl_trans *trans,
+				      struct iwl_sf_region *st_fwrd_space)
+{
+	might_sleep();
+
+	if (trans->ops->update_sf)
+		return trans->ops->update_sf(trans, st_fwrd_space);
+
+	return 0;
+}
+
 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 {
 	might_sleep();
@@ -665,6 +684,16 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
 		trans->ops->unref(trans);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
+				      void *buf, u32 buflen)
+{
+	if (!trans->ops->dump_data)
+		return 0;
+	return trans->ops->dump_data(trans, buf, buflen);
+}
+#endif
+
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 				     struct iwl_host_cmd *cmd)
 {
@@ -678,7 +707,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 		return -EIO;
 
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 		return -EIO;
 	}
 
@@ -720,7 +749,7 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		return -EIO;
 
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
 	return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -729,7 +758,7 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 					     int ssn, struct sk_buff_head *skbs)
 {
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
@@ -746,7 +775,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 	might_sleep();
 
 	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
 	trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
 			       frame_limit, ssn);
@@ -759,12 +788,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
 			      IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
 }
 
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+						u32 txq_bm)
 {
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
-	return trans->ops->wait_tx_queue_empty(trans);
+	return trans->ops->wait_tx_queue_empty(trans, txq_bm);
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
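
The @dump_data contract documented above is a classic two-pass API: call once with a NULL buffer to size the allocation, then again to fill it. A hypothetical debugfs-side caller (editor's sketch; demo_collect_dump() is not in the tree, and vzalloc() assumes linux/vmalloc.h is available):

#ifdef CONFIG_IWLWIFI_DEBUGFS
static void *demo_collect_dump(struct iwl_trans *trans, u32 *len)
{
	u32 size = iwl_trans_dump_data(trans, NULL, 0); /* pass 1: size */
	void *buf;

	if (!size)
		return NULL;

	buf = vzalloc(size);
	if (!buf)
		return NULL;

	*len = iwl_trans_dump_data(trans, buf, size);   /* pass 2: fill */
	return buf;
}
#endif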
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ccdd3b7c4cce..c30d7f64ec1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,8 +3,9 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
 iwlmvm-y += power.o coex.o
-iwlmvm-y += led.o tt.o offloading.o
+iwlmvm-y += tt.o offloading.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314425cb..c8c3b38228f0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -104,12 +104,9 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
 #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
 #define BT_ANTENNA_COUPLING_THRESHOLD		(30)
 
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 {
-	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-		return 0;
-
-	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
 				    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
 				    &iwl_bt_prio_tbl);
 }
@@ -127,10 +124,10 @@ const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
 };
 
 static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-	cpu_to_le32(0xf0f0f0f0),
-	cpu_to_le32(0xc0c0c0c0),
-	cpu_to_le32(0xfcfcfcfc),
-	cpu_to_le32(0xff00ff00),
+	cpu_to_le32(0xf0f0f0f0), /* 50% */
+	cpu_to_le32(0xc0c0c0c0), /* 25% */
+	cpu_to_le32(0xfcfcfcfc), /* 75% */
+	cpu_to_le32(0xfefefefe), /* 87.5% */
 };
 
 static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
@@ -303,8 +300,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-	cpu_to_le32(0x22002200),
-	cpu_to_le32(0x33113311),
+	cpu_to_le32(0x28412201),
+	cpu_to_le32(0x11118451),
 };
 
 struct corunning_block_luts {
@@ -568,13 +565,13 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		.id = BT_CONFIG,
 		.len = { sizeof(*bt_cmd), },
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-		.flags = CMD_SYNC,
 	};
 	int ret;
 	u32 flags;
 
-	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-		return 0;
+	ret = iwl_send_bt_prio_tbl(mvm);
+	if (ret)
+		return ret;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
 	if (!bt_cmd)
@@ -582,10 +579,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 	cmd.data[0] = bt_cmd;
 
 	bt_cmd->max_kill = 5;
-	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
-	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
-	bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
-	bt_cmd->bt4_tx_rx_max_freq0 = 15,
+	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+	bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+	bt_cmd->bt4_tx_rx_max_freq0 = 15;
+	bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+	bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
 
 	flags = iwlwifi_mod_params.bt_coex_active ?
 		BT_COEX_NW : BT_COEX_DISABLE;
@@ -663,7 +662,6 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
 		.data[0] = &bt_cmd,
 		.len = { sizeof(*bt_cmd), },
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-		.flags = CMD_SYNC,
 	};
 	int ret = 0;
 
@@ -717,7 +715,8 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
 	return ret;
 }
 
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+				       bool enable)
 {
 	struct iwl_bt_coex_cmd *bt_cmd;
 	/* Send ASYNC since this can be sent from an atomic context */
@@ -735,8 +734,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
 		return 0;
 
 	/* nothing to do */
-	if (mvmsta->bt_reduced_txpower_dbg ||
-	    mvmsta->bt_reduced_txpower == enable)
+	if (mvmsta->bt_reduced_txpower == enable)
 		return 0;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -803,23 +801,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
+		/* Count BSSes vifs */
+		data->num_bss_ifaces++;
 		/* default smps_mode for BSS / P2P client is AUTOMATIC */
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
-		data->num_bss_ifaces++;
-
-		/*
-		 * Count unassoc BSSes, relax SMSP constraints
-		 * and disable reduced Tx Power
-		 */
-		if (!vif->bss_conf.assoc) {
-			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-					    smps_mode);
-			if (iwl_mvm_bt_coex_reduced_txp(mvm,
-							mvmvif->ap_sta_id,
-							false))
-				IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-			return;
-		}
 		break;
 	case NL80211_IFTYPE_AP:
 		/* default smps_mode for AP / GO is OFF */
@@ -845,8 +830,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 		/* ... relax constraints and disable rssi events */
 		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
 				    smps_mode);
-		if (vif->type == NL80211_IFTYPE_STATION)
+		data->reduced_tx_power = false;
+		if (vif->type == NL80211_IFTYPE_STATION) {
+			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+						    false);
 			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+		}
 		return;
 	}
 
@@ -857,6 +846,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	smps_mode = vif->type == NL80211_IFTYPE_AP ?
 			IEEE80211_SMPS_OFF :
 			IEEE80211_SMPS_DYNAMIC;
+
+	/* relax SMPS contraints for next association */
+	if (!vif->bss_conf.assoc)
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
 	IWL_DEBUG_COEX(data->mvm,
 		       "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
 		       mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -903,22 +897,18 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	/* if secondary is not NULL, it might be a GO */
 	data->secondary = chanctx_conf;
 
-	/* don't reduce the Tx power if in loose scheme */
+	/*
+	 * don't reduce the Tx power if one of these is true:
+	 *  we are in LOOSE
+	 *  single share antenna product
+	 *  BT is active
+	 *  we are associated
+	 */
 	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-	    mvm->cfg->bt_shared_single_ant) {
-		data->reduced_tx_power = false;
-		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-		return;
-	}
-
-	/* reduced Txpower only if BT is on, so ...*/
-	if (!data->notif->bt_status) {
-		/* ... cancel reduced Tx power ... */
-		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+	    !data->notif->bt_status) {
 		data->reduced_tx_power = false;
-
-		/* ... and there is no need to get reports on RSSI any more. */
+		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
 		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
 	}
@@ -1022,9 +1012,9 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
 	/* Don't spam the fw with the same command over and over */
 	if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
-		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
 					 sizeof(cmd), &cmd))
-			IWL_ERR(mvm, "Failed to send BT_CI cmd");
+			IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
 		memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
 	}
 
@@ -1039,7 +1029,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-/* upon association, the fw will send in BT Coex notification */
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 			     struct iwl_rx_cmd_buffer *rxb,
 			     struct iwl_device_cmd *dev_cmd)
@@ -1215,6 +1204,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
 	return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
 }
 
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+				    enum ieee80211_band band)
+{
+	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+	if (band != IEEE80211_BAND_2GHZ)
+		return false;
+
+	return bt_activity >= BT_LOW_TRAFFIC;
+}
+
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 			   struct ieee80211_tx_info *info, u8 ac)
 {
@@ -1249,9 +1249,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-		return;
-
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -1270,7 +1267,6 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 		.id = BT_CONFIG,
 		.len = { sizeof(*bt_cmd), },
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-		.flags = CMD_SYNC,
 	};
 
 	if (!IWL_MVM_BT_COEX_CORUNNING)
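 
The new percentage comments on iwl_bt_prio_boost appear to read each 32-bit word as a duty-cycle pattern, i.e. the fraction of set bits (an editor's reading of the values, not stated in the patch). A quick stand-alone check of that interpretation:

#include <stdio.h>

static unsigned popcount32(unsigned v)
{
	unsigned n = 0;

	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	const unsigned vals[] = { 0xf0f0f0f0, 0xc0c0c0c0,
				  0xfcfcfcfc, 0xfefefefe };

	for (int i = 0; i < 4; i++)
		printf("0x%08x -> %.1f%%\n", vals[i],
		       popcount32(vals[i]) * 100.0 / 32);
	/* prints 50.0%, 25.0%, 75.0%, 87.5% - matching the new comments */
	return 0;
}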
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index e56f5a0edf85..645b3cfc29a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -193,8 +193,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
 			wkc.wep_key.key_offset = data->wep_key_idx;
 		}
 
-		ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
-					   sizeof(wkc), &wkc);
+		ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
 		data->error = ret != 0;
 
 		mvm->ptk_ivlen = key->iv_len;
@@ -341,7 +340,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
341 struct iwl_host_cmd cmd = { 340 struct iwl_host_cmd cmd = {
342 .id = WOWLAN_PATTERNS, 341 .id = WOWLAN_PATTERNS,
343 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 342 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
344 .flags = CMD_SYNC,
345 }; 343 };
346 int i, err; 344 int i, err;
347 345
@@ -518,7 +516,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
518 .id = REMOTE_WAKE_CONFIG_CMD, 516 .id = REMOTE_WAKE_CONFIG_CMD,
519 .len = { sizeof(*cfg), }, 517 .len = { sizeof(*cfg), },
520 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 518 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
521 .flags = CMD_SYNC,
522 }; 519 };
523 int ret; 520 int ret;
524 521
@@ -666,10 +663,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
666 663
667 if (WARN_ON(!vif->bss_conf.assoc)) 664 if (WARN_ON(!vif->bss_conf.assoc))
668 return -EINVAL; 665 return -EINVAL;
669 /* hack */ 666
670 vif->bss_conf.assoc = false;
671 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 667 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
672 vif->bss_conf.assoc = true;
673 if (ret) 668 if (ret)
674 return ret; 669 return ret;
675 670
@@ -705,7 +700,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
705 return ret; 700 return ret;
706 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 701 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
707 702
708 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 703 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
709 if (ret) 704 if (ret)
710 return ret; 705 return ret;
711 706
@@ -719,7 +714,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
719 for (i = 1; i < MAX_BINDINGS; i++) 714 for (i = 1; i < MAX_BINDINGS; i++)
720 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); 715 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
721 716
722 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, 717 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
723 sizeof(quota_cmd), &quota_cmd); 718 sizeof(quota_cmd), &quota_cmd);
724 if (ret) 719 if (ret)
725 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 720 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
@@ -739,15 +734,13 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
739 }; 734 };
740 struct iwl_host_cmd cmd = { 735 struct iwl_host_cmd cmd = {
741 .id = NON_QOS_TX_COUNTER_CMD, 736 .id = NON_QOS_TX_COUNTER_CMD,
742 .flags = CMD_SYNC | CMD_WANT_SKB, 737 .flags = CMD_WANT_SKB,
743 }; 738 };
744 int err; 739 int err;
745 u32 size; 740 u32 size;
746 741
747 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) { 742 cmd.data[0] = &query_cmd;
748 cmd.data[0] = &query_cmd; 743 cmd.len[0] = sizeof(query_cmd);
749 cmd.len[0] = sizeof(query_cmd);
750 }
751 744
752 err = iwl_mvm_send_cmd(mvm, &cmd); 745 err = iwl_mvm_send_cmd(mvm, &cmd);
753 if (err) 746 if (err)
@@ -758,10 +751,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
758 err = -EINVAL; 751 err = -EINVAL;
759 } else { 752 } else {
760 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); 753 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
761 /* new API returns next, not last-used seqno */ 754 /* firmware returns next, not last-used seqno */
762 if (mvm->fw->ucode_capa.flags & 755 err = (u16) (err - 0x10);
763 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
764 err = (u16) (err - 0x10);
765 } 756 }
766 757
767 iwl_free_resp(&cmd); 758 iwl_free_resp(&cmd);
@@ -785,11 +776,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
785 776
786 mvmvif->seqno_valid = false; 777 mvmvif->seqno_valid = false;
787 778
788 if (!(mvm->fw->ucode_capa.flags & 779 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
789 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
790 return;
791
792 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
793 sizeof(query_cmd), &query_cmd)) 780 sizeof(query_cmd), &query_cmd))
794 IWL_ERR(mvm, "failed to set non-QoS seqno\n"); 781 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
795} 782}
@@ -804,7 +791,7 @@ iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
804 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID) 791 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
805 cmd_len = sizeof(*cmd); 792 cmd_len = sizeof(*cmd);
806 793
807 return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC, 794 return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
808 cmd_len, cmd); 795 cmd_len, cmd);
809} 796}
810 797
@@ -833,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
833 }; 820 };
834 struct iwl_host_cmd d3_cfg_cmd = { 821 struct iwl_host_cmd d3_cfg_cmd = {
835 .id = D3_CONFIG_CMD, 822 .id = D3_CONFIG_CMD,
836 .flags = CMD_SYNC | CMD_WANT_SKB, 823 .flags = CMD_WANT_SKB,
837 .data[0] = &d3_cfg_cmd_data, 824 .data[0] = &d3_cfg_cmd_data,
838 .len[0] = sizeof(d3_cfg_cmd_data), 825 .len[0] = sizeof(d3_cfg_cmd_data),
839 }; 826 };
@@ -983,7 +970,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
983 if (key_data.use_rsc_tsc) { 970 if (key_data.use_rsc_tsc) {
984 struct iwl_host_cmd rsc_tsc_cmd = { 971 struct iwl_host_cmd rsc_tsc_cmd = {
985 .id = WOWLAN_TSC_RSC_PARAM, 972 .id = WOWLAN_TSC_RSC_PARAM,
986 .flags = CMD_SYNC,
987 .data[0] = key_data.rsc_tsc, 973 .data[0] = key_data.rsc_tsc,
988 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 974 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
989 .len[0] = sizeof(*key_data.rsc_tsc), 975 .len[0] = sizeof(*key_data.rsc_tsc),
@@ -997,7 +983,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
997 if (key_data.use_tkip) { 983 if (key_data.use_tkip) {
998 ret = iwl_mvm_send_cmd_pdu(mvm, 984 ret = iwl_mvm_send_cmd_pdu(mvm,
999 WOWLAN_TKIP_PARAM, 985 WOWLAN_TKIP_PARAM,
1000 CMD_SYNC, sizeof(tkip_cmd), 986 0, sizeof(tkip_cmd),
1001 &tkip_cmd); 987 &tkip_cmd);
1002 if (ret) 988 if (ret)
1003 goto out; 989 goto out;
@@ -1014,8 +1000,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1014 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; 1000 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
1015 1001
1016 ret = iwl_mvm_send_cmd_pdu(mvm, 1002 ret = iwl_mvm_send_cmd_pdu(mvm,
1017 WOWLAN_KEK_KCK_MATERIAL, 1003 WOWLAN_KEK_KCK_MATERIAL, 0,
1018 CMD_SYNC,
1019 sizeof(kek_kck_cmd), 1004 sizeof(kek_kck_cmd),
1020 &kek_kck_cmd); 1005 &kek_kck_cmd);
1021 if (ret) 1006 if (ret)
@@ -1031,7 +1016,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1031 if (ret) 1016 if (ret)
1032 goto out; 1017 goto out;
1033 1018
1034 ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC); 1019 ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
1035 if (ret) 1020 if (ret)
1036 goto out; 1021 goto out;
1037 1022
@@ -1043,7 +1028,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1043 if (ret) 1028 if (ret)
1044 goto out; 1029 goto out;
1045 1030
1046 ret = iwl_mvm_power_update_mac(mvm, vif); 1031 ret = iwl_mvm_power_update_mac(mvm);
1047 if (ret) 1032 if (ret)
1048 goto out; 1033 goto out;
1049 1034
@@ -1082,6 +1067,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1082 1067
1083int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1068int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1084{ 1069{
1070 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1071
1072 if (iwl_mvm_is_d0i3_supported(mvm)) {
1073 mutex_lock(&mvm->d0i3_suspend_mutex);
1074 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1075 mutex_unlock(&mvm->d0i3_suspend_mutex);
1076 return 0;
1077 }
1078
1085 return __iwl_mvm_suspend(hw, wowlan, false); 1079 return __iwl_mvm_suspend(hw, wowlan, false);
1086} 1080}
1087 1081
@@ -1277,7 +1271,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1277} 1271}
1278 1272
1279static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, 1273static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1280 struct iwl_wowlan_status_v6 *status) 1274 struct iwl_wowlan_status *status)
1281{ 1275{
1282 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; 1276 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1283 1277
@@ -1294,7 +1288,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1294} 1288}
1295 1289
1296struct iwl_mvm_d3_gtk_iter_data { 1290struct iwl_mvm_d3_gtk_iter_data {
1297 struct iwl_wowlan_status_v6 *status; 1291 struct iwl_wowlan_status *status;
1298 void *last_gtk; 1292 void *last_gtk;
1299 u32 cipher; 1293 u32 cipher;
1300 bool find_phase, unhandled_cipher; 1294 bool find_phase, unhandled_cipher;
@@ -1370,7 +1364,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
1370 1364
1371static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 1365static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1372 struct ieee80211_vif *vif, 1366 struct ieee80211_vif *vif,
1373 struct iwl_wowlan_status_v6 *status) 1367 struct iwl_wowlan_status *status)
1374{ 1368{
1375 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1369 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1376 struct iwl_mvm_d3_gtk_iter_data gtkdata = { 1370 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1465,10 +1459,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1465 } err_info; 1459 } err_info;
1466 struct iwl_host_cmd cmd = { 1460 struct iwl_host_cmd cmd = {
1467 .id = WOWLAN_GET_STATUSES, 1461 .id = WOWLAN_GET_STATUSES,
1468 .flags = CMD_SYNC | CMD_WANT_SKB, 1462 .flags = CMD_WANT_SKB,
1469 }; 1463 };
1470 struct iwl_wowlan_status_data status; 1464 struct iwl_wowlan_status_data status;
1471 struct iwl_wowlan_status_v6 *status_v6; 1465 struct iwl_wowlan_status *fw_status;
1472 int ret, len, status_size, i; 1466 int ret, len, status_size, i;
1473 bool keep; 1467 bool keep;
1474 struct ieee80211_sta *ap_sta; 1468 struct ieee80211_sta *ap_sta;
@@ -1491,7 +1485,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1491 } 1485 }
1492 1486
1493 /* only for tracing for now */ 1487 /* only for tracing for now */
1494 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL); 1488 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1495 if (ret) 1489 if (ret)
1496 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); 1490 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1497 1491
@@ -1505,10 +1499,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1505 if (!cmd.resp_pkt) 1499 if (!cmd.resp_pkt)
1506 goto out_unlock; 1500 goto out_unlock;
1507 1501
1508 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) 1502 status_size = sizeof(*fw_status);
1509 status_size = sizeof(struct iwl_wowlan_status_v6);
1510 else
1511 status_size = sizeof(struct iwl_wowlan_status_v4);
1512 1503
1513 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1504 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1514 if (len < status_size) { 1505 if (len < status_size) {
@@ -1516,35 +1507,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1516 goto out_free_resp; 1507 goto out_free_resp;
1517 } 1508 }
1518 1509
1519 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) { 1510 fw_status = (void *)cmd.resp_pkt->data;
1520 status_v6 = (void *)cmd.resp_pkt->data; 1511
1521 1512 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1522 status.pattern_number = le16_to_cpu(status_v6->pattern_number); 1513 for (i = 0; i < 8; i++)
1523 for (i = 0; i < 8; i++) 1514 status.qos_seq_ctr[i] =
1524 status.qos_seq_ctr[i] = 1515 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1525 le16_to_cpu(status_v6->qos_seq_ctr[i]); 1516 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1526 status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons); 1517 status.wake_packet_length =
1527 status.wake_packet_length = 1518 le32_to_cpu(fw_status->wake_packet_length);
1528 le32_to_cpu(status_v6->wake_packet_length); 1519 status.wake_packet_bufsize =
1529 status.wake_packet_bufsize = 1520 le32_to_cpu(fw_status->wake_packet_bufsize);
1530 le32_to_cpu(status_v6->wake_packet_bufsize); 1521 status.wake_packet = fw_status->wake_packet;
1531 status.wake_packet = status_v6->wake_packet;
1532 } else {
1533 struct iwl_wowlan_status_v4 *status_v4;
1534 status_v6 = NULL;
1535 status_v4 = (void *)cmd.resp_pkt->data;
1536
1537 status.pattern_number = le16_to_cpu(status_v4->pattern_number);
1538 for (i = 0; i < 8; i++)
1539 status.qos_seq_ctr[i] =
1540 le16_to_cpu(status_v4->qos_seq_ctr[i]);
1541 status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
1542 status.wake_packet_length =
1543 le32_to_cpu(status_v4->wake_packet_length);
1544 status.wake_packet_bufsize =
1545 le32_to_cpu(status_v4->wake_packet_bufsize);
1546 status.wake_packet = status_v4->wake_packet;
1547 }
1548 1522
1549 if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) { 1523 if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
1550 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1524 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1545,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1571 1545
1572 iwl_mvm_report_wakeup_reasons(mvm, vif, &status); 1546 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1573 1547
1574 keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6); 1548 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1575 1549
1576 iwl_free_resp(&cmd); 1550 iwl_free_resp(&cmd);
1577 return keep; 1551 return keep;
@@ -1674,6 +1648,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1674{ 1648{
1675 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1649 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1676 1650
1651 if (iwl_mvm_is_d0i3_supported(mvm)) {
1652 bool exit_now;
1653
1654 mutex_lock(&mvm->d0i3_suspend_mutex);
1655 __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1656 exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
1657 &mvm->d0i3_suspend_flags);
1658 mutex_unlock(&mvm->d0i3_suspend_mutex);
1659 if (exit_now)
1660 _iwl_mvm_exit_d0i3(mvm);
1661 return 0;
1662 }
1663
1677 return __iwl_mvm_resume(mvm, false); 1664 return __iwl_mvm_resume(mvm, false);
1678} 1665}
1679 1666
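The d0i3 additions above form a small handshake: suspend sets D0I3_DEFER_WAKEUP under the mutex, and resume clears it, replaying the exit only if a wakeup arrived in between (recorded as D0I3_PENDING_WAKEUP). A sketch of the producer side of that handshake, assuming a wakeup path that consults the same bits; the function name here is hypothetical, the flag bits and mutex are the ones in the diff:

	/* Hypothetical check used by a wakeup handler: while the host is
	 * between suspend and resume, only record the wakeup; the real
	 * d0i3 exit is performed later from iwl_mvm_resume(). */
	static bool d0i3_wakeup_is_deferred(struct iwl_mvm *mvm)
	{
		bool defer;

		mutex_lock(&mvm->d0i3_suspend_mutex);
		defer = test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
		if (defer)
			__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);

		return defer;
	}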
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9b59e1d7ae71..2e90ff795c13 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 		IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
 		dbgfs_pm->tx_data_timeout = val;
 		break;
-	case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
-		IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
-		dbgfs_pm->disable_power_off = val;
-		break;
 	case MVM_DEBUGFS_PM_LPRX_ENA:
 		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
 		dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
 		if (sscanf(buf + 16, "%d", &val) != 1)
 			return -EINVAL;
 		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-	} else if (!strncmp("disable_power_off=", buf, 18) &&
-		   !(mvm->fw->ucode_capa.flags &
-		     IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
-		if (sscanf(buf + 18, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
 	} else if (!strncmp("lprx=", buf, 5)) {
 		if (sscanf(buf + 5, "%d", &val) != 1)
 			return -EINVAL;
@@ -185,7 +175,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
 
 	mutex_lock(&mvm->mutex);
 	iwl_dbgfs_update_pm(mvm, vif, param, val);
-	ret = iwl_mvm_power_update_mac(mvm, vif);
+	ret = iwl_mvm_power_update_mac(mvm);
 	mutex_unlock(&mvm->mutex);
 
 	return ret ?: count;
@@ -272,10 +262,9 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
 			struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
 
 			pos += scnprintf(buf+pos, bufsz-pos,
-					 "ap_sta_id %d - reduced Tx power %d force %d\n",
+					 "ap_sta_id %d - reduced Tx power %d\n",
 					 ap_sta_id,
-					 mvm_sta->bt_reduced_txpower,
-					 mvm_sta->bt_reduced_txpower_dbg);
+					 mvm_sta->bt_reduced_txpower);
 		}
 	}
 
@@ -293,41 +282,6 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
-					   char *buf, size_t count,
-					   loff_t *ppos)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm *mvm = mvmvif->mvm;
-	struct iwl_mvm_sta *mvmsta;
-	bool reduced_tx_power;
-	int ret;
-
-	if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
-		return -ENOTCONN;
-
-	if (strtobool(buf, &reduced_tx_power) != 0)
-		return -EINVAL;
-
-	mutex_lock(&mvm->mutex);
-
-	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
-	if (IS_ERR_OR_NULL(mvmsta)) {
-		mutex_unlock(&mvm->mutex);
-		return -ENOTCONN;
-	}
-
-	mvmsta->bt_reduced_txpower_dbg = false;
-	ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-					  reduced_tx_power);
-	if (!ret)
-		mvmsta->bt_reduced_txpower_dbg = true;
-
-	mutex_unlock(&mvm->mutex);
-
-	return ret ? : count;
-}
-
 static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
 				enum iwl_dbgfs_bf_mask param, int value)
 {
@@ -462,9 +416,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
 	mutex_lock(&mvm->mutex);
 	iwl_dbgfs_update_bf(vif, param, value);
 	if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
-		ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+		ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
 	else
-		ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
+		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
 	mutex_unlock(&mvm->mutex);
 
 	return ret ?: count;
@@ -568,7 +522,6 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -592,8 +545,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return;
 	}
 
-	if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
-	    iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
 	    ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
 	     (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
 	      mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
@@ -601,7 +553,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 				 S_IRUSR);
 
 	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
-	MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
 				 S_IRUSR | S_IWUSR);
 
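The pm_params writer above dispatches on "name=value" prefixes with a strncmp()/sscanf() pair per key. A standalone sketch of that parse step; the helper below is hypothetical, the pattern is the one the handlers in this file use:

	/* Hypothetical helper mirroring the "key=<int>" parsing in the
	 * debugfs write handlers above. */
	static int parse_dbgfs_int(const char *buf, const char *key, int *val)
	{
		size_t klen = strlen(key);

		if (strncmp(buf, key, klen))
			return -ENOENT;		/* not this key */
		if (sscanf(buf + klen, "%d", val) != 1)
			return -EINVAL;		/* key matched, bad value */
		return 0;
	}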
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 1b52deea6081..29ca72695eaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -65,9 +65,8 @@
 #include "mvm.h"
 #include "sta.h"
 #include "iwl-io.h"
-#include "iwl-prph.h"
 #include "debugfs.h"
-#include "fw-error-dump.h"
+#include "iwl-fw-error-dump.h"
 
 static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 					size_t count, loff_t *ppos)
@@ -136,9 +135,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
 
 	file->private_data = mvm->fw_error_dump;
 	mvm->fw_error_dump = NULL;
-	kfree(mvm->fw_error_sram);
-	mvm->fw_error_sram = NULL;
-	mvm->fw_error_sram_len = 0;
 	ret = 0;
 
 out:
@@ -684,7 +680,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
 	mvm->restart_fw++;
 
 	/* take the return value to make compiler happy - it will fail anyway */
-	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
+	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
 
 	mutex_unlock(&mvm->mutex);
 
@@ -694,7 +690,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
 static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
 				      size_t count, loff_t *ppos)
 {
-	iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+	iwl_force_nmi(mvm->trans);
 
 	return count;
 }
@@ -841,7 +837,7 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
 	/* send updated bcast filtering configuration */
 	if (mvm->dbgfs_bcast_filtering.override &&
 	    iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
 					   sizeof(cmd), &cmd);
 	mutex_unlock(&mvm->mutex);
 
@@ -913,7 +909,7 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
 	/* send updated bcast filtering configuration */
 	if (mvm->dbgfs_bcast_filtering.override &&
 	    iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
 					   sizeof(cmd), &cmd);
 	mutex_unlock(&mvm->mutex);
 
@@ -1004,6 +1000,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
 	PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
 	PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
 	PRINT_MVM_REF(IWL_MVM_REF_USER);
+	PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -1108,9 +1105,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 
 static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
-        .open = iwl_dbgfs_fw_error_dump_open,
-        .read = iwl_dbgfs_fw_error_dump_read,
-        .release = iwl_dbgfs_fw_error_dump_release,
+	.open = iwl_dbgfs_fw_error_dump_open,
+	.read = iwl_dbgfs_fw_error_dump_read,
+	.release = iwl_dbgfs_fw_error_dump_release,
 };
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1135,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 	MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
-		MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
-				     S_IRUSR | S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+			     S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 21877e5966a8..5fe82c29c8ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
 	BT_COEX_TX_DIS_LUT,
 
 	BT_COEX_MAX_LUT,
-};
+	BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
 
 #define BT_COEX_LUT_SIZE (12)
 #define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
 * @flags:&enum iwl_bt_coex_flags
 * @max_kill:
 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *	should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *	should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
 * @wifi_tx_prio_boost: SW boost of wifi tx priority
 * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
 *
 * The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
 	__le32 flags;
 	u8 max_kill;
 	u8 bt_reduced_tx_power;
-	u8 reserved[2];
+	u8 override_primary_lut;
+	u8 override_secondary_lut;
 
 	u8 bt4_antenna_isolation;
 	u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
 	__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
 
 	__le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
 
 /**
 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
 	BT_ON_NO_CONNECTION	= 1,
 	BT_LOW_TRAFFIC		= 2,
 	BT_HIGH_TRAFFIC		= 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
 /**
 * struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
 	__le32 primary_ch_lut;
 	__le32 secondary_ch_lut;
 	__le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
 
 enum iwl_bt_coex_prio_table_event {
 	BT_COEX_PRIO_TBL_EVT_INIT_CALIB1	= 0,
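The BT_COEX_BT_ACTIVITY_GRADING values tagged above form an ordered scale, which is what lets the new iwl_mvm_bt_coex_is_tpc_allowed() in coex.c gate Tx power control with a plain threshold compare. A sketch of reading the grading out of the little-endian notification; the accessor name is hypothetical, the struct and enum are the ones in this header:

	/* Hypothetical accessor: the notification carries the grading as a
	 * __le32, and the enum ordering makes threshold checks meaningful. */
	static bool bt_is_busy(const struct iwl_bt_coex_profile_notif *notif)
	{
		u32 grading = le32_to_cpu(notif->bt_activity_grading);

		return grading >= BT_LOW_TRAFFIC;	/* BT_HIGH_TRAFFIC counts too */
	}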
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 10fcc1a79ebd..13696fe419b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_status_v4 {
-	__le64 replay_ctr;
-	__le16 pattern_number;
-	__le16 non_qos_seq_ctr;
-	__le16 qos_seq_ctr[8];
-	__le32 wakeup_reasons;
-	__le32 rekey_status;
-	__le32 num_of_gtk_rekeys;
-	__le32 transmitted_ndps;
-	__le32 received_beacons;
-	__le32 wake_packet_length;
-	__le32 wake_packet_bufsize;
-	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
 struct iwl_wowlan_gtk_status {
 	u8 key_index;
 	u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
 	struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed;
 
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
 	struct iwl_wowlan_gtk_status gtk;
 	__le64 replay_ctr;
 	__le16 pattern_number;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 39148b5bb332..8bb5b94bf963 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -334,7 +334,7 @@ enum {
 */
 struct iwl_lq_cmd {
 	u8 sta_id;
-	u8 reserved1;
+	u8 reduced_tpc;
 	u16 control;
 	/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
 	u8 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89ecd78a..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -169,8 +169,12 @@ enum iwl_scan_type {
 	SCAN_TYPE_DISCOVERY_FORCED	= 6,
 }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
 
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan
+ * it should be equal to:
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
 
 /**
 * struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
 *
 * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
 * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- *	on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
+ *	beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
+ *	Assuming beacon period is 100ms finding channel activity is guaranteed.
 */
 enum iwl_scan_offload_flags {
 	IWL_SCAN_OFFLOAD_FLAG_PASS_ALL		= BIT(0),
 	IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL	= BIT(2),
-	IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN	= BIT(3),
+	IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE	= BIT(5),
+	IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE	= BIT(6),
 };
 
 /**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
 	IWL_SCAN_OFFLOAD_ABORTED	= 2,
 };
 
+enum iwl_scan_ebs_status {
+	IWL_SCAN_EBS_SUCCESS,
+	IWL_SCAN_EBS_FAILED,
+	IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
 /**
 * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
 * @last_schedule_line: last schedule line executed (fast or regular)
 * @last_schedule_iteration: last scan iteration executed before scan abort
 * @status: enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
 */
 struct iwl_scan_offload_complete {
 	u8 last_schedule_line;
 	u8 last_schedule_iteration;
 	u8 status;
-	u8 reserved;
+	u8 ebs_status;
 } __packed;
 
 /**
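Per the kerneldoc above, the two EBS flags trade scan latency against the chance of catching channel activity: quick mode listens roughly one typical beacon period (100 ms), accurate mode two (200 ms). A sketch of picking between them when building a scheduled-scan request; the helper is hypothetical, the flag names are the ones defined above:

	/* Hypothetical helper: choose an EBS mode for a scheduled scan. */
	static __le32 ebs_scan_flags(bool accurate)
	{
		u32 flags = IWL_SCAN_OFFLOAD_FLAG_PASS_ALL;

		flags |= accurate ? IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE
				  : IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE;

		return cpu_to_le32(flags);
	}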
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index d63647867262..39cebee8016f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
 } __packed;
 
 /**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
 * ( REPLY_ADD_STA = 0x18 )
 * @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- *	sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *	AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
 * @mac_id_n_color: the Mac context this station belongs to
 * @addr[ETH_ALEN]: station's MAC address
 * @sta_id: index of station in uCode's station table
 * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
 *	alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
 * @station_flags: look at %iwl_sta_flags
 * @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- *	AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
 * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
 *	Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
 *	add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
 * ADD_STA sets up the table entry for one station, either creating a new
 * entry, or modifying a pre-existing one.
 */
-struct iwl_mvm_add_sta_cmd_v5 {
-	u8 add_modify;
-	u8 unicast_tx_key_id;
-	u8 multicast_tx_key_id;
-	u8 reserved1;
-	__le32 mac_id_n_color;
-	u8 addr[ETH_ALEN];
-	__le16 reserved2;
-	u8 sta_id;
-	u8 modify_mask;
-	__le16 reserved3;
-	struct iwl_mvm_keyinfo key;
-	__le32 station_flags;
-	__le32 station_flags_msk;
-	__le16 tid_disable_tx;
-	__le16 reserved4;
-	u8 add_immediate_ba_tid;
-	u8 remove_immediate_ba_tid;
-	__le16 add_immediate_ba_ssn;
-	__le16 sleep_tx_count;
-	__le16 sleep_state_flags;
-	__le16 assoc_id;
-	__le16 beamform_flags;
-	__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
 	u8 add_modify;
 	u8 awake_acs;
 	__le16 tid_disable_tx;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 8e122f3a7a74..6cc5f52b807f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
 	u8 pa_integ_res_b[3];
 	u8 pa_integ_res_c[3];
 	__le16 measurement_req_id;
-	__le16 reserved;
+	u8 reduced_tpc;
+	u8 reserved;
 
 	__le32 tfd_info;
 	__le16 seq_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 6e75b52588de..309a9b9a94fe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -71,6 +71,7 @@
 #include "fw-api-power.h"
 #include "fw-api-d3.h"
 #include "fw-api-coex.h"
+#include "fw-api-scan.h"
 
 /* maximal number of Tx queues in any platform */
 #define IWL_MVM_MAX_QUEUES	20
@@ -604,52 +605,7 @@ enum {
 	TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
 }; /* MAC_EVENT_ACTION_API_E_VER_2 */
 
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- *	If the action is ADD, then it means the type of event to add.
- *	For all other actions it is the unique event ID assigned when the
- *	event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- *	and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
-	/* COMMON_INDEX_HDR_API_S_VER_1 */
-	__le32 id_and_color;
-	__le32 action;
-	__le32 id;
-	/* MAC_TIME_EVENT_DATA_API_S_VER_1 */
-	__le32 apply_time;
-	__le32 max_delay;
-	__le32 dep_policy;
-	__le32 depends_on;
-	__le32 is_present;
-	__le32 max_frags;
-	__le32 interval;
-	__le32 interval_reciprocal;
-	__le32 duration;
-	__le32 repeat;
-	__le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
 
 /*
 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
 #define TE_V2_PLACEMENT_POS	12
 #define TE_V2_ABSENCE_POS	15
 
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
 * A notification (both event and fragment) includes a status indicating weather
 * the FW was able to schedule the event or not. For fragment start/end
 * notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
 };
 
 /**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd_api - configuring Time Events
 * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
 * with version 1. determined by IWL_UCODE_TLV_FLAGS)
 * ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
 *	TE_EVENT_SOCIOPATHIC
 *	using TE_ABSENCE and using TE_NOTIF_*
 */
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
 	/* COMMON_INDEX_HDR_API_S_VER_1 */
 	__le32 id_and_color;
 	__le32 action;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 7ce20062f32d..883e702152d5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -99,7 +99,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
 	};
 
 	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
-	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
 				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
@@ -137,6 +137,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
 		mvm->umac_error_event_table =
 			le32_to_cpu(palive2->error_info_addr);
+		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
+		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
 
 		alive_data->valid = le16_to_cpu(palive2->status) ==
 				    IWL_ALIVE_STATUS_OK;
@@ -180,6 +182,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	int ret, i;
 	enum iwl_ucode_type old_type = mvm->cur_ucode;
 	static const u8 alive_cmd[] = { MVM_ALIVE };
+	struct iwl_sf_region st_fwrd_space;
 
 	fw = iwl_get_ucode_image(mvm, ucode_type);
 	if (WARN_ON(!fw))
@@ -215,6 +218,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 		return -EIO;
 	}
 
+	/*
+	 * update the sdio allocation according to the pointer we get in the
+	 * alive notification.
+	 */
+	st_fwrd_space.addr = mvm->sf_space.addr;
+	st_fwrd_space.size = mvm->sf_space.size;
+	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
 	/*
@@ -256,7 +267,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
 		       phy_cfg_cmd.phy_cfg);
 
-	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
 				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
 
@@ -288,14 +299,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 		goto error;
 	}
 
-	ret = iwl_send_bt_prio_tbl(mvm);
+	ret = iwl_send_bt_init_conf(mvm);
 	if (ret)
 		goto error;
 
 	/* Read the NVM only at driver load time, no need to do this twice */
 	if (read_nvm) {
 		/* Read nvm */
-		ret = iwl_nvm_init(mvm);
+		ret = iwl_nvm_init(mvm, true);
 		if (ret) {
 			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 			goto error;
@@ -303,7 +314,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 	}
 
 	/* In case we read the NVM from external file, load it to the NIC */
-	if (iwlwifi_mod_params.nvm_file)
+	if (mvm->nvm_file_name)
 		iwl_mvm_load_nvm_to_nic(mvm);
 
 	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
@@ -424,10 +435,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	if (ret)
 		goto error;
 
-	ret = iwl_send_bt_prio_tbl(mvm);
-	if (ret)
-		goto error;
-
 	ret = iwl_send_bt_init_conf(mvm);
 	if (ret)
 		goto error;
@@ -468,12 +475,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	/* Initialize tx backoffs to the minimal possible */
 	iwl_mvm_tt_tx_backoff(mvm, 0);
 
-	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
-		ret = iwl_power_legacy_set_cam_mode(mvm);
-		if (ret)
-			goto error;
-	}
-
 	ret = iwl_mvm_power_update_device(mvm);
 	if (ret)
 		goto error;
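The alive handler above caches the store-and-forward region the firmware reports, and the load path then pushes it to the transport via iwl_trans_update_sf(), per the in-diff comment about the SDIO allocation. A sketch of the region type implied by those assignments, assuming its definition matches its use here:

	/* Field layout inferred from the assignments in iwl_alive_fn()
	 * above (a sketch, not authoritative). */
	struct iwl_sf_region {
		u32 addr;	/* start address of the store-and-forward area */
		u32 size;	/* size of that area, in bytes */
	};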
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 9ccec10bba16..8b5302777632 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
667 if (vif->bss_conf.qos) 667 if (vif->bss_conf.qos)
668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
669 669
670 /* Don't use cts to self as the fw doesn't support it currently. */
671 if (vif->bss_conf.use_cts_prot) { 670 if (vif->bss_conf.use_cts_prot) {
672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
673 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8) 672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
674 cmd->protection_flags |=
675 cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
676 } 673 }
677 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", 674 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
678 vif->bss_conf.use_cts_prot, 675 vif->bss_conf.use_cts_prot,
@@ -688,7 +685,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
688static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, 685static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
689 struct iwl_mac_ctx_cmd *cmd) 686 struct iwl_mac_ctx_cmd *cmd)
690{ 687{
691 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, 688 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
692 sizeof(*cmd), cmd); 689 sizeof(*cmd), cmd);
693 if (ret) 690 if (ret)
694 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", 691 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
@@ -696,19 +693,39 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
696 return ret; 693 return ret;
697} 694}
698 695
699/* 696static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
700 * Fill the specific data for mac context of type station or p2p client 697 struct ieee80211_vif *vif,
701 */ 698 u32 action, bool force_assoc_off)
702static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
703 struct ieee80211_vif *vif,
704 struct iwl_mac_data_sta *ctxt_sta,
705 bool force_assoc_off)
706{ 699{
700 struct iwl_mac_ctx_cmd cmd = {};
701 struct iwl_mac_data_sta *ctxt_sta;
702
703 WARN_ON(vif->type != NL80211_IFTYPE_STATION);
704
705 /* Fill the common data for all mac context types */
706 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
707
708 if (vif->p2p) {
709 struct ieee80211_p2p_noa_attr *noa =
710 &vif->bss_conf.p2p_noa_attr;
711
712 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
713 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
714 ctxt_sta = &cmd.p2p_sta.sta;
715 } else {
716 ctxt_sta = &cmd.sta;
717 }
718
707 /* We need the dtim_period to set the MAC as associated */ 719 /* We need the dtim_period to set the MAC as associated */
708 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 720 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
709 !force_assoc_off) { 721 !force_assoc_off) {
710 u32 dtim_offs; 722 u32 dtim_offs;
711 723
724 /* Allow beacons to pass through as long as we are not
725 * associated, or we do not have dtim period information.
726 */
727 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
728
712 /* 729 /*
713 * The DTIM count counts down, so when it is N that means N 730 * The DTIM count counts down, so when it is N that means N
714 * more beacon intervals happen until the DTIM TBTT. Therefore 731 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -755,51 +772,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
755 772
756 ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); 773 ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
757 ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); 774 ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
758}
759
760static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
761 struct ieee80211_vif *vif,
762 u32 action)
763{
764 struct iwl_mac_ctx_cmd cmd = {};
765
766 WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
767
768 /* Fill the common data for all mac context types */
769 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
770
771 /* Allow beacons to pass through as long as we are not associated,or we
772 * do not have dtim period information */
773 if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
774 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
775 else
776 cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
777
778 /* Fill the data specific for station mode */
779 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
780 action == FW_CTXT_ACTION_ADD);
781
782 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
783}
784
785static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
786 struct ieee80211_vif *vif,
787 u32 action)
788{
789 struct iwl_mac_ctx_cmd cmd = {};
790 struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
791
792 WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
793
794 /* Fill the common data for all mac context types */
795 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
796
797 /* Fill the data specific for station mode */
798 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
799 action == FW_CTXT_ACTION_ADD);
800
801 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
802 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
803 775
804 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 776 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
805} 777}
@@ -1137,16 +1109,12 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
1137} 1109}
1138 1110
1139static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1111static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1140 u32 action) 1112 u32 action, bool force_assoc_off)
1141{ 1113{
1142 switch (vif->type) { 1114 switch (vif->type) {
1143 case NL80211_IFTYPE_STATION: 1115 case NL80211_IFTYPE_STATION:
1144 if (!vif->p2p) 1116 return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
1145 return iwl_mvm_mac_ctxt_cmd_station(mvm, vif, 1117 force_assoc_off);
1146 action);
1147 else
1148 return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
1149 action);
1150 break; 1118 break;
1151 case NL80211_IFTYPE_AP: 1119 case NL80211_IFTYPE_AP:
1152 if (!vif->p2p) 1120 if (!vif->p2p)
@@ -1176,7 +1144,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1176 vif->addr, ieee80211_vif_type_p2p(vif))) 1144 vif->addr, ieee80211_vif_type_p2p(vif)))
1177 return -EIO; 1145 return -EIO;
1178 1146
1179 ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD); 1147 ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
1148 true);
1180 if (ret) 1149 if (ret)
1181 return ret; 1150 return ret;
1182 1151
@@ -1187,7 +1156,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1187 return 0; 1156 return 0;
1188} 1157}
1189 1158
1190int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1159int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1160 bool force_assoc_off)
1191{ 1161{
1192 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1162 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1193 1163
@@ -1195,7 +1165,8 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1195 vif->addr, ieee80211_vif_type_p2p(vif))) 1165 vif->addr, ieee80211_vif_type_p2p(vif)))
1196 return -EIO; 1166 return -EIO;
1197 1167
1198 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY); 1168 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
1169 force_assoc_off);
1199} 1170}
1200 1171
1201int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1172int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1214,7 +1185,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1214 mvmvif->color)); 1185 mvmvif->color));
1215 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); 1186 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
1216 1187
1217 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, 1188 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
1218 sizeof(cmd), &cmd); 1189 sizeof(cmd), &cmd);
1219 if (ret) { 1190 if (ret) {
1220 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); 1191 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
@@ -1240,11 +1211,23 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1240 u32 rate __maybe_unused = 1211 u32 rate __maybe_unused =
1241 le32_to_cpu(beacon->beacon_notify_hdr.initial_rate); 1212 le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
1242 1213
1214 lockdep_assert_held(&mvm->mutex);
1215
1243 IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n", 1216 IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
1244 status & TX_STATUS_MSK, 1217 status & TX_STATUS_MSK,
1245 beacon->beacon_notify_hdr.failure_frame, 1218 beacon->beacon_notify_hdr.failure_frame,
1246 le64_to_cpu(beacon->tsf), 1219 le64_to_cpu(beacon->tsf),
1247 rate); 1220 rate);
1221
1222 if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
1223 if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
1224 iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
1225 } else {
1226 ieee80211_csa_finish(mvm->csa_vif);
1227 mvm->csa_vif = NULL;
1228 }
1229 }
1230
1248 return 0; 1231 return 0;
1249} 1232}
1250 1233
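
This hunk is one third of the CSA plumbing in this patch: iwl_mvm_mac_setup_register() advertises WIPHY_FLAG_HAS_CHANNEL_SWITCH (below), the new channel_switch_beacon handler records the switching vif in mvm->csa_vif, and the beacon notification above ticks the countdown. Condensed into one flow, using only names that appear in the hunks:

	/* 1) mac80211 installs a CSA beacon; the driver remembers the vif */
	mvm->csa_vif = vif;			/* channel_switch_beacon() */

	/* 2) each firmware beacon-TX notification ticks the countdown */
	if (mvm->csa_vif && mvm->csa_vif->csa_active) {
		if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
			/* counter not yet 0: push an updated template so
			 * the next beacon carries the decremented count */
			iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
		} else {
			/* counter hit 0: let mac80211 switch the channel */
			ieee80211_csa_finish(mvm->csa_vif);
			mvm->csa_vif = NULL;
		}
	}
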
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 8735ef1f44ae..7215f5980186 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -295,7 +295,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
295 !iwlwifi_mod_params.sw_crypto) 295 !iwlwifi_mod_params.sw_crypto)
296 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 296 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
297 297
298 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { 298 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
299 IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
300 !iwlwifi_mod_params.uapsd_disable) {
299 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; 301 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
300 hw->uapsd_queues = IWL_UAPSD_AC_INFO; 302 hw->uapsd_queues = IWL_UAPSD_AC_INFO;
301 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 303 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -309,11 +311,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
309 BIT(NL80211_IFTYPE_P2P_CLIENT) | 311 BIT(NL80211_IFTYPE_P2P_CLIENT) |
310 BIT(NL80211_IFTYPE_AP) | 312 BIT(NL80211_IFTYPE_AP) |
311 BIT(NL80211_IFTYPE_P2P_GO) | 313 BIT(NL80211_IFTYPE_P2P_GO) |
312 BIT(NL80211_IFTYPE_P2P_DEVICE); 314 BIT(NL80211_IFTYPE_P2P_DEVICE) |
313 315 BIT(NL80211_IFTYPE_ADHOC);
314 /* IBSS has bugs in older versions */
315 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
316 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
317 316
318 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 317 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
319 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 318 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -322,6 +321,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
322 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) 321 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
323 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 322 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
324 323
324 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
325 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
326
325 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 327 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
326 hw->wiphy->n_iface_combinations = 328 hw->wiphy->n_iface_combinations =
327 ARRAY_SIZE(iwl_mvm_iface_combinations); 329 ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -365,14 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
365 else 367 else
366 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 368 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
367 369
368 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { 370 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
369 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 371 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
370 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 372 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
371 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 373 /* we create the 802.11 header and zero length SSID IE. */
372 /* we create the 802.11 header and zero length SSID IE. */ 374 hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
373 hw->wiphy->max_sched_scan_ie_len =
374 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
375 }
376 375
377 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 376 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
378 NL80211_FEATURE_P2P_GO_OPPPS; 377 NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +385,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
386 } 385 }
387 386
388#ifdef CONFIG_PM_SLEEP 387#ifdef CONFIG_PM_SLEEP
389 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 388 if (iwl_mvm_is_d0i3_supported(mvm) &&
389 device_can_wakeup(mvm->trans->dev)) {
390 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
391 hw->wiphy->wowlan = &mvm->wowlan;
392 } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
390 mvm->trans->ops->d3_suspend && 393 mvm->trans->ops->d3_suspend &&
391 mvm->trans->ops->d3_resume && 394 mvm->trans->ops->d3_resume &&
392 device_can_wakeup(mvm->trans->dev)) { 395 device_can_wakeup(mvm->trans->dev)) {
@@ -540,13 +543,22 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
540 return -EACCES; 543 return -EACCES;
541 544
542 /* return from D0i3 before starting a new Tx aggregation */ 545 /* return from D0i3 before starting a new Tx aggregation */
543 if (action == IEEE80211_AMPDU_TX_START) { 546 switch (action) {
547 case IEEE80211_AMPDU_TX_START:
548 case IEEE80211_AMPDU_TX_STOP_CONT:
549 case IEEE80211_AMPDU_TX_STOP_FLUSH:
550 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
551 case IEEE80211_AMPDU_TX_OPERATIONAL:
544 iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG); 552 iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
545 tx_agg_ref = true; 553 tx_agg_ref = true;
546 554
547 /* 555 /*
548 * wait synchronously until D0i3 exit to get the correct 556 * for tx start, wait synchronously until D0i3 exit to
549 * sequence number for the tid 557 * get the correct sequence number for the tid.
558 * additionally, some other ampdu actions use direct
559 * target access, which is not handled automatically
560 * by the trans layer (unlike commands), so wait for
561 * d0i3 exit in these cases as well.
550 */ 562 */
551 if (!wait_event_timeout(mvm->d0i3_exit_waitq, 563 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
552 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) { 564 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
@@ -554,6 +566,9 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
554 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); 566 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
555 return -EIO; 567 return -EIO;
556 } 568 }
569 break;
570 default:
571 break;
557 } 572 }
558 573
559 mutex_lock(&mvm->mutex); 574 mutex_lock(&mvm->mutex);
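
The rewritten entry check turns the old TX_START-only special case into a shared pattern: take an IWL_MVM_REF_TX_AGG reference (which triggers D0i3 exit) and block until IWL_MVM_STATUS_IN_D0I3 clears, since the stop/flush/operational paths poke the target directly and the transport only wakes the device automatically for commands. The tail of the function, not shown in this hunk, presumably balances the reference roughly like this (shape assumed; the unref helper itself is the one used above):

	mutex_unlock(&mvm->mutex);

	/* every path that took the reference above must drop it once
	 * the ampdu action has been processed */
	if (tx_agg_ref)
		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);

	return ret;
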
@@ -758,7 +773,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
758 .pwr_restriction = cpu_to_le16(tx_power), 773 .pwr_restriction = cpu_to_le16(tx_power),
759 }; 774 };
760 775
761 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC, 776 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
762 sizeof(reduce_txpwr_cmd), 777 sizeof(reduce_txpwr_cmd),
763 &reduce_txpwr_cmd); 778 &reduce_txpwr_cmd);
764} 779}
@@ -817,18 +832,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
817 if (ret) 832 if (ret)
818 goto out_release; 833 goto out_release;
819 834
820 ret = iwl_mvm_power_update_mac(mvm, vif); 835 ret = iwl_mvm_power_update_mac(mvm);
821 if (ret) 836 if (ret)
822 goto out_release; 837 goto out_release;
823 838
824 /* beacon filtering */ 839 /* beacon filtering */
825 ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC); 840 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
826 if (ret) 841 if (ret)
827 goto out_remove_mac; 842 goto out_remove_mac;
828 843
829 if (!mvm->bf_allowed_vif && false && 844 if (!mvm->bf_allowed_vif &&
830 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 845 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
831 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
832 mvm->bf_allowed_vif = mvmvif; 846 mvm->bf_allowed_vif = mvmvif;
833 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 847 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
834 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 848 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -969,7 +983,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
969 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 983 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
970 mvm->vif_count--; 984 mvm->vif_count--;
971 985
972 iwl_mvm_power_update_mac(mvm, vif); 986 iwl_mvm_power_update_mac(mvm);
973 iwl_mvm_mac_ctxt_remove(mvm, vif); 987 iwl_mvm_mac_ctxt_remove(mvm, vif);
974 988
975out_release: 989out_release:
@@ -1223,10 +1237,14 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1223 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1237 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1224 return 0; 1238 return 0;
1225 1239
1240 /* bcast filtering isn't supported for P2P client */
1241 if (vif->p2p)
1242 return 0;
1243
1226 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1227 return 0; 1245 return 0;
1228 1246
1229 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC, 1247 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1230 sizeof(cmd), &cmd); 1248 sizeof(cmd), &cmd);
1231} 1249}
1232#else 1250#else
@@ -1253,7 +1271,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1253 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 1271 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1254 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 1272 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1255 1273
1256 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 1274 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
1257 if (ret) 1275 if (ret)
1258 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1276 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1259 1277
@@ -1333,10 +1351,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1333 iwl_mvm_remove_time_event(mvm, mvmvif, 1351 iwl_mvm_remove_time_event(mvm, mvmvif,
1334 &mvmvif->time_event_data); 1352 &mvmvif->time_event_data);
1335 iwl_mvm_sf_update(mvm, vif, false); 1353 iwl_mvm_sf_update(mvm, vif, false);
1336 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC)); 1354 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1337 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | 1355 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
1338 BSS_CHANGED_QOS)) { 1356 BSS_CHANGED_QOS)) {
1339 ret = iwl_mvm_power_update_mac(mvm, vif); 1357 ret = iwl_mvm_power_update_mac(mvm);
1340 if (ret) 1358 if (ret)
1341 IWL_ERR(mvm, "failed to update power mode\n"); 1359 IWL_ERR(mvm, "failed to update power mode\n");
1342 } 1360 }
@@ -1347,16 +1365,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1347 } 1365 }
1348 1366
1349 if (changes & BSS_CHANGED_CQM) { 1367 if (changes & BSS_CHANGED_CQM) {
1350 IWL_DEBUG_MAC80211(mvm, "cqm info_changed"); 1368 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
1351 /* reset cqm events tracking */ 1369 /* reset cqm events tracking */
1352 mvmvif->bf_data.last_cqm_event = 0; 1370 mvmvif->bf_data.last_cqm_event = 0;
1353 ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC); 1371 if (mvmvif->bf_data.bf_enabled) {
1354 if (ret) 1372 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
1355 IWL_ERR(mvm, "failed to update CQM thresholds\n"); 1373 if (ret)
1374 IWL_ERR(mvm,
1375 "failed to update CQM thresholds\n");
1376 }
1356 } 1377 }
1357 1378
1358 if (changes & BSS_CHANGED_ARP_FILTER) { 1379 if (changes & BSS_CHANGED_ARP_FILTER) {
1359 IWL_DEBUG_MAC80211(mvm, "arp filter changed"); 1380 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
1360 iwl_mvm_configure_bcast_filter(mvm, vif); 1381 iwl_mvm_configure_bcast_filter(mvm, vif);
1361 } 1382 }
1362} 1383}
@@ -1402,7 +1423,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1402 mvmvif->ap_ibss_active = true; 1423 mvmvif->ap_ibss_active = true;
1403 1424
1404 /* power updated needs to be done before quotas */ 1425 /* power updated needs to be done before quotas */
1405 iwl_mvm_power_update_mac(mvm, vif); 1426 iwl_mvm_power_update_mac(mvm);
1406 1427
1407 ret = iwl_mvm_update_quotas(mvm, vif); 1428 ret = iwl_mvm_update_quotas(mvm, vif);
1408 if (ret) 1429 if (ret)
@@ -1410,7 +1431,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1410 1431
1411 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 1432 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
1412 if (vif->p2p && mvm->p2p_device_vif) 1433 if (vif->p2p && mvm->p2p_device_vif)
1413 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1434 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
1414 1435
1415 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 1436 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
1416 1437
@@ -1420,7 +1441,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1420 return 0; 1441 return 0;
1421 1442
1422out_quota_failed: 1443out_quota_failed:
1423 iwl_mvm_power_update_mac(mvm, vif); 1444 iwl_mvm_power_update_mac(mvm);
1424 mvmvif->ap_ibss_active = false; 1445 mvmvif->ap_ibss_active = false;
1425 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1446 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1426out_unbind: 1447out_unbind:
@@ -1450,13 +1471,13 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
1450 1471
1451 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 1472 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
1452 if (vif->p2p && mvm->p2p_device_vif) 1473 if (vif->p2p && mvm->p2p_device_vif)
1453 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1474 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
1454 1475
1455 iwl_mvm_update_quotas(mvm, NULL); 1476 iwl_mvm_update_quotas(mvm, NULL);
1456 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1477 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1457 iwl_mvm_binding_remove_vif(mvm, vif); 1478 iwl_mvm_binding_remove_vif(mvm, vif);
1458 1479
1459 iwl_mvm_power_update_mac(mvm, vif); 1480 iwl_mvm_power_update_mac(mvm);
1460 1481
1461 iwl_mvm_mac_ctxt_remove(mvm, vif); 1482 iwl_mvm_mac_ctxt_remove(mvm, vif);
1462 1483
@@ -1477,7 +1498,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
1477 1498
1478 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 1499 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
1479 BSS_CHANGED_BANDWIDTH) && 1500 BSS_CHANGED_BANDWIDTH) &&
1480 iwl_mvm_mac_ctxt_changed(mvm, vif)) 1501 iwl_mvm_mac_ctxt_changed(mvm, vif, false))
1481 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1502 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1482 1503
1483 /* Need to send a new beacon template to the FW */ 1504 /* Need to send a new beacon template to the FW */
@@ -1495,6 +1516,9 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
1495 1516
1496 mutex_lock(&mvm->mutex); 1517 mutex_lock(&mvm->mutex);
1497 1518
1519 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
1520 iwl_mvm_sched_scan_stop(mvm, true);
1521
1498 switch (vif->type) { 1522 switch (vif->type) {
1499 case NL80211_IFTYPE_STATION: 1523 case NL80211_IFTYPE_STATION:
1500 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 1524 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
@@ -1525,7 +1549,7 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
1525 1549
1526 switch (mvm->scan_status) { 1550 switch (mvm->scan_status) {
1527 case IWL_MVM_SCAN_SCHED: 1551 case IWL_MVM_SCAN_SCHED:
1528 ret = iwl_mvm_sched_scan_stop(mvm); 1552 ret = iwl_mvm_sched_scan_stop(mvm, true);
1529 if (ret) { 1553 if (ret) {
1530 ret = -EBUSY; 1554 ret = -EBUSY;
1531 goto out; 1555 goto out;
@@ -1697,6 +1721,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1697 ret = iwl_mvm_add_sta(mvm, vif, sta); 1721 ret = iwl_mvm_add_sta(mvm, vif, sta);
1698 } else if (old_state == IEEE80211_STA_NONE && 1722 } else if (old_state == IEEE80211_STA_NONE &&
1699 new_state == IEEE80211_STA_AUTH) { 1723 new_state == IEEE80211_STA_AUTH) {
1724 /*
1725 * EBS may be disabled due to previous failures reported by FW.
1726 * Reset EBS status here assuming environment has been changed.
1727 */
1728 mvm->last_ebs_successful = true;
1700 ret = 0; 1729 ret = 0;
1701 } else if (old_state == IEEE80211_STA_AUTH && 1730 } else if (old_state == IEEE80211_STA_AUTH &&
1702 new_state == IEEE80211_STA_ASSOC) { 1731 new_state == IEEE80211_STA_ASSOC) {
@@ -1708,14 +1737,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1708 } else if (old_state == IEEE80211_STA_ASSOC && 1737 } else if (old_state == IEEE80211_STA_ASSOC &&
1709 new_state == IEEE80211_STA_AUTHORIZED) { 1738 new_state == IEEE80211_STA_AUTHORIZED) {
1710 /* enable beacon filtering */ 1739 /* enable beacon filtering */
1711 if (vif->bss_conf.dtim_period) 1740 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1712 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
1713 CMD_SYNC));
1714 ret = 0; 1741 ret = 0;
1715 } else if (old_state == IEEE80211_STA_AUTHORIZED && 1742 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
1716 new_state == IEEE80211_STA_ASSOC) { 1743 new_state == IEEE80211_STA_ASSOC) {
1717 /* disable beacon filtering */ 1744 /* disable beacon filtering */
1718 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC)); 1745 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
1719 ret = 0; 1746 ret = 0;
1720 } else if (old_state == IEEE80211_STA_ASSOC && 1747 } else if (old_state == IEEE80211_STA_ASSOC &&
1721 new_state == IEEE80211_STA_AUTH) { 1748 new_state == IEEE80211_STA_AUTH) {
@@ -1772,7 +1799,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
1772 int ret; 1799 int ret;
1773 1800
1774 mutex_lock(&mvm->mutex); 1801 mutex_lock(&mvm->mutex);
1775 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 1802 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
1776 mutex_unlock(&mvm->mutex); 1803 mutex_unlock(&mvm->mutex);
1777 return ret; 1804 return ret;
1778 } 1805 }
@@ -1865,7 +1892,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
1865 int ret; 1892 int ret;
1866 1893
1867 mutex_lock(&mvm->mutex); 1894 mutex_lock(&mvm->mutex);
1868 ret = iwl_mvm_sched_scan_stop(mvm); 1895 ret = iwl_mvm_sched_scan_stop(mvm, false);
1869 mutex_unlock(&mvm->mutex); 1896 mutex_unlock(&mvm->mutex);
1870 iwl_mvm_wait_for_async_handlers(mvm); 1897 iwl_mvm_wait_for_async_handlers(mvm);
1871 1898
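
iwl_mvm_sched_scan_stop() now takes a bool notify. Driver-initiated stops (the BSS_CHANGED_IDLE path and making room for a regular scan, both above) pass true so mac80211 is told the scheduled scan ended; this mac80211 sched_scan_stop callback passes false since mac80211 initiated the stop itself. A sketch of the expected shape, with the abort-helper name assumed for illustration rather than quoted from this diff:

	int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
	{
		/* abort helper name is an assumption */
		int ret = iwl_mvm_send_scan_offload_abort(mvm);

		/* only stops the driver decided on need reporting;
		 * mac80211-requested stops already know */
		if (!ret && notify)
			ieee80211_sched_scan_stopped(mvm->hw);

		return ret;
	}
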
@@ -2161,10 +2188,10 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
2161 return; 2188 return;
2162 2189
2163 mutex_lock(&mvm->mutex); 2190 mutex_lock(&mvm->mutex);
2191 iwl_mvm_bt_coex_vif_change(mvm);
2164 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 2192 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
2165 ctx->rx_chains_static, 2193 ctx->rx_chains_static,
2166 ctx->rx_chains_dynamic); 2194 ctx->rx_chains_dynamic);
2167 iwl_mvm_bt_coex_vif_change(mvm);
2168 mutex_unlock(&mvm->mutex); 2195 mutex_unlock(&mvm->mutex);
2169} 2196}
2170 2197
@@ -2184,6 +2211,11 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2184 2211
2185 switch (vif->type) { 2212 switch (vif->type) {
2186 case NL80211_IFTYPE_AP: 2213 case NL80211_IFTYPE_AP:
2214 /* Unless it's a CSA flow we have nothing to do here */
2215 if (vif->csa_active) {
2216 mvmvif->ap_ibss_active = true;
2217 break;
2218 }
2187 case NL80211_IFTYPE_ADHOC: 2219 case NL80211_IFTYPE_ADHOC:
2188 /* 2220 /*
2189 * The AP binding flow is handled as part of the start_ap flow 2221 * The AP binding flow is handled as part of the start_ap flow
@@ -2207,7 +2239,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2207 * Power state must be updated before quotas, 2239 * Power state must be updated before quotas,
2208 * otherwise fw will complain. 2240 * otherwise fw will complain.
2209 */ 2241 */
2210 iwl_mvm_power_update_mac(mvm, vif); 2242 iwl_mvm_power_update_mac(mvm);
2211 2243
2212 /* Setting the quota at this stage is only required for monitor 2244 /* Setting the quota at this stage is only required for monitor
2213 * interfaces. For the other types, the bss_info changed flow 2245 * interfaces. For the other types, the bss_info changed flow
@@ -2220,11 +2252,17 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2220 goto out_remove_binding; 2252 goto out_remove_binding;
2221 } 2253 }
2222 2254
2255 /* Handle binding during CSA */
2256 if (vif->type == NL80211_IFTYPE_AP) {
2257 iwl_mvm_update_quotas(mvm, vif);
2258 iwl_mvm_mac_ctxt_changed(mvm, vif, false);
2259 }
2260
2223 goto out_unlock; 2261 goto out_unlock;
2224 2262
2225 out_remove_binding: 2263 out_remove_binding:
2226 iwl_mvm_binding_remove_vif(mvm, vif); 2264 iwl_mvm_binding_remove_vif(mvm, vif);
2227 iwl_mvm_power_update_mac(mvm, vif); 2265 iwl_mvm_power_update_mac(mvm);
2228 out_unlock: 2266 out_unlock:
2229 mutex_unlock(&mvm->mutex); 2267 mutex_unlock(&mvm->mutex);
2230 if (ret) 2268 if (ret)
@@ -2244,22 +2282,29 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
2244 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 2282 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
2245 2283
2246 switch (vif->type) { 2284 switch (vif->type) {
2247 case NL80211_IFTYPE_AP:
2248 case NL80211_IFTYPE_ADHOC: 2285 case NL80211_IFTYPE_ADHOC:
2249 goto out_unlock; 2286 goto out_unlock;
2250 case NL80211_IFTYPE_MONITOR: 2287 case NL80211_IFTYPE_MONITOR:
2251 mvmvif->monitor_active = false; 2288 mvmvif->monitor_active = false;
2252 iwl_mvm_update_quotas(mvm, NULL); 2289 iwl_mvm_update_quotas(mvm, NULL);
2253 break; 2290 break;
2291 case NL80211_IFTYPE_AP:
2292 /* This part is triggered only during CSA */
2293 if (!vif->csa_active || !mvmvif->ap_ibss_active)
2294 goto out_unlock;
2295
2296 mvmvif->ap_ibss_active = false;
2297 iwl_mvm_update_quotas(mvm, NULL);
 2298 /* TODO: bt_coex notification here? */
2254 default: 2299 default:
2255 break; 2300 break;
2256 } 2301 }
2257 2302
2258 iwl_mvm_binding_remove_vif(mvm, vif); 2303 iwl_mvm_binding_remove_vif(mvm, vif);
2259 iwl_mvm_power_update_mac(mvm, vif);
2260 2304
2261out_unlock: 2305out_unlock:
2262 mvmvif->phy_ctxt = NULL; 2306 mvmvif->phy_ctxt = NULL;
2307 iwl_mvm_power_update_mac(mvm);
2263 mutex_unlock(&mvm->mutex); 2308 mutex_unlock(&mvm->mutex);
2264} 2309}
2265 2310
@@ -2323,9 +2368,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
2323 return -EINVAL; 2368 return -EINVAL;
2324 2369
2325 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 2370 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
2326 return iwl_mvm_enable_beacon_filter(mvm, vif, 2371 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2327 CMD_SYNC); 2372 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
2328 return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
2329 } 2373 }
2330 2374
2331 return -EOPNOTSUPP; 2375 return -EOPNOTSUPP;
@@ -2346,6 +2390,53 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
2346} 2390}
2347#endif 2391#endif
2348 2392
2393static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
2394 struct ieee80211_vif *vif,
2395 struct cfg80211_chan_def *chandef)
2396{
2397 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2398
2399 mutex_lock(&mvm->mutex);
2400 if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
2401 "Another CSA is already in progress"))
2402 goto out_unlock;
2403
2404 IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
2405 chandef->center_freq1);
2406 mvm->csa_vif = vif;
2407
2408out_unlock:
2409 mutex_unlock(&mvm->mutex);
2410}
2411
2412static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
2413 struct ieee80211_vif *vif, u32 queues, bool drop)
2414{
2415 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2416 struct iwl_mvm_vif *mvmvif;
2417 struct iwl_mvm_sta *mvmsta;
2418
2419 if (!vif || vif->type != NL80211_IFTYPE_STATION)
2420 return;
2421
2422 mutex_lock(&mvm->mutex);
2423 mvmvif = iwl_mvm_vif_from_mac80211(vif);
2424 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
2425
2426 if (WARN_ON_ONCE(!mvmsta))
2427 goto done;
2428
2429 if (drop) {
2430 if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
2431 IWL_ERR(mvm, "flush request fail\n");
2432 } else {
2433 iwl_trans_wait_tx_queue_empty(mvm->trans,
2434 mvmsta->tfd_queue_msk);
2435 }
2436done:
2437 mutex_unlock(&mvm->mutex);
2438}
2439
2349const struct ieee80211_ops iwl_mvm_hw_ops = { 2440const struct ieee80211_ops iwl_mvm_hw_ops = {
2350 .tx = iwl_mvm_mac_tx, 2441 .tx = iwl_mvm_mac_tx,
2351 .ampdu_action = iwl_mvm_mac_ampdu_action, 2442 .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -2369,6 +2460,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
2369 .sta_rc_update = iwl_mvm_sta_rc_update, 2460 .sta_rc_update = iwl_mvm_sta_rc_update,
2370 .conf_tx = iwl_mvm_mac_conf_tx, 2461 .conf_tx = iwl_mvm_mac_conf_tx,
2371 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 2462 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
2463 .flush = iwl_mvm_mac_flush,
2372 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 2464 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
2373 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 2465 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
2374 .set_key = iwl_mvm_mac_set_key, 2466 .set_key = iwl_mvm_mac_set_key,
@@ -2388,6 +2480,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
2388 2480
2389 .set_tim = iwl_mvm_set_tim, 2481 .set_tim = iwl_mvm_set_tim,
2390 2482
2483 .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
2484
2391 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 2485 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
2392 2486
2393#ifdef CONFIG_PM_SLEEP 2487#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec0986c3c9..fcc6c29482d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
164 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), 164 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
165 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), 165 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
166 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), 166 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
167 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
168 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), 167 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
169 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), 168 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
170 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), 169 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
177 u32 tx_data_timeout; 176 u32 tx_data_timeout;
178 bool skip_over_dtim; 177 bool skip_over_dtim;
179 u8 skip_dtim_periods; 178 u8 skip_dtim_periods;
180 bool disable_power_off;
181 bool lprx_ena; 179 bool lprx_ena;
182 u32 lprx_rssi_threshold; 180 u32 lprx_rssi_threshold;
183 bool snooze_ena; 181 bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
232 IWL_MVM_REF_USER, 230 IWL_MVM_REF_USER,
233 IWL_MVM_REF_TX, 231 IWL_MVM_REF_TX,
234 IWL_MVM_REF_TX_AGG, 232 IWL_MVM_REF_TX_AGG,
233 IWL_MVM_REF_EXIT_WORK,
235 234
236 IWL_MVM_REF_COUNT, 235 IWL_MVM_REF_COUNT,
237}; 236};
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
265 * @uploaded: indicates the MAC context has been added to the device 264 * @uploaded: indicates the MAC context has been added to the device
266 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface 265 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
267 * should get quota etc. 266 * should get quota etc.
 267 * @pm_enabled: indicates if MAC power management is allowed
268 * @monitor_active: indicates that monitor context is configured, and that the 268 * @monitor_active: indicates that monitor context is configured, and that the
269 * interface should get quota etc. 269 * interface should get quota etc.
270 * @low_latency: indicates that this interface is in low-latency mode 270 * @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
283 283
284 bool uploaded; 284 bool uploaded;
285 bool ap_ibss_active; 285 bool ap_ibss_active;
286 bool pm_enabled;
286 bool monitor_active; 287 bool monitor_active;
287 bool low_latency; 288 bool low_latency;
288 struct iwl_mvm_vif_bf_data bf_data; 289 struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
451 int last_frame_idx; 452 int last_frame_idx;
452}; 453};
453 454
455enum {
456 D0I3_DEFER_WAKEUP,
457 D0I3_PENDING_WAKEUP,
458};
459
454struct iwl_mvm { 460struct iwl_mvm {
455 /* for logger access */ 461 /* for logger access */
456 struct device *dev; 462 struct device *dev;
@@ -484,6 +490,7 @@ struct iwl_mvm {
484 u32 log_event_table; 490 u32 log_event_table;
485 u32 umac_error_event_table; 491 u32 umac_error_event_table;
486 bool support_umac_log; 492 bool support_umac_log;
493 struct iwl_sf_region sf_space;
487 494
488 u32 ampdu_ref; 495 u32 ampdu_ref;
489 496
@@ -495,6 +502,7 @@ struct iwl_mvm {
495 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; 502 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
496 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; 503 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
497 504
505 const char *nvm_file_name;
498 struct iwl_nvm_data *nvm_data; 506 struct iwl_nvm_data *nvm_data;
499 /* NVM sections */ 507 /* NVM sections */
500 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; 508 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -535,6 +543,8 @@ struct iwl_mvm {
535 /* Internal station */ 543 /* Internal station */
536 struct iwl_mvm_int_sta aux_sta; 544 struct iwl_mvm_int_sta aux_sta;
537 545
546 bool last_ebs_successful;
547
538 u8 scan_last_antenna_idx; /* to toggle TX between antennas */ 548 u8 scan_last_antenna_idx; /* to toggle TX between antennas */
539 u8 mgmt_last_antenna_idx; 549 u8 mgmt_last_antenna_idx;
540 550
@@ -578,8 +588,12 @@ struct iwl_mvm {
578 void *fw_error_dump; 588 void *fw_error_dump;
579 void *fw_error_sram; 589 void *fw_error_sram;
580 u32 fw_error_sram_len; 590 u32 fw_error_sram_len;
591 u32 *fw_error_rxf;
592 u32 fw_error_rxf_len;
581 593
594#ifdef CONFIG_IWLWIFI_LEDS
582 struct led_classdev led; 595 struct led_classdev led;
596#endif
583 597
584 struct ieee80211_vif *p2p_device_vif; 598 struct ieee80211_vif *p2p_device_vif;
585 599
@@ -601,6 +615,9 @@ struct iwl_mvm {
601 bool d0i3_offloading; 615 bool d0i3_offloading;
602 struct work_struct d0i3_exit_work; 616 struct work_struct d0i3_exit_work;
603 struct sk_buff_head d0i3_tx; 617 struct sk_buff_head d0i3_tx;
618 /* protect d0i3_suspend_flags */
619 struct mutex d0i3_suspend_mutex;
620 unsigned long d0i3_suspend_flags;
604 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ 621 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
605 spinlock_t d0i3_tx_lock; 622 spinlock_t d0i3_tx_lock;
606 wait_queue_head_t d0i3_exit_waitq; 623 wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +646,8 @@ struct iwl_mvm {
629 646
630 /* Indicate if device power save is allowed */ 647 /* Indicate if device power save is allowed */
631 bool ps_disabled; 648 bool ps_disabled;
632 /* Indicate if device power management is allowed */ 649
633 bool pm_disabled; 650 struct ieee80211_vif *csa_vif;
634}; 651};
635 652
636/* Extract MVM priv from op_mode and _hw */ 653/* Extract MVM priv from op_mode and _hw */
@@ -705,6 +722,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
705#ifdef CONFIG_IWLWIFI_DEBUGFS 722#ifdef CONFIG_IWLWIFI_DEBUGFS
706void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); 723void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
707void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm); 724void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
725void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
708#endif 726#endif
709u8 first_antenna(u8 mask); 727u8 first_antenna(u8 mask);
710u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); 728u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -745,7 +763,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
745 struct iwl_device_cmd *cmd); 763 struct iwl_device_cmd *cmd);
746 764
747/* NVM */ 765/* NVM */
748int iwl_nvm_init(struct iwl_mvm *mvm); 766int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
749int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); 767int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
750 768
751int iwl_mvm_up(struct iwl_mvm *mvm); 769int iwl_mvm_up(struct iwl_mvm *mvm);
@@ -796,7 +814,8 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
796int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 814int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
797void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 815void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
798int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 816int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
799int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 817int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
818 bool force_assoc_off);
800int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 819int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
801u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, 820u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
802 struct ieee80211_vif *vif); 821 struct ieee80211_vif *vif);
@@ -840,7 +859,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
840 struct cfg80211_sched_scan_request *req); 859 struct cfg80211_sched_scan_request *req);
841int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, 860int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
842 struct cfg80211_sched_scan_request *req); 861 struct cfg80211_sched_scan_request *req);
843int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm); 862int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
844int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm, 863int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
845 struct iwl_rx_cmd_buffer *rxb, 864 struct iwl_rx_cmd_buffer *rxb,
846 struct iwl_device_cmd *cmd); 865 struct iwl_device_cmd *cmd);
@@ -874,10 +893,8 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
874int rs_pretty_print_rate(char *buf, const u32 rate); 893int rs_pretty_print_rate(char *buf, const u32 rate);
875 894
876/* power management */ 895/* power management */
877int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
878
879int iwl_mvm_power_update_device(struct iwl_mvm *mvm); 896int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
880int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 897int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
881int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 898int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
882 char *buf, int bufsz); 899 char *buf, int bufsz);
883 900
@@ -886,8 +903,18 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
886 struct iwl_rx_cmd_buffer *rxb, 903 struct iwl_rx_cmd_buffer *rxb,
887 struct iwl_device_cmd *cmd); 904 struct iwl_device_cmd *cmd);
888 905
906#ifdef CONFIG_IWLWIFI_LEDS
889int iwl_mvm_leds_init(struct iwl_mvm *mvm); 907int iwl_mvm_leds_init(struct iwl_mvm *mvm);
890void iwl_mvm_leds_exit(struct iwl_mvm *mvm); 908void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
909#else
910static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
911{
912 return 0;
913}
914static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
915{
916}
917#endif
891 918
892/* D3 (WoWLAN, NetDetect) */ 919/* D3 (WoWLAN, NetDetect) */
893int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); 920int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
@@ -922,9 +949,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
922void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); 949void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
923void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); 950void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
924void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); 951void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
952int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
925 953
926/* BT Coex */ 954/* BT Coex */
927int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
928int iwl_send_bt_init_conf(struct iwl_mvm *mvm); 955int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
929int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, 956int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
930 struct iwl_rx_cmd_buffer *rxb, 957 struct iwl_rx_cmd_buffer *rxb,
@@ -936,9 +963,10 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
936 struct ieee80211_sta *sta); 963 struct ieee80211_sta *sta);
937bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, 964bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
938 struct ieee80211_sta *sta); 965 struct ieee80211_sta *sta);
966bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
967 enum ieee80211_band band);
939u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 968u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
940 struct ieee80211_tx_info *info, u8 ac); 969 struct ieee80211_tx_info *info, u8 ac);
941int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
942 970
943enum iwl_bt_kill_msk { 971enum iwl_bt_kill_msk {
944 BT_KILL_MSK_DEFAULT, 972 BT_KILL_MSK_DEFAULT,
@@ -969,17 +997,11 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
969int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 997int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
970 struct ieee80211_vif *vif, 998 struct ieee80211_vif *vif,
971 u32 flags); 999 u32 flags);
972int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
973 struct ieee80211_vif *vif, bool enable);
974int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
975 struct ieee80211_vif *vif,
976 bool force,
977 u32 flags);
978
979/* SMPS */ 1000/* SMPS */
980void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1001void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
981 enum iwl_mvm_smps_type_request req_type, 1002 enum iwl_mvm_smps_type_request req_type,
982 enum ieee80211_smps_mode smps_request); 1003 enum ieee80211_smps_mode smps_request);
1004bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
983 1005
984/* Low latency */ 1006/* Low latency */
985int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1007int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index cf2d09f53782..808f78f6fbf9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -74,6 +74,12 @@
74#define NVM_WRITE_OPCODE 1 74#define NVM_WRITE_OPCODE 1
75#define NVM_READ_OPCODE 0 75#define NVM_READ_OPCODE 0
76 76
77/* load nvm chunk response */
78enum {
79 READ_NVM_CHUNK_SUCCEED = 0,
80 READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
81};
82
77/* 83/*
78 * prepare the NVM host command w/ the pointers to the nvm buffer 84 * prepare the NVM host command w/ the pointers to the nvm buffer
79 * and send it to fw 85 * and send it to fw
@@ -90,7 +96,7 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
90 struct iwl_host_cmd cmd = { 96 struct iwl_host_cmd cmd = {
91 .id = NVM_ACCESS_CMD, 97 .id = NVM_ACCESS_CMD,
92 .len = { sizeof(struct iwl_nvm_access_cmd), length }, 98 .len = { sizeof(struct iwl_nvm_access_cmd), length },
93 .flags = CMD_SYNC | CMD_SEND_IN_RFKILL, 99 .flags = CMD_SEND_IN_RFKILL,
94 .data = { &nvm_access_cmd, data }, 100 .data = { &nvm_access_cmd, data },
95 /* data may come from vmalloc, so use _DUP */ 101 /* data may come from vmalloc, so use _DUP */
96 .dataflags = { 0, IWL_HCMD_DFL_DUP }, 102 .dataflags = { 0, IWL_HCMD_DFL_DUP },
@@ -112,7 +118,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
112 struct iwl_rx_packet *pkt; 118 struct iwl_rx_packet *pkt;
113 struct iwl_host_cmd cmd = { 119 struct iwl_host_cmd cmd = {
114 .id = NVM_ACCESS_CMD, 120 .id = NVM_ACCESS_CMD,
115 .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL, 121 .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
116 .data = { &nvm_access_cmd, }, 122 .data = { &nvm_access_cmd, },
117 }; 123 };
118 int ret, bytes_read, offset_read; 124 int ret, bytes_read, offset_read;
@@ -139,10 +145,26 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
139 offset_read = le16_to_cpu(nvm_resp->offset); 145 offset_read = le16_to_cpu(nvm_resp->offset);
140 resp_data = nvm_resp->data; 146 resp_data = nvm_resp->data;
141 if (ret) { 147 if (ret) {
142 IWL_ERR(mvm, 148 if ((offset != 0) &&
143 "NVM access command failed with status %d (device: %s)\n", 149 (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
144 ret, mvm->cfg->name); 150 /*
145 ret = -EINVAL; 151 * meaning of NOT_VALID_ADDRESS:
 152 * the driver tried to read a chunk from an address that
 153 * is a multiple of 2K and got an error since the address
 154 * is empty.
 155 * meaning of (offset != 0): the driver already read valid
 156 * data from another chunk, so this case is not an error.
157 */
158 IWL_DEBUG_EEPROM(mvm->trans->dev,
159 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
160 offset);
161 ret = 0;
162 } else {
163 IWL_DEBUG_EEPROM(mvm->trans->dev,
164 "NVM access command failed with status %d (device: %s)\n",
165 ret, mvm->cfg->name);
166 ret = -EIO;
167 }
146 goto exit; 168 goto exit;
147 } 169 }
148 170
@@ -211,9 +233,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
211 while (ret == length) { 233 while (ret == length) {
212 ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); 234 ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
213 if (ret < 0) { 235 if (ret < 0) {
214 IWL_ERR(mvm, 236 IWL_DEBUG_EEPROM(mvm->trans->dev,
215 "Cannot read NVM from section %d offset %d, length %d\n", 237 "Cannot read NVM from section %d offset %d, length %d\n",
216 section, offset, length); 238 section, offset, length);
217 return ret; 239 return ret;
218 } 240 }
219 offset += ret; 241 offset += ret;
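
For context, the loop this hunk touches reads one section chunk-by-chunk until the firmware returns a short chunk; the accumulated byte count is what iwl_nvm_init() later passes to kmemdup(). In outline:

	/* 'length' is the chunk size the caller starts with; a full
	 * chunk means there may be more data, a short one ends the
	 * section */
	while (ret == length) {
		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		if (ret < 0)
			return ret;	/* already logged above */
		offset += ret;
	}
	return offset;			/* total bytes in this section */
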
@@ -238,13 +260,20 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
238 return NULL; 260 return NULL;
239 } 261 }
240 } else { 262 } else {
263 /* SW and REGULATORY sections are mandatory */
241 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 264 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
242 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
243 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { 265 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
244 IWL_ERR(mvm, 266 IWL_ERR(mvm,
245 "Can't parse empty family 8000 NVM sections\n"); 267 "Can't parse empty family 8000 NVM sections\n");
246 return NULL; 268 return NULL;
247 } 269 }
270 /* MAC_OVERRIDE or at least HW section must exist */
271 if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
272 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
273 IWL_ERR(mvm,
274 "Can't parse mac_address, empty sections\n");
275 return NULL;
276 }
248 } 277 }
249 278
250 if (WARN_ON(!mvm->cfg)) 279 if (WARN_ON(!mvm->cfg))
@@ -311,16 +340,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
311 * get here after that we assume the NVM request can be satisfied 340 * get here after that we assume the NVM request can be satisfied
312 * synchronously. 341 * synchronously.
313 */ 342 */
314 ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, 343 ret = request_firmware(&fw_entry, mvm->nvm_file_name,
315 mvm->trans->dev); 344 mvm->trans->dev);
316 if (ret) { 345 if (ret) {
317 IWL_ERR(mvm, "ERROR: %s isn't available %d\n", 346 IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
318 iwlwifi_mod_params.nvm_file, ret); 347 mvm->nvm_file_name, ret);
319 return ret; 348 return ret;
320 } 349 }
321 350
322 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", 351 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
323 iwlwifi_mod_params.nvm_file, fw_entry->size); 352 mvm->nvm_file_name, fw_entry->size);
324 353
325 if (fw_entry->size < sizeof(*file_sec)) { 354 if (fw_entry->size < sizeof(*file_sec)) {
326 IWL_ERR(mvm, "NVM file too small\n"); 355 IWL_ERR(mvm, "NVM file too small\n");
@@ -427,53 +456,28 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
427 return ret; 456 return ret;
428} 457}
429 458
430int iwl_nvm_init(struct iwl_mvm *mvm) 459int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
431{ 460{
432 int ret, i, section; 461 int ret, section;
433 u8 *nvm_buffer, *temp; 462 u8 *nvm_buffer, *temp;
434 int nvm_to_read[NVM_MAX_NUM_SECTIONS];
435 int num_of_sections_to_read;
436 463
437 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) 464 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
438 return -EINVAL; 465 return -EINVAL;
439 466
440 /* load external NVM if configured */ 467 /* load NVM values from nic */
441 if (iwlwifi_mod_params.nvm_file) { 468 if (read_nvm_from_nic) {
442 /* move to External NVM flow */
443 ret = iwl_mvm_read_external_nvm(mvm);
444 if (ret)
445 return ret;
446 } else {
447 /* list of NVM sections we are allowed/need to read */
448 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
449 nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
450 nvm_to_read[1] = NVM_SECTION_TYPE_SW;
451 nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
452 nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
453 num_of_sections_to_read = 4;
454 } else {
455 nvm_to_read[0] = NVM_SECTION_TYPE_SW;
456 nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
457 nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
458 nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
459 nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
460 num_of_sections_to_read = 5;
461 }
462
463 /* Read From FW NVM */ 469 /* Read From FW NVM */
464 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); 470 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
465 471
466 /* TODO: find correct NVM max size for a section */
467 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, 472 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
468 GFP_KERNEL); 473 GFP_KERNEL);
469 if (!nvm_buffer) 474 if (!nvm_buffer)
470 return -ENOMEM; 475 return -ENOMEM;
471 for (i = 0; i < num_of_sections_to_read; i++) { 476 for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
472 section = nvm_to_read[i];
473 /* we override the constness for initial read */ 477 /* we override the constness for initial read */
474 ret = iwl_nvm_read_section(mvm, section, nvm_buffer); 478 ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
475 if (ret < 0) 479 if (ret < 0)
476 break; 480 continue;
477 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); 481 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
478 if (!temp) { 482 if (!temp) {
479 ret = -ENOMEM; 483 ret = -ENOMEM;
@@ -502,15 +506,21 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
502 mvm->nvm_hw_blob.size = ret; 506 mvm->nvm_hw_blob.size = ret;
503 break; 507 break;
504 } 508 }
505 WARN(1, "section: %d", section);
506 } 509 }
507#endif 510#endif
508 } 511 }
509 kfree(nvm_buffer); 512 kfree(nvm_buffer);
510 if (ret < 0) 513 }
514
515 /* load external NVM if configured */
516 if (mvm->nvm_file_name) {
517 /* move to External NVM flow */
518 ret = iwl_mvm_read_external_nvm(mvm);
519 if (ret)
511 return ret; 520 return ret;
512 } 521 }
513 522
523 /* parse the relevant nvm sections */
514 mvm->nvm_data = iwl_parse_nvm_sections(mvm); 524 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
515 if (!mvm->nvm_data) 525 if (!mvm->nvm_data)
516 return -ENODATA; 526 return -ENODATA;
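
Put together, the reworked iwl_nvm_init() has two ordered sources: the OTP/NIC image first (skipped for no_power_up_nic_in_init devices, per the ops.c hunk below), then the external file layered on top, and only then the parse. In outline, with the per-section bookkeeping elided:

	int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
	{
		int section;

		if (read_nvm_from_nic) {
			/* try every section; one that cannot be read is
			 * simply skipped instead of aborting the init */
			for (section = 0; section < NVM_MAX_NUM_SECTIONS;
			     section++) {
				/* iwl_nvm_read_section() + kmemdup() into
				 * mvm->nvm_sections[section], as above */
			}
		}

		/* file entries override/extend what came from the OTP */
		if (mvm->nvm_file_name) {
			int ret = iwl_mvm_read_external_nvm(mvm);
			if (ret)
				return ret;
		}

		mvm->nvm_data = iwl_parse_nvm_sections(mvm);
		return mvm->nvm_data ? 0 : -ENODATA;
	}
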
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 9545d7fdd4bf..cc2f7de396de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -79,8 +79,8 @@
79#include "iwl-prph.h" 79#include "iwl-prph.h"
80#include "rs.h" 80#include "rs.h"
81#include "fw-api-scan.h" 81#include "fw-api-scan.h"
82#include "fw-error-dump.h"
83#include "time-event.h" 82#include "time-event.h"
83#include "iwl-fw-error-dump.h"
84 84
85/* 85/*
86 * module name, copyright, version, etc. 86 * module name, copyright, version, etc.
@@ -220,7 +220,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
220 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), 220 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
221 221
222 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), 222 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
223 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), 223 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
224 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), 224 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
225 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, 225 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
226 iwl_mvm_rx_ant_coupling_notif, true), 226 iwl_mvm_rx_ant_coupling_notif, true),
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
402 mvm->sf_state = SF_UNINIT; 402 mvm->sf_state = SF_UNINIT;
403 403
404 mutex_init(&mvm->mutex); 404 mutex_init(&mvm->mutex);
405 mutex_init(&mvm->d0i3_suspend_mutex);
405 spin_lock_init(&mvm->async_handlers_lock); 406 spin_lock_init(&mvm->async_handlers_lock);
406 INIT_LIST_HEAD(&mvm->time_event_list); 407 INIT_LIST_HEAD(&mvm->time_event_list);
407 INIT_LIST_HEAD(&mvm->async_handlers_list); 408 INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -465,13 +466,24 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
465 466
466 min_backoff = calc_min_backoff(trans, cfg); 467 min_backoff = calc_min_backoff(trans, cfg);
467 iwl_mvm_tt_initialize(mvm, min_backoff); 468 iwl_mvm_tt_initialize(mvm, min_backoff);
469 /* set the nvm_file_name according to priority */
470 if (iwlwifi_mod_params.nvm_file)
471 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
472 else
473 mvm->nvm_file_name = mvm->cfg->default_nvm_file;
474
475 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
476 "not allowing power-up and not having nvm_file\n"))
477 goto out_free;
468 478
469 /* 479 /*
470 * If the NVM exists in an external file, 480 * Even if the nvm exists in the nvm_file, the driver should read the nvm
471 * there is no need to unnecessarily power up the NIC at driver load 481 * again from the nic because there might be entries that exist in the OTP
 482 * and not in the file.
 483 * for nics with no_power_up_nic_in_init: rely completely on nvm_file
472 */ 484 */
473 if (iwlwifi_mod_params.nvm_file) { 485 if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
474 err = iwl_nvm_init(mvm); 486 err = iwl_nvm_init(mvm, false);
475 if (err) 487 if (err)
476 goto out_free; 488 goto out_free;
477 } else { 489 } else {
@@ -518,7 +530,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
518 out_free: 530 out_free:
519 iwl_phy_db_free(mvm->phy_db); 531 iwl_phy_db_free(mvm->phy_db);
520 kfree(mvm->scan_cmd); 532 kfree(mvm->scan_cmd);
521 if (!iwlwifi_mod_params.nvm_file) 533 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
522 iwl_trans_op_mode_leave(trans); 534 iwl_trans_op_mode_leave(trans);
523 ieee80211_free_hw(mvm->hw); 535 ieee80211_free_hw(mvm->hw);
524 return NULL; 536 return NULL;
@@ -538,6 +550,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
538 kfree(mvm->scan_cmd); 550 kfree(mvm->scan_cmd);
539 vfree(mvm->fw_error_dump); 551 vfree(mvm->fw_error_dump);
540 kfree(mvm->fw_error_sram); 552 kfree(mvm->fw_error_sram);
553 kfree(mvm->fw_error_rxf);
541 kfree(mvm->mcast_filter_cmd); 554 kfree(mvm->mcast_filter_cmd);
542 mvm->mcast_filter_cmd = NULL; 555 mvm->mcast_filter_cmd = NULL;
543 556
@@ -814,6 +827,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
814 struct iwl_fw_error_dump_file *dump_file; 827 struct iwl_fw_error_dump_file *dump_file;
815 struct iwl_fw_error_dump_data *dump_data; 828 struct iwl_fw_error_dump_data *dump_data;
816 u32 file_len; 829 u32 file_len;
830 u32 trans_len;
817 831
818 lockdep_assert_held(&mvm->mutex); 832 lockdep_assert_held(&mvm->mutex);
819 833
@@ -821,8 +835,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
821 return; 835 return;
822 836
823 file_len = mvm->fw_error_sram_len + 837 file_len = mvm->fw_error_sram_len +
838 mvm->fw_error_rxf_len +
824 sizeof(*dump_file) + 839 sizeof(*dump_file) +
825 sizeof(*dump_data); 840 sizeof(*dump_data) * 2;
841
842 trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
843 if (trans_len)
844 file_len += trans_len;
826 845
827 dump_file = vmalloc(file_len); 846 dump_file = vmalloc(file_len);
828 if (!dump_file) 847 if (!dump_file)
@@ -833,7 +852,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
833 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); 852 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
834 dump_file->file_len = cpu_to_le32(file_len); 853 dump_file->file_len = cpu_to_le32(file_len);
835 dump_data = (void *)dump_file->data; 854 dump_data = (void *)dump_file->data;
836 dump_data->type = IWL_FW_ERROR_DUMP_SRAM; 855 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
856 dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
857 memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
858
859 dump_data = iwl_mvm_fw_error_next_data(dump_data);
860 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
837 dump_data->len = cpu_to_le32(mvm->fw_error_sram_len); 861 dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
838 862
839 /* 863 /*
@@ -842,6 +866,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
842 * mvm->fw_error_sram right now. 866 * mvm->fw_error_sram right now.
843 */ 867 */
844 memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len); 868 memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
869
870 kfree(mvm->fw_error_rxf);
871 mvm->fw_error_rxf = NULL;
872 mvm->fw_error_rxf_len = 0;
873
874 kfree(mvm->fw_error_sram);
875 mvm->fw_error_sram = NULL;
876 mvm->fw_error_sram_len = 0;
877
878 if (trans_len) {
879 void *buf = iwl_mvm_fw_error_next_data(dump_data);
880 u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
881 trans_len);
882 dump_data = (void *)((u8 *)buf + real_trans_len);
883 dump_file->file_len =
884 cpu_to_le32(file_len - trans_len + real_trans_len);
885 }
845} 886}
846#endif 887#endif
847 888
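The dump file assembled above is a small TLV stream: a header carrying the barker and file_len, followed by (type, len, data[]) records, with iwl_mvm_fw_error_next_data() stepping from one record to the next. A minimal standalone model of that walk (the struct shape is inferred from how the hunk uses dump_data, not taken from the driver headers):

    #include <stdint.h>

    /* Assumed shape of one dump record; mirrors the dump_data usage above. */
    struct dump_rec {
            uint32_t type;          /* e.g. IWL_FW_ERROR_DUMP_RXF / _SRAM */
            uint32_t len;           /* number of bytes in data[] */
            uint8_t  data[];
    };

    /* Model of iwl_mvm_fw_error_next_data(): skip over one record. */
    static inline struct dump_rec *next_rec(struct dump_rec *r)
    {
            return (struct dump_rec *)(r->data + r->len);
    }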
@@ -853,6 +894,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
853 894
854#ifdef CONFIG_IWLWIFI_DEBUGFS 895#ifdef CONFIG_IWLWIFI_DEBUGFS
855 iwl_mvm_fw_error_sram_dump(mvm); 896 iwl_mvm_fw_error_sram_dump(mvm);
897 iwl_mvm_fw_error_rxf_dump(mvm);
856#endif 898#endif
857 899
858 iwl_mvm_nic_restart(mvm); 900 iwl_mvm_nic_restart(mvm);
@@ -1126,9 +1168,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1126 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); 1168 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1127 struct iwl_host_cmd get_status_cmd = { 1169 struct iwl_host_cmd get_status_cmd = {
1128 .id = WOWLAN_GET_STATUSES, 1170 .id = WOWLAN_GET_STATUSES,
1129 .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB, 1171 .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
1130 }; 1172 };
1131 struct iwl_wowlan_status_v6 *status; 1173 struct iwl_wowlan_status *status;
1132 int ret; 1174 int ret;
1133 u32 disconnection_reasons, wakeup_reasons; 1175 u32 disconnection_reasons, wakeup_reasons;
1134 __le16 *qos_seq = NULL; 1176 __le16 *qos_seq = NULL;
@@ -1158,18 +1200,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1158 iwl_free_resp(&get_status_cmd); 1200 iwl_free_resp(&get_status_cmd);
1159out: 1201out:
1160 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1202 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1203 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1161 mutex_unlock(&mvm->mutex); 1204 mutex_unlock(&mvm->mutex);
1162} 1205}
1163 1206
1164static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) 1207int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
1165{ 1208{
1166 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1167 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | 1209 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1168 CMD_WAKE_UP_TRANS; 1210 CMD_WAKE_UP_TRANS;
1169 int ret; 1211 int ret;
1170 1212
1171 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); 1213 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1172 1214
1215 mutex_lock(&mvm->d0i3_suspend_mutex);
1216 if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1217 IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1218 __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1219 mutex_unlock(&mvm->d0i3_suspend_mutex);
1220 return 0;
1221 }
1222 mutex_unlock(&mvm->d0i3_suspend_mutex);
1223
1173 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); 1224 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1174 if (ret) 1225 if (ret)
1175 goto out; 1226 goto out;
@@ -1183,6 +1234,25 @@ out:
1183 return ret; 1234 return ret;
1184} 1235}
1185 1236
1237static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1238{
1239 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1240
1241 iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1242 return _iwl_mvm_exit_d0i3(mvm);
1243}
1244
1245static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
1246 struct napi_struct *napi,
1247 struct net_device *napi_dev,
1248 int (*poll)(struct napi_struct *, int),
1249 int weight)
1250{
1251 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1252
1253 ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
1254}
1255
1186static const struct iwl_op_mode_ops iwl_mvm_ops = { 1256static const struct iwl_op_mode_ops iwl_mvm_ops = {
1187 .start = iwl_op_mode_mvm_start, 1257 .start = iwl_op_mode_mvm_start,
1188 .stop = iwl_op_mode_mvm_stop, 1258 .stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1266,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
1196 .nic_config = iwl_mvm_nic_config, 1266 .nic_config = iwl_mvm_nic_config,
1197 .enter_d0i3 = iwl_mvm_enter_d0i3, 1267 .enter_d0i3 = iwl_mvm_enter_d0i3,
1198 .exit_d0i3 = iwl_mvm_exit_d0i3, 1268 .exit_d0i3 = iwl_mvm_exit_d0i3,
1269 .napi_add = iwl_mvm_napi_add,
1199}; 1270};
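The exit-d0i3 split above pairs a device reference with the deferred work: iwl_mvm_exit_d0i3() takes IWL_MVM_REF_EXIT_WORK before calling _iwl_mvm_exit_d0i3(), and iwl_mvm_d0i3_exit_work() drops it via iwl_mvm_unref() once TX is re-enabled, so the device cannot slip back into D0i3 in the middle of the exit sequence. A toy model of that pairing (names are illustrative, not the driver's API):

    #include <stdatomic.h>

    static atomic_int exit_work_ref;        /* models IWL_MVM_REF_EXIT_WORK */

    static void exit_d0i3(void)
    {
            atomic_fetch_add(&exit_work_ref, 1);   /* iwl_mvm_ref(...)   */
            /* ...send D0I3_END_CMD, which schedules d0i3_exit_work... */
    }

    static void d0i3_exit_work(void)
    {
            /* ...restore TX queues and QoS state... */
            atomic_fetch_sub(&exit_work_ref, 1);   /* iwl_mvm_unref(...) */
    }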
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 237efe0ac1c4..539f3a942d43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,6 +156,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
156 idle_cnt = chains_static; 156 idle_cnt = chains_static;
157 active_cnt = chains_dynamic; 157 active_cnt = chains_dynamic;
158 158
159 /* In scenarios where we only ever use single-stream rates,
160 * i.e. legacy 11b/g/a associations, single-stream APs or even
161 * static SMPS, enable both chains to get diversity, improving
162 * the case where we're far enough from the AP that attenuation
163 * between the two antennas is sufficiently different to impact
164 * performance.
165 */
166 if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
167 idle_cnt = 2;
168 active_cnt = 2;
169 }
170
159 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant << 171 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
160 PHY_RX_CHAIN_VALID_POS); 172 PHY_RX_CHAIN_VALID_POS);
161 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); 173 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
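The new block is a plain override of the chain counts computed earlier in the function: when the configuration would leave a single active RX chain and iwl_mvm_rx_diversity_allowed() reports both antennas are free, both the idle and active counts are forced to 2. In isolation the decision is just (a sketch; `allowed` stands in for iwl_mvm_rx_diversity_allowed(mvm)):

    static void maybe_force_diversity(int allowed,
                                      unsigned *idle_cnt, unsigned *active_cnt)
    {
            if (*active_cnt == 1 && allowed) {
                    *idle_cnt = 2;          /* both chains listen when idle */
                    *active_cnt = 2;        /* and while receiving          */
            }
    }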
@@ -187,7 +199,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
187 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, 199 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
188 chains_static, chains_dynamic); 200 chains_static, chains_dynamic);
189 201
190 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, 202 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
191 sizeof(struct iwl_phy_context_cmd), 203 sizeof(struct iwl_phy_context_cmd),
192 &cmd); 204 &cmd);
193 if (ret) 205 if (ret)
@@ -202,18 +214,15 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
202 struct cfg80211_chan_def *chandef, 214 struct cfg80211_chan_def *chandef,
203 u8 chains_static, u8 chains_dynamic) 215 u8 chains_static, u8 chains_dynamic)
204{ 216{
205 int ret;
206
207 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 217 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
208 ctxt->ref); 218 ctxt->ref);
209 lockdep_assert_held(&mvm->mutex); 219 lockdep_assert_held(&mvm->mutex);
210 220
211 ctxt->channel = chandef->chan; 221 ctxt->channel = chandef->chan;
212 ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
213 chains_static, chains_dynamic,
214 FW_CTXT_ACTION_ADD, 0);
215 222
216 return ret; 223 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
224 chains_static, chains_dynamic,
225 FW_CTXT_ACTION_ADD, 0);
217} 226}
218 227
219/* 228/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 6b636eab3339..c182a8baf685 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -123,28 +123,6 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
123 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); 123 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
124} 124}
125 125
126int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
127 struct ieee80211_vif *vif, bool enable)
128{
129 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
130 struct iwl_beacon_filter_cmd cmd = {
131 IWL_BF_CMD_CONFIG_DEFAULTS,
132 .bf_enable_beacon_filter = cpu_to_le32(1),
133 .ba_enable_beacon_abort = cpu_to_le32(enable),
134 };
135
136 if (!mvmvif->bf_data.bf_enabled)
137 return 0;
138
139 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
140 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
141
142 mvmvif->bf_data.ba_enabled = enable;
143 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
144 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
145 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
146}
147
148static void iwl_mvm_power_log(struct iwl_mvm *mvm, 126static void iwl_mvm_power_log(struct iwl_mvm *mvm,
149 struct iwl_mac_power_cmd *cmd) 127 struct iwl_mac_power_cmd *cmd)
150{ 128{
@@ -268,6 +246,57 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
268 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; 246 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
269} 247}
270 248
249static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
250 struct ieee80211_vif *vif)
251{
252 unsigned long *data = _data;
253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
254
255 if (!mvmvif->phy_ctxt)
256 return;
257
258 if (vif->type == NL80211_IFTYPE_STATION ||
259 vif->type == NL80211_IFTYPE_AP)
260 __set_bit(mvmvif->phy_ctxt->id, data);
261}
262
263static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
264 struct ieee80211_vif *vif)
265{
266 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
267 unsigned long phy_ctxt_counter = 0;
268
269 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
270 IEEE80211_IFACE_ITER_NORMAL,
271 iwl_mvm_binding_iterator,
272 &phy_ctxt_counter);
273
274 if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
275 ETH_ALEN))
276 return false;
277
278 if (vif->p2p &&
279 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
280 return false;
281 /*
282 * Avoid using uAPSD if a P2P client is associated to a GO that uses
283 * opportunistic power save. This is due to a current FW limitation.
284 */
285 if (vif->p2p &&
286 (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
287 IEEE80211_P2P_OPPPS_ENABLE_BIT))
288 return false;
289
290 /*
291 * Avoid using uAPSD if client is in DCM -
292 * low latency issue in Miracast
293 */
294 if (hweight8(phy_ctxt_counter) >= 2)
295 return false;
296
297 return true;
298}
299
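The iterator above reduces the set of bound PHY contexts to a bitmap, so the "client is in DCM" test collapses to counting set bits: two distinct PHY-context ids mean two channels are in use, and uAPSD is refused. A runnable model of that test (the bit positions are illustrative):

    #include <stdio.h>

    static int bits_set(unsigned long v)
    {
            int n = 0;
            for (; v; v >>= 1)
                    n += v & 1;
            return n;
    }

    int main(void)
    {
            unsigned long phy_ctxt_counter = 0;

            phy_ctxt_counter |= 1UL << 0;   /* BSS vif on PHY context 0    */
            phy_ctxt_counter |= 1UL << 1;   /* P2P client on PHY context 1 */

            /* Mirrors: if (hweight8(phy_ctxt_counter) >= 2) return false; */
            printf("uAPSD %s\n", bits_set(phy_ctxt_counter) >= 2 ?
                   "refused (DCM)" : "allowed");
            return 0;
    }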
271static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, 300static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
272 struct ieee80211_vif *vif, 301 struct ieee80211_vif *vif,
273 struct iwl_mac_power_cmd *cmd) 302 struct iwl_mac_power_cmd *cmd)
@@ -280,7 +309,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
280 bool radar_detect = false; 309 bool radar_detect = false;
281 struct iwl_mvm_vif *mvmvif __maybe_unused = 310 struct iwl_mvm_vif *mvmvif __maybe_unused =
282 iwl_mvm_vif_from_mac80211(vif); 311 iwl_mvm_vif_from_mac80211(vif);
283 bool allow_uapsd = true;
284 312
285 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 313 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
286 mvmvif->color)); 314 mvmvif->color));
@@ -303,13 +331,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
303 331
304 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 332 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
305 333
306#ifdef CONFIG_IWLWIFI_DEBUGFS
307 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
308 mvmvif->dbgfs_pm.disable_power_off)
309 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
310#endif
311 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || 334 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
312 mvm->pm_disabled) 335 !mvmvif->pm_enabled)
313 return; 336 return;
314 337
315 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 338 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +374,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
351 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); 374 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
352 } 375 }
353 376
354 if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, 377 if (iwl_mvm_power_allow_uapsd(mvm, vif))
355 ETH_ALEN))
356 allow_uapsd = false;
357
358 if (vif->p2p &&
359 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
360 allow_uapsd = false;
361 /*
362 * Avoid using uAPSD if P2P client is associated to GO that uses
363 * opportunistic power save. This is due to current FW limitation.
364 */
365 if (vif->p2p &&
366 vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
367 IEEE80211_P2P_OPPPS_ENABLE_BIT)
368 allow_uapsd = false;
369
370 if (allow_uapsd)
371 iwl_mvm_power_configure_uapsd(mvm, vif, cmd); 378 iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
372 379
373#ifdef CONFIG_IWLWIFI_DEBUGFS 380#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,20 +428,13 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
421{ 428{
422 struct iwl_mac_power_cmd cmd = {}; 429 struct iwl_mac_power_cmd cmd = {};
423 430
424 if (vif->type != NL80211_IFTYPE_STATION)
425 return 0;
426
427 if (vif->p2p &&
428 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
429 return 0;
430
431 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 431 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
432 iwl_mvm_power_log(mvm, &cmd); 432 iwl_mvm_power_log(mvm, &cmd);
433#ifdef CONFIG_IWLWIFI_DEBUGFS 433#ifdef CONFIG_IWLWIFI_DEBUGFS
434 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); 434 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
435#endif 435#endif
436 436
437 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC, 437 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
438 sizeof(cmd), &cmd); 438 sizeof(cmd), &cmd);
439} 439}
440 440
@@ -444,12 +444,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
444 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK), 444 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
445 }; 445 };
446 446
447 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
448 return 0;
449
450 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
451 return 0;
452
453 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) 447 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
454 mvm->ps_disabled = true; 448 mvm->ps_disabled = true;
455 449
@@ -466,7 +460,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
466 "Sending device power command with flags = 0x%X\n", 460 "Sending device power command with flags = 0x%X\n",
467 cmd.flags); 461 cmd.flags);
468 462
469 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd), 463 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
470 &cmd); 464 &cmd);
471} 465}
472 466
@@ -508,86 +502,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
508 return 0; 502 return 0;
509} 503}
510 504
511struct iwl_power_constraint { 505struct iwl_power_vifs {
512 struct ieee80211_vif *bf_vif; 506 struct ieee80211_vif *bf_vif;
513 struct ieee80211_vif *bss_vif; 507 struct ieee80211_vif *bss_vif;
514 struct ieee80211_vif *p2p_vif; 508 struct ieee80211_vif *p2p_vif;
515 u16 bss_phyctx_id; 509 struct ieee80211_vif *ap_vif;
516 u16 p2p_phyctx_id; 510 struct ieee80211_vif *monitor_vif;
517 bool pm_disabled; 511 bool p2p_active;
518 bool ps_disabled; 512 bool bss_active;
519 struct iwl_mvm *mvm; 513 bool ap_active;
514 bool monitor_active;
520}; 515};
521 516
522static void iwl_mvm_power_iterator(void *_data, u8 *mac, 517static void iwl_mvm_power_iterator(void *_data, u8 *mac,
523 struct ieee80211_vif *vif) 518 struct ieee80211_vif *vif)
524{ 519{
525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 520 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
526 struct iwl_power_constraint *power_iterator = _data; 521 struct iwl_power_vifs *power_iterator = _data;
527 struct iwl_mvm *mvm = power_iterator->mvm;
528 522
523 mvmvif->pm_enabled = false;
529 switch (ieee80211_vif_type_p2p(vif)) { 524 switch (ieee80211_vif_type_p2p(vif)) {
530 case NL80211_IFTYPE_P2P_DEVICE: 525 case NL80211_IFTYPE_P2P_DEVICE:
531 break; 526 break;
532 527
533 case NL80211_IFTYPE_P2P_GO: 528 case NL80211_IFTYPE_P2P_GO:
534 case NL80211_IFTYPE_AP: 529 case NL80211_IFTYPE_AP:
535 /* no BSS power mgmt if we have an active AP */ 530 /* only a single MAC of the same type */
536 if (mvmvif->ap_ibss_active) 531 WARN_ON(power_iterator->ap_vif);
537 power_iterator->pm_disabled = true; 532 power_iterator->ap_vif = vif;
533 if (mvmvif->phy_ctxt)
534 if (mvmvif->phy_ctxt->id < MAX_PHYS)
535 power_iterator->ap_active = true;
538 break; 536 break;
539 537
540 case NL80211_IFTYPE_MONITOR: 538 case NL80211_IFTYPE_MONITOR:
541 /* no BSS power mgmt and no device power save */ 539 /* only a single MAC of the same type */
542 power_iterator->pm_disabled = true; 540 WARN_ON(power_iterator->monitor_vif);
543 power_iterator->ps_disabled = true; 541 power_iterator->monitor_vif = vif;
542 if (mvmvif->phy_ctxt)
543 if (mvmvif->phy_ctxt->id < MAX_PHYS)
544 power_iterator->monitor_active = true;
544 break; 545 break;
545 546
546 case NL80211_IFTYPE_P2P_CLIENT: 547 case NL80211_IFTYPE_P2P_CLIENT:
547 if (mvmvif->phy_ctxt) 548 /* only a single MAC of the same type */
548 power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
549
550 /* we should have only one P2P vif */
551 WARN_ON(power_iterator->p2p_vif); 549 WARN_ON(power_iterator->p2p_vif);
552 power_iterator->p2p_vif = vif; 550 power_iterator->p2p_vif = vif;
553 551 if (mvmvif->phy_ctxt)
554 IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n", 552 if (mvmvif->phy_ctxt->id < MAX_PHYS)
555 power_iterator->p2p_phyctx_id, 553 power_iterator->p2p_active = true;
556 power_iterator->bss_phyctx_id);
557 if (!(mvm->fw->ucode_capa.flags &
558 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
559 /* no BSS power mgmt if we have a P2P client*/
560 power_iterator->pm_disabled = true;
561 } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
562 power_iterator->bss_phyctx_id < MAX_PHYS &&
563 power_iterator->p2p_phyctx_id ==
564 power_iterator->bss_phyctx_id) {
565 power_iterator->pm_disabled = true;
566 }
567 break; 554 break;
568 555
569 case NL80211_IFTYPE_STATION: 556 case NL80211_IFTYPE_STATION:
570 if (mvmvif->phy_ctxt) 557 /* only a single MAC of the same type */
571 power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
572
573 /* we should have only one BSS vif */
574 WARN_ON(power_iterator->bss_vif); 558 WARN_ON(power_iterator->bss_vif);
575 power_iterator->bss_vif = vif; 559 power_iterator->bss_vif = vif;
560 if (mvmvif->phy_ctxt)
561 if (mvmvif->phy_ctxt->id < MAX_PHYS)
562 power_iterator->bss_active = true;
576 563
577 if (mvmvif->bf_data.bf_enabled && 564 if (mvmvif->bf_data.bf_enabled &&
578 !WARN_ON(power_iterator->bf_vif)) 565 !WARN_ON(power_iterator->bf_vif))
579 power_iterator->bf_vif = vif; 566 power_iterator->bf_vif = vif;
580 567
581 IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
582 power_iterator->p2p_phyctx_id,
583 power_iterator->bss_phyctx_id);
584 if (mvm->fw->ucode_capa.flags &
585 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
586 (power_iterator->p2p_phyctx_id < MAX_PHYS &&
587 power_iterator->bss_phyctx_id < MAX_PHYS &&
588 power_iterator->p2p_phyctx_id ==
589 power_iterator->bss_phyctx_id))
590 power_iterator->pm_disabled = true;
591 break; 568 break;
592 569
593 default: 570 default:
@@ -596,70 +573,73 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
596} 573}
597 574
598static void 575static void
599iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm, 576iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
600 struct iwl_power_constraint *constraint) 577 struct iwl_power_vifs *vifs)
601{ 578{
602 lockdep_assert_held(&mvm->mutex); 579 struct iwl_mvm_vif *bss_mvmvif = NULL;
580 struct iwl_mvm_vif *p2p_mvmvif = NULL;
581 struct iwl_mvm_vif *ap_mvmvif = NULL;
582 bool client_same_channel = false;
583 bool ap_same_channel = false;
603 584
604 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) { 585 lockdep_assert_held(&mvm->mutex);
605 constraint->pm_disabled = true;
606 constraint->ps_disabled = true;
607 }
608 586
587 /* get vifs info + set pm_enabled to false */
609 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 588 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
610 IEEE80211_IFACE_ITER_NORMAL, 589 IEEE80211_IFACE_ITER_NORMAL,
611 iwl_mvm_power_iterator, constraint); 590 iwl_mvm_power_iterator, vifs);
612}
613
614int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
615{
616 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
617 struct iwl_power_constraint constraint = {
618 .p2p_phyctx_id = MAX_PHYS,
619 .bss_phyctx_id = MAX_PHYS,
620 .mvm = mvm,
621 };
622 bool ba_enable;
623 int ret;
624 591
625 lockdep_assert_held(&mvm->mutex); 592 if (vifs->bss_vif)
593 bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
626 594
627 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) 595 if (vifs->p2p_vif)
628 return 0; 596 p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
629 597
630 iwl_mvm_power_get_global_constraint(mvm, &constraint); 598 if (vifs->ap_vif)
631 mvm->ps_disabled = constraint.ps_disabled; 599 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
632 mvm->pm_disabled = constraint.pm_disabled;
633 600
634 /* don't update device power state unless we add / remove monitor */ 601 /* enable PM on bss if bss stands alone */
635 if (vif->type == NL80211_IFTYPE_MONITOR) { 602 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
636 ret = iwl_mvm_power_update_device(mvm); 603 bss_mvmvif->pm_enabled = true;
637 if (ret) 604 return;
638 return ret;
639 } 605 }
640 606
641 if (constraint.bss_vif) { 607 /* enable PM on p2p if p2p stands alone */
642 ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif); 608 if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
643 if (ret) 609 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
644 return ret; 610 p2p_mvmvif->pm_enabled = true;
611 return;
645 } 612 }
646 613
647 if (constraint.p2p_vif) { 614 if (vifs->bss_active && vifs->p2p_active)
648 ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif); 615 client_same_channel = (bss_mvmvif->phy_ctxt->id ==
649 if (ret) 616 p2p_mvmvif->phy_ctxt->id);
650 return ret; 617 if (vifs->bss_active && vifs->ap_active)
618 ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
619 ap_mvmvif->phy_ctxt->id);
620
621 /* clients are not stand alone: enable PM if DCM */
622 if (!(client_same_channel || ap_same_channel) &&
623 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
624 if (vifs->bss_active)
625 bss_mvmvif->pm_enabled = true;
626 if (vifs->p2p_active &&
627 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
628 p2p_mvmvif->pm_enabled = true;
629 return;
651 } 630 }
652 631
653 if (!constraint.bf_vif) 632 /*
654 return 0; 633 * There is only one channel in the system and there are only
655 634 * bss and p2p clients that share it
656 vif = constraint.bf_vif; 635 */
657 mvmvif = iwl_mvm_vif_from_mac80211(vif); 636 if (client_same_channel && !vifs->ap_active &&
658 637 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
659 ba_enable = !(constraint.pm_disabled || constraint.ps_disabled || 638 /* share same channel */
660 !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif)); 639 bss_mvmvif->pm_enabled = true;
661 640 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
662 return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable); 641 p2p_mvmvif->pm_enabled = true;
642 }
663} 643}
664 644
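Collapsed to its inputs, the new iwl_mvm_power_set_pm() is a small decision function over which interface types are active, whether clients share a channel, and three firmware capability flags. A condensed model (flag names shortened, minor cases simplified; behaviour follows the hunk above):

    struct fw_caps { int p2p_pm, dcm_ps, scm_ps; };

    static void set_pm(struct fw_caps c, int bss, int p2p, int ap,
                       int same_chan, int *bss_pm, int *p2p_pm)
    {
            *bss_pm = 0;
            *p2p_pm = 0;

            if (bss && !p2p && !ap) {               /* BSS stands alone  */
                    *bss_pm = 1;
            } else if (p2p && !bss && !ap) {        /* P2P stands alone  */
                    *p2p_pm = c.p2p_pm;
            } else if (!same_chan && c.dcm_ps) {    /* clients in DCM    */
                    *bss_pm = bss;
                    *p2p_pm = p2p && c.p2p_pm;
            } else if (same_chan && !ap && c.scm_ps) { /* BSS+P2P on SCM */
                    *bss_pm = 1;
                    *p2p_pm = c.p2p_pm;
            }
    }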
665#ifdef CONFIG_IWLWIFI_DEBUGFS 645#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +651,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
671 struct iwl_mac_power_cmd cmd = {}; 651 struct iwl_mac_power_cmd cmd = {};
672 int pos = 0; 652 int pos = 0;
673 653
674 if (WARN_ON(!(mvm->fw->ucode_capa.flags &
675 IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
676 return 0;
677
678 mutex_lock(&mvm->mutex); 654 mutex_lock(&mvm->mutex);
679 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); 655 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
680 mutex_unlock(&mvm->mutex); 656 mutex_unlock(&mvm->mutex);
681 657
682 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
683 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
684 (cmd.flags &
685 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
686 0 : 1);
687 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", 658 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
688 iwlmvm_mod_params.power_scheme); 659 iwlmvm_mod_params.power_scheme);
689 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", 660 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -790,7 +761,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
790 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 761 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
791 int ret; 762 int ret;
792 763
793 if (mvmvif != mvm->bf_allowed_vif || 764 if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
794 vif->type != NL80211_IFTYPE_STATION || vif->p2p) 765 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
795 return 0; 766 return 0;
796 767
@@ -818,6 +789,26 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
818 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); 789 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
819} 790}
820 791
792static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
793 struct ieee80211_vif *vif,
794 bool enable)
795{
796 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
797 struct iwl_beacon_filter_cmd cmd = {
798 IWL_BF_CMD_CONFIG_DEFAULTS,
799 .bf_enable_beacon_filter = cpu_to_le32(1),
800 };
801
802 if (!mvmvif->bf_data.bf_enabled)
803 return 0;
804
805 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
806 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
807
808 mvmvif->bf_data.ba_enabled = enable;
809 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
810}
811
821int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 812int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
822 struct ieee80211_vif *vif, 813 struct ieee80211_vif *vif,
823 u32 flags) 814 u32 flags)
@@ -826,8 +817,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
826 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 817 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
827 int ret; 818 int ret;
828 819
829 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) || 820 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
830 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
831 return 0; 821 return 0;
832 822
833 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); 823 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -838,6 +828,55 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
838 return ret; 828 return ret;
839} 829}
840 830
831int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
832{
833 struct iwl_mvm_vif *mvmvif;
834 struct iwl_power_vifs vifs = {};
835 bool ba_enable;
836 int ret;
837
838 lockdep_assert_held(&mvm->mutex);
839
840 iwl_mvm_power_set_pm(mvm, &vifs);
841
842 /* disable PS if CAM */
843 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
844 mvm->ps_disabled = true;
845 } else {
846 /* don't update device power state unless we add / remove monitor */
847 if (vifs.monitor_vif) {
848 if (vifs.monitor_active)
849 mvm->ps_disabled = true;
850 ret = iwl_mvm_power_update_device(mvm);
851 if (ret)
852 return ret;
853 }
854 }
855
856 if (vifs.bss_vif) {
857 ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
858 if (ret)
859 return ret;
860 }
861
862 if (vifs.p2p_vif) {
863 ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
864 if (ret)
865 return ret;
866 }
867
868 if (!vifs.bf_vif)
869 return 0;
870
871 mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);
872
873 ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
874 !vifs.bf_vif->bss_conf.ps ||
875 iwl_mvm_vif_low_latency(mvmvif));
876
877 return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
878}
879
841int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, 880int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
842 struct ieee80211_vif *vif, 881 struct ieee80211_vif *vif,
843 bool enable, u32 flags) 882 bool enable, u32 flags)
@@ -861,9 +900,10 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
861 if (WARN_ON(!dtimper_msec)) 900 if (WARN_ON(!dtimper_msec))
862 return 0; 901 return 0;
863 902
864 cmd.flags |=
865 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
866 cmd.skip_dtim_periods = 300 / dtimper_msec; 903 cmd.skip_dtim_periods = 300 / dtimper_msec;
904 if (cmd.skip_dtim_periods)
905 cmd.flags |=
906 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
867 } 907 }
868 iwl_mvm_power_log(mvm, &cmd); 908 iwl_mvm_power_log(mvm, &cmd);
869#ifdef CONFIG_IWLWIFI_DEBUGFS 909#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -894,33 +934,3 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
894 934
895 return ret; 935 return ret;
896} 936}
897
898int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
899 struct ieee80211_vif *vif,
900 bool force,
901 u32 flags)
902{
903 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
904
905 if (mvmvif != mvm->bf_allowed_vif)
906 return 0;
907
908 if (!mvmvif->bf_data.bf_enabled) {
909 /* disable beacon filtering explicitly if force is true */
910 if (force)
911 return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
912 return 0;
913 }
914
915 return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
916}
917
918int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
919{
920 struct iwl_powertable_cmd cmd = {
921 .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
922 };
923
924 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
925 sizeof(cmd), &cmd);
926}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 35e86e06dffd..ba68d7b84505 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -285,7 +285,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
285 285
286 iwl_mvm_adjust_quota_for_noa(mvm, &cmd); 286 iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
287 287
288 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, 288 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
289 sizeof(cmd), &cmd); 289 sizeof(cmd), &cmd);
290 if (ret) 290 if (ret)
291 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 291 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c838899363..306a6caa4868 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -211,7 +211,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
211 .next_columns = { 211 .next_columns = {
212 RS_COLUMN_LEGACY_ANT_B, 212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A, 213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B, 214 RS_COLUMN_MIMO2,
215 RS_COLUMN_INVALID, 215 RS_COLUMN_INVALID,
216 RS_COLUMN_INVALID, 216 RS_COLUMN_INVALID,
217 RS_COLUMN_INVALID, 217 RS_COLUMN_INVALID,
@@ -223,8 +223,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
223 .ant = ANT_B, 223 .ant = ANT_B,
224 .next_columns = { 224 .next_columns = {
225 RS_COLUMN_LEGACY_ANT_A, 225 RS_COLUMN_LEGACY_ANT_A,
226 RS_COLUMN_SISO_ANT_A,
227 RS_COLUMN_SISO_ANT_B, 226 RS_COLUMN_SISO_ANT_B,
227 RS_COLUMN_MIMO2,
228 RS_COLUMN_INVALID, 228 RS_COLUMN_INVALID,
229 RS_COLUMN_INVALID, 229 RS_COLUMN_INVALID,
230 RS_COLUMN_INVALID, 230 RS_COLUMN_INVALID,
@@ -238,10 +238,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
238 RS_COLUMN_SISO_ANT_B, 238 RS_COLUMN_SISO_ANT_B,
239 RS_COLUMN_MIMO2, 239 RS_COLUMN_MIMO2,
240 RS_COLUMN_SISO_ANT_A_SGI, 240 RS_COLUMN_SISO_ANT_A_SGI,
241 RS_COLUMN_SISO_ANT_B_SGI,
242 RS_COLUMN_LEGACY_ANT_A, 241 RS_COLUMN_LEGACY_ANT_A,
243 RS_COLUMN_LEGACY_ANT_B, 242 RS_COLUMN_LEGACY_ANT_B,
244 RS_COLUMN_INVALID, 243 RS_COLUMN_INVALID,
244 RS_COLUMN_INVALID,
245 }, 245 },
246 .checks = { 246 .checks = {
247 rs_siso_allow, 247 rs_siso_allow,
@@ -254,10 +254,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
254 RS_COLUMN_SISO_ANT_A, 254 RS_COLUMN_SISO_ANT_A,
255 RS_COLUMN_MIMO2, 255 RS_COLUMN_MIMO2,
256 RS_COLUMN_SISO_ANT_B_SGI, 256 RS_COLUMN_SISO_ANT_B_SGI,
257 RS_COLUMN_SISO_ANT_A_SGI,
258 RS_COLUMN_LEGACY_ANT_A, 257 RS_COLUMN_LEGACY_ANT_A,
259 RS_COLUMN_LEGACY_ANT_B, 258 RS_COLUMN_LEGACY_ANT_B,
260 RS_COLUMN_INVALID, 259 RS_COLUMN_INVALID,
260 RS_COLUMN_INVALID,
261 }, 261 },
262 .checks = { 262 .checks = {
263 rs_siso_allow, 263 rs_siso_allow,
@@ -271,10 +271,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
271 RS_COLUMN_SISO_ANT_B_SGI, 271 RS_COLUMN_SISO_ANT_B_SGI,
272 RS_COLUMN_MIMO2_SGI, 272 RS_COLUMN_MIMO2_SGI,
273 RS_COLUMN_SISO_ANT_A, 273 RS_COLUMN_SISO_ANT_A,
274 RS_COLUMN_SISO_ANT_B,
275 RS_COLUMN_MIMO2,
276 RS_COLUMN_LEGACY_ANT_A, 274 RS_COLUMN_LEGACY_ANT_A,
277 RS_COLUMN_LEGACY_ANT_B, 275 RS_COLUMN_LEGACY_ANT_B,
276 RS_COLUMN_INVALID,
277 RS_COLUMN_INVALID,
278 }, 278 },
279 .checks = { 279 .checks = {
280 rs_siso_allow, 280 rs_siso_allow,
@@ -289,10 +289,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
289 RS_COLUMN_SISO_ANT_A_SGI, 289 RS_COLUMN_SISO_ANT_A_SGI,
290 RS_COLUMN_MIMO2_SGI, 290 RS_COLUMN_MIMO2_SGI,
291 RS_COLUMN_SISO_ANT_B, 291 RS_COLUMN_SISO_ANT_B,
292 RS_COLUMN_SISO_ANT_A,
293 RS_COLUMN_MIMO2,
294 RS_COLUMN_LEGACY_ANT_A, 292 RS_COLUMN_LEGACY_ANT_A,
295 RS_COLUMN_LEGACY_ANT_B, 293 RS_COLUMN_LEGACY_ANT_B,
294 RS_COLUMN_INVALID,
295 RS_COLUMN_INVALID,
296 }, 296 },
297 .checks = { 297 .checks = {
298 rs_siso_allow, 298 rs_siso_allow,
@@ -304,12 +304,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
304 .ant = ANT_AB, 304 .ant = ANT_AB,
305 .next_columns = { 305 .next_columns = {
306 RS_COLUMN_SISO_ANT_A, 306 RS_COLUMN_SISO_ANT_A,
307 RS_COLUMN_SISO_ANT_B,
308 RS_COLUMN_SISO_ANT_A_SGI,
309 RS_COLUMN_SISO_ANT_B_SGI,
310 RS_COLUMN_MIMO2_SGI, 307 RS_COLUMN_MIMO2_SGI,
311 RS_COLUMN_LEGACY_ANT_A, 308 RS_COLUMN_LEGACY_ANT_A,
312 RS_COLUMN_LEGACY_ANT_B, 309 RS_COLUMN_LEGACY_ANT_B,
310 RS_COLUMN_INVALID,
311 RS_COLUMN_INVALID,
312 RS_COLUMN_INVALID,
313 }, 313 },
314 .checks = { 314 .checks = {
315 rs_mimo_allow, 315 rs_mimo_allow,
@@ -321,12 +321,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
321 .sgi = true, 321 .sgi = true,
322 .next_columns = { 322 .next_columns = {
323 RS_COLUMN_SISO_ANT_A_SGI, 323 RS_COLUMN_SISO_ANT_A_SGI,
324 RS_COLUMN_SISO_ANT_B_SGI,
325 RS_COLUMN_SISO_ANT_A,
326 RS_COLUMN_SISO_ANT_B,
327 RS_COLUMN_MIMO2, 324 RS_COLUMN_MIMO2,
328 RS_COLUMN_LEGACY_ANT_A, 325 RS_COLUMN_LEGACY_ANT_A,
329 RS_COLUMN_LEGACY_ANT_B, 326 RS_COLUMN_LEGACY_ANT_B,
327 RS_COLUMN_INVALID,
328 RS_COLUMN_INVALID,
329 RS_COLUMN_INVALID,
330 }, 330 },
331 .checks = { 331 .checks = {
332 rs_mimo_allow, 332 rs_mimo_allow,
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); 527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
528 for (i = 0; i < IWL_RATE_COUNT; i++) 528 for (i = 0; i < IWL_RATE_COUNT; i++)
529 rs_rate_scale_clear_window(&tbl->win[i]); 529 rs_rate_scale_clear_window(&tbl->win[i]);
530
531 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
532 rs_rate_scale_clear_window(&tbl->tpc_win[i]);
530} 533}
531 534
532static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) 535static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
656 return 0; 659 return 0;
657} 660}
658 661
659static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, 662static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
660 int scale_index, int attempts, int successes) 663 struct iwl_scale_tbl_info *tbl,
664 int scale_index, int attempts, int successes,
665 u8 reduced_txp)
661{ 666{
662 struct iwl_rate_scale_data *window = NULL; 667 struct iwl_rate_scale_data *window = NULL;
668 int ret;
663 669
664 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 670 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
665 return -EINVAL; 671 return -EINVAL;
666 672
673 if (tbl->column != RS_COLUMN_INVALID) {
674 lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
675 lq_sta->tx_stats[tbl->column][scale_index].success += successes;
676 }
677
667 /* Select window for current tx bit rate */ 678 /* Select window for current tx bit rate */
668 window = &(tbl->win[scale_index]); 679 window = &(tbl->win[scale_index]);
669 680
681 ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
682 window);
683 if (ret)
684 return ret;
685
686 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
687 return -EINVAL;
688
689 window = &tbl->tpc_win[reduced_txp];
670 return _rs_collect_tx_data(tbl, scale_index, attempts, successes, 690 return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
671 window); 691 window);
672} 692}
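After this hunk every TX result is booked three times: into the lifetime per-column counters (lq_sta->tx_stats), the per-rate scaling window (tbl->win), and the per-TPC-step window (tbl->tpc_win) selected by reduced_txp. A minimal model of that triple accounting (the window math is elided and the array sizes are illustrative; 15 and 8 match the rate and column name tables later in this file):

    struct win { int total, success; };

    struct stats {
            struct win rate_win[15];    /* per rate index    (tbl->win)         */
            struct win tpc_win[5];      /* per TPC step      (tbl->tpc_win)     */
            struct win column[8][15];   /* lifetime counters (lq_sta->tx_stats) */
    };

    static void collect(struct stats *s, int col, int rate, int tpc,
                        int attempts, int successes)
    {
            s->column[col][rate].total   += attempts;
            s->column[col][rate].success += successes;
            s->rate_win[rate].total      += attempts;
            s->rate_win[rate].success    += successes;
            s->tpc_win[tpc].total        += attempts;
            s->tpc_win[tpc].success      += successes;
    }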
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1000 u32 ucode_rate; 1020 u32 ucode_rate;
1001 struct rs_rate rate; 1021 struct rs_rate rate;
1002 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 1022 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1023 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
1003 1024
1004 /* Treat uninitialized rate scaling data same as non-existing. */ 1025 /* Treat uninitialized rate scaling data same as non-existing. */
1005 if (!lq_sta) { 1026 if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1141 if (info->flags & IEEE80211_TX_STAT_AMPDU) { 1162 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1142 ucode_rate = le32_to_cpu(table->rs_table[0]); 1163 ucode_rate = le32_to_cpu(table->rs_table[0]);
1143 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); 1164 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1144 rs_collect_tx_data(curr_tbl, rate.index, 1165 rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
1145 info->status.ampdu_len, 1166 info->status.ampdu_len,
1146 info->status.ampdu_ack_len); 1167 info->status.ampdu_ack_len,
1168 reduced_txp);
1147 1169
1148 /* Update success/fail counts if not searching for new mode */ 1170 /* Update success/fail counts if not searching for new mode */
1149 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { 1171 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1176 else 1198 else
1177 continue; 1199 continue;
1178 1200
1179 rs_collect_tx_data(tmp_tbl, rate.index, 1, 1201 rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
1180 i < retries ? 0 : legacy_success); 1202 i < retries ? 0 : legacy_success,
1203 reduced_txp);
1181 } 1204 }
1182 1205
1183 /* Update success/fail counts if not searching for new mode */ 1206 /* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1188 } 1211 }
1189 /* The last TX rate is cached in lq_sta; it's set in if/else above */ 1212 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1190 lq_sta->last_rate_n_flags = ucode_rate; 1213 lq_sta->last_rate_n_flags = ucode_rate;
1214 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
1191done: 1215done:
1192 /* See if there's a better rate or modulation mode to try. */ 1216 /* See if there's a better rate or modulation mode to try. */
1193 if (sta && sta->supp_rates[sband->band]) 1217 if (sta && sta->supp_rates[sband->band])
@@ -1311,105 +1335,50 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1311 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); 1335 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1312} 1336}
1313 1337
1314/*
1315 * Find starting rate for new "search" high-throughput mode of modulation.
1316 * Goal is to find lowest expected rate (under perfect conditions) that is
1317 * above the current measured throughput of "active" mode, to give new mode
1318 * a fair chance to prove itself without too many challenges.
1319 *
1320 * This gets called when transitioning to more aggressive modulation
1321 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1322 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1323 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1324 * bit rate will typically need to increase, but not if performance was bad.
1325 */
1326static s32 rs_get_best_rate(struct iwl_mvm *mvm, 1338static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1327 struct iwl_lq_sta *lq_sta, 1339 struct iwl_lq_sta *lq_sta,
1328 struct iwl_scale_tbl_info *tbl, /* "search" */ 1340 struct iwl_scale_tbl_info *tbl, /* "search" */
1329 u16 rate_mask, s8 index) 1341 unsigned long rate_mask, s8 index)
1330{ 1342{
1331 /* "active" values */
1332 struct iwl_scale_tbl_info *active_tbl = 1343 struct iwl_scale_tbl_info *active_tbl =
1333 &(lq_sta->lq_info[lq_sta->active_tbl]); 1344 &(lq_sta->lq_info[lq_sta->active_tbl]);
1334 s32 active_sr = active_tbl->win[index].success_ratio; 1345 s32 success_ratio = active_tbl->win[index].success_ratio;
1335 s32 active_tpt = active_tbl->expected_tpt[index]; 1346 u16 expected_current_tpt = active_tbl->expected_tpt[index];
1336 /* expected "search" throughput */
1337 const u16 *tpt_tbl = tbl->expected_tpt; 1347 const u16 *tpt_tbl = tbl->expected_tpt;
1338
1339 s32 new_rate, high, low, start_hi;
1340 u16 high_low; 1348 u16 high_low;
1341 s8 rate = index; 1349 u32 target_tpt;
1342 1350 int rate_idx;
1343 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1344
1345 while (1) {
1346 high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
1347 tbl->rate.type);
1348
1349 low = high_low & 0xff;
1350 high = (high_low >> 8) & 0xff;
1351 1351
1352 /* 1352 if (success_ratio > RS_SR_NO_DECREASE) {
1353 * Lower the "search" bit rate, to give new "search" mode 1353 target_tpt = 100 * expected_current_tpt;
1354 * approximately the same throughput as "active" if: 1354 IWL_DEBUG_RATE(mvm,
1355 * 1355 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
1356 * 1) "Active" mode has been working modestly well (but not 1356 success_ratio, target_tpt);
1357 * great), and expected "search" throughput (under perfect 1357 } else {
1358 * conditions) at candidate rate is above the actual 1358 target_tpt = lq_sta->last_tpt;
1359 * measured "active" throughput (but less than expected 1359 IWL_DEBUG_RATE(mvm,
1360 * "active" throughput under perfect conditions). 1360 "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
1360 * OR 1360 "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
1362 * 2) "Active" mode has been working perfectly or very well 1362 }
1363 * and expected "search" throughput (under perfect
1364 * conditions) at candidate rate is above expected
1365 * "active" throughput (under perfect conditions).
1366 */
1367 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1368 ((active_sr > RS_SR_FORCE_DECREASE) &&
1369 (active_sr <= IWL_RATE_HIGH_TH) &&
1370 (tpt_tbl[rate] <= active_tpt))) ||
1371 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1372 (tpt_tbl[rate] > active_tpt))) {
1373 /* (2nd or later pass)
1374 * If we've already tried to raise the rate, and are
1375 * now trying to lower it, use the higher rate. */
1376 if (start_hi != IWL_RATE_INVALID) {
1377 new_rate = start_hi;
1378 break;
1379 }
1380
1381 new_rate = rate;
1382 1363
1383 /* Loop again with lower rate */ 1364 rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
1384 if (low != IWL_RATE_INVALID)
1385 rate = low;
1386 1365
1387 /* Lower rate not available, use the original */ 1366 while (rate_idx != IWL_RATE_INVALID) {
1388 else 1367 if (target_tpt < (100 * tpt_tbl[rate_idx]))
1389 break; 1368 break;
1390
1391 /* Else try to raise the "search" rate to match "active" */
1392 } else {
1393 /* (2nd or later pass)
1394 * If we've already tried to lower the rate, and are
1395 * now trying to raise it, use the lower rate. */
1396 if (new_rate != IWL_RATE_INVALID)
1397 break;
1398 1369
1399 /* Loop again with higher rate */ 1370 high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
1400 else if (high != IWL_RATE_INVALID) { 1371 tbl->rate.type);
1401 start_hi = high;
1402 rate = high;
1403 1372
1404 /* Higher rate not available, use the original */ 1373 rate_idx = (high_low >> 8) & 0xff;
1405 } else {
1406 new_rate = rate;
1407 break;
1408 }
1409 }
1410 } 1374 }
1411 1375
1412 return new_rate; 1376 IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
1377 rate_idx, target_tpt,
1378 rate_idx != IWL_RATE_INVALID ?
1379 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
1380
1381 return rate_idx;
1413} 1382}
1414 1383
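The rewrite replaces the old raise/lower oscillation with a single pass: pick a target throughput (100x the expected current throughput when the success ratio is high, otherwise the last measured throughput), then walk the allowed rates from the lowest and return the first whose expected throughput beats the target. A sketch of that search (the driver steps through the mask via rs_get_adjacent_rate(); a linear scan models the same result):

    static int best_rate(const unsigned short *tpt_tbl, unsigned long mask,
                         unsigned target_tpt, int nrates)
    {
            int i;

            for (i = 0; i < nrates; i++) {
                    if (!(mask & (1UL << i)))
                            continue;            /* rate not allowed        */
                    if (100u * tpt_tbl[i] > target_tpt)
                            return i;            /* first rate beats target */
            }
            return -1;                           /* IWL_RATE_INVALID here   */
    }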
1415static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) 1384static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
@@ -1584,7 +1553,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1584 1553
1585 tpt = lq_sta->last_tpt / 100; 1554 tpt = lq_sta->last_tpt / 100;
1586 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col, 1555 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
1587 tbl->rate.bw); 1556 rs_bw_from_sta_bw(sta));
1588 if (WARN_ON_ONCE(!expected_tpt_tbl)) 1557 if (WARN_ON_ONCE(!expected_tpt_tbl))
1589 continue; 1558 continue;
1590 1559
@@ -1625,7 +1594,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1625 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; 1594 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
1626 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1595 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1627 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1596 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1628 u16 rate_mask = 0; 1597 unsigned long rate_mask = 0;
1629 u32 rate_idx = 0; 1598 u32 rate_idx = 0;
1630 1599
1631 memcpy(search_tbl, tbl, sz); 1600 memcpy(search_tbl, tbl, sz);
@@ -1667,7 +1636,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1667 !(BIT(rate_idx) & rate_mask)) { 1636 !(BIT(rate_idx) & rate_mask)) {
1668 IWL_DEBUG_RATE(mvm, 1637 IWL_DEBUG_RATE(mvm,
1669 "can not switch with index %d" 1638 "can not switch with index %d"
1670 " rate mask %x\n", 1639 " rate mask %lx\n",
1671 rate_idx, rate_mask); 1640 rate_idx, rate_mask);
1672 1641
1673 goto err; 1642 goto err;
@@ -1769,6 +1738,203 @@ out:
1769 return action; 1738 return action;
1770} 1739}
1771 1740
1741static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1742 int *weaker, int *stronger)
1743{
1744 *weaker = index + TPC_TX_POWER_STEP;
1745 if (*weaker > TPC_MAX_REDUCTION)
1746 *weaker = TPC_INVALID;
1747
1748 *stronger = index - TPC_TX_POWER_STEP;
1749 if (*stronger < 0)
1750 *stronger = TPC_INVALID;
1751}
1752
1753static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1754 struct rs_rate *rate, enum ieee80211_band band)
1755{
1756 int index = rate->index;
1757 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
1758 bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
1759 !vif->bss_conf.ps);
1760
1761 IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
1762 cam, sta_ps_disabled);
1763 /*
1764 * allow tpc only if power management is enabled, or bt coex
1765 * activity grade allows it and we are on 2.4 GHz.
1766 */
1767 if ((cam || sta_ps_disabled) &&
1768 !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
1769 return false;
1770
1771 IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
1772 if (is_legacy(rate))
1773 return index == IWL_RATE_54M_INDEX;
1774 if (is_ht(rate))
1775 return index == IWL_RATE_MCS_7_INDEX;
1776 if (is_vht(rate))
1777 return index == IWL_RATE_MCS_7_INDEX ||
1778 index == IWL_RATE_MCS_8_INDEX ||
1779 index == IWL_RATE_MCS_9_INDEX;
1780
1781 WARN_ON_ONCE(1);
1782 return false;
1783}
1784
1785enum tpc_action {
1786 TPC_ACTION_STAY,
1787 TPC_ACTION_DECREASE,
1788 TPC_ACTION_INCREASE,
1789 TPC_ACTION_NO_RESTIRCTION,
1790};
1791
1792static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
1793 s32 sr, int weak, int strong,
1794 int current_tpt,
1795 int weak_tpt, int strong_tpt)
1796{
1797 /* stay until we have valid tpt */
1798 if (current_tpt == IWL_INVALID_VALUE) {
1799 IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
1800 return TPC_ACTION_STAY;
1801 }
1802
1803 /* Too many failures, increase txp */
1804 if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
1805 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
1806 return TPC_ACTION_NO_RESTIRCTION;
1807 }
1808
1809 /* try decreasing first if applicable */
1810 if (weak != TPC_INVALID) {
1811 if (weak_tpt == IWL_INVALID_VALUE &&
1812 (strong_tpt == IWL_INVALID_VALUE ||
1813 current_tpt >= strong_tpt)) {
1814 IWL_DEBUG_RATE(mvm,
1815 "no weak txp measurement. decrease txp\n");
1816 return TPC_ACTION_DECREASE;
1817 }
1818
1819 if (weak_tpt > current_tpt) {
1820 IWL_DEBUG_RATE(mvm,
1821 "lower txp has better tpt. decrease txp\n");
1822 return TPC_ACTION_DECREASE;
1823 }
1824 }
1825
1826 /* next, increase if needed */
1827 if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
1828 if (weak_tpt == IWL_INVALID_VALUE &&
1829 strong_tpt != IWL_INVALID_VALUE &&
1830 current_tpt < strong_tpt) {
1831 IWL_DEBUG_RATE(mvm,
1832 "higher txp has better tpt. increase txp\n");
1833 return TPC_ACTION_INCREASE;
1834 }
1835
1836 if (weak_tpt < current_tpt &&
1837 (strong_tpt == IWL_INVALID_VALUE ||
1838 strong_tpt > current_tpt)) {
1839 IWL_DEBUG_RATE(mvm,
1840 "lower txp has worse tpt. increase txp\n");
1841 return TPC_ACTION_INCREASE;
1842 }
1843 }
1844
1845 IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
1846 return TPC_ACTION_STAY;
1847}
1848
1849static bool rs_tpc_perform(struct iwl_mvm *mvm,
1850 struct ieee80211_sta *sta,
1851 struct iwl_lq_sta *lq_sta,
1852 struct iwl_scale_tbl_info *tbl)
1853{
1854 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
1855 struct ieee80211_vif *vif = mvm_sta->vif;
1856 struct ieee80211_chanctx_conf *chanctx_conf;
1857 enum ieee80211_band band;
1858 struct iwl_rate_scale_data *window;
1859 struct rs_rate *rate = &tbl->rate;
1860 enum tpc_action action;
1861 s32 sr;
1862 u8 cur = lq_sta->lq.reduced_tpc;
1863 int current_tpt;
1864 int weak, strong;
1865 int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
1866
1867#ifdef CONFIG_MAC80211_DEBUGFS
1868 if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
1869 IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
1870 lq_sta->dbg_fixed_txp_reduction);
1871 lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
1872 return cur != lq_sta->dbg_fixed_txp_reduction;
1873 }
1874#endif
1875
1876 rcu_read_lock();
1877 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1878 if (WARN_ON(!chanctx_conf))
1879 band = IEEE80211_NUM_BANDS;
1880 else
1881 band = chanctx_conf->def.chan->band;
1882 rcu_read_unlock();
1883
1884 if (!rs_tpc_allowed(mvm, vif, rate, band)) {
1885 IWL_DEBUG_RATE(mvm,
1886 "tpc is not allowed. remove txp restrictions\n");
1887 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1888 return cur != TPC_NO_REDUCTION;
1889 }
1890
1891 rs_get_adjacent_txp(mvm, cur, &weak, &strong);
1892
1893 /* Collect measured throughputs for current and adjacent rates */
1894 window = tbl->tpc_win;
1895 sr = window[cur].success_ratio;
1896 current_tpt = window[cur].average_tpt;
1897 if (weak != TPC_INVALID)
1898 weak_tpt = window[weak].average_tpt;
1899 if (strong != TPC_INVALID)
1900 strong_tpt = window[strong].average_tpt;
1901
1902 IWL_DEBUG_RATE(mvm,
1903 "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
1904 cur, current_tpt, sr, weak, strong,
1905 weak_tpt, strong_tpt);
1906
1907 action = rs_get_tpc_action(mvm, sr, weak, strong,
1908 current_tpt, weak_tpt, strong_tpt);
1909
1910 /* override actions if we are on the edge */
1911 if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
1912 IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
1913 action = TPC_ACTION_STAY;
1914 } else if (strong == TPC_INVALID &&
1915 (action == TPC_ACTION_INCREASE ||
1916 action == TPC_ACTION_NO_RESTIRCTION)) {
1917 IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
1918 action = TPC_ACTION_STAY;
1919 }
1920
1921 switch (action) {
1922 case TPC_ACTION_DECREASE:
1923 lq_sta->lq.reduced_tpc = weak;
1924 return true;
1925 case TPC_ACTION_INCREASE:
1926 lq_sta->lq.reduced_tpc = strong;
1927 return true;
1928 case TPC_ACTION_NO_RESTIRCTION:
1929 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1930 return true;
1931 case TPC_ACTION_STAY:
1932 /* do nothing */
1933 break;
1934 }
1935 return false;
1936}
1937
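rs_get_tpc_action() above is a pure function of the success ratio and the measured throughputs at the current, weaker and stronger power steps, so it is easy to table-test. A runnable harness (the threshold constants are assumptions; their real values are not shown in this hunk):

    #include <stdio.h>

    #define INVALID           (-1)  /* stands in for IWL_INVALID_VALUE */
    #define SR_FORCE_INCREASE 10    /* assumed TPC_SR_FORCE_INCREASE   */
    #define SR_NO_INCREASE    85    /* assumed TPC_SR_NO_INCREASE      */

    enum act { STAY, DEC, INC, NO_RESTRICTION };

    static enum act tpc_action(int sr, int has_weak, int has_strong,
                               int cur, int weak, int strong)
    {
            if (cur == INVALID)
                    return STAY;                   /* no valid tpt yet     */
            if (sr <= SR_FORCE_INCREASE || cur == 0)
                    return NO_RESTRICTION;         /* too many failures    */
            if (has_weak) {
                    if (weak == INVALID &&
                        (strong == INVALID || cur >= strong))
                            return DEC;            /* probe lower power    */
                    if (weak > cur)
                            return DEC;            /* lower power is better */
            }
            if (sr < SR_NO_INCREASE && has_strong) {
                    if (weak == INVALID && strong != INVALID && cur < strong)
                            return INC;
                    if (weak < cur && (strong == INVALID || strong > cur))
                            return INC;            /* lower power is worse  */
            }
            return STAY;
    }

    int main(void)
    {
            /* weaker step measured better than current -> decrease power */
            printf("%d\n", tpc_action(50, 1, 1, 100, 120, INVALID) == DEC);
            return 0;
    }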
1772/* 1938/*
1773 * Do rate scaling and search for new modulation mode. 1939 * Do rate scaling and search for new modulation mode.
1774 */ 1940 */
@@ -2019,6 +2185,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2019 break; 2185 break;
2020 case RS_ACTION_STAY: 2186 case RS_ACTION_STAY:
2021 /* No change */ 2187 /* No change */
2188 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
2189 update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
2190 break;
2022 default: 2191 default:
2023 break; 2192 break;
2024 } 2193 }
@@ -2271,10 +2440,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2271 if (i == IWL_RATE_9M_INDEX) 2440 if (i == IWL_RATE_9M_INDEX)
2272 continue; 2441 continue;
2273 2442
2274 /* Disable MCS9 as a workaround */
2275 if (i == IWL_RATE_MCS_9_INDEX)
2276 continue;
2277
2278 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ 2443 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2279 if (i == IWL_RATE_MCS_9_INDEX && 2444 if (i == IWL_RATE_MCS_9_INDEX &&
2280 sta->bandwidth == IEEE80211_STA_RX_BW_20) 2445 sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2293,10 +2458,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2293 if (i == IWL_RATE_9M_INDEX) 2458 if (i == IWL_RATE_9M_INDEX)
2294 continue; 2459 continue;
2295 2460
2296 /* Disable MCS9 as a workaround */
2297 if (i == IWL_RATE_MCS_9_INDEX)
2298 continue;
2299
2300 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ 2461 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2301 if (i == IWL_RATE_MCS_9_INDEX && 2462 if (i == IWL_RATE_MCS_9_INDEX &&
2302 sta->bandwidth == IEEE80211_STA_RX_BW_20) 2463 sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2478,6 +2639,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2478 lq_sta->is_agg = 0; 2639 lq_sta->is_agg = 0;
2479#ifdef CONFIG_MAC80211_DEBUGFS 2640#ifdef CONFIG_MAC80211_DEBUGFS
2480 lq_sta->dbg_fixed_rate = 0; 2641 lq_sta->dbg_fixed_rate = 0;
2642 lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
2481#endif 2643#endif
2482#ifdef CONFIG_IWLWIFI_DEBUGFS 2644#ifdef CONFIG_IWLWIFI_DEBUGFS
2483 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); 2645 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2815,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2653 rs_build_rates_table_from_fixed(mvm, lq_cmd, 2815 rs_build_rates_table_from_fixed(mvm, lq_cmd,
2654 lq_sta->band, 2816 lq_sta->band,
2655 lq_sta->dbg_fixed_rate); 2817 lq_sta->dbg_fixed_rate);
2818 lq_cmd->reduced_tpc = 0;
2656 ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> 2819 ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2657 RATE_MCS_ANT_POS; 2820 RATE_MCS_ANT_POS;
2658 } else 2821 } else
@@ -2783,7 +2946,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2783 size_t buf_size; 2946 size_t buf_size;
2784 u32 parsed_rate; 2947 u32 parsed_rate;
2785 2948
2786
2787 mvm = lq_sta->drv; 2949 mvm = lq_sta->drv;
2788 memset(buf, 0, sizeof(buf)); 2950 memset(buf, 0, sizeof(buf));
2789 buf_size = min(count, sizeof(buf) - 1); 2951 buf_size = min(count, sizeof(buf) - 1);
@@ -2856,6 +3018,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2856 lq_sta->lq.agg_disable_start_th, 3018 lq_sta->lq.agg_disable_start_th,
2857 lq_sta->lq.agg_frame_cnt_limit); 3019 lq_sta->lq.agg_frame_cnt_limit);
2858 3020
3021 desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
2859 desc += sprintf(buff+desc, 3022 desc += sprintf(buff+desc,
2860 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", 3023 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2861 lq_sta->lq.initial_rate_index[0], 3024 lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3091,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2928 .llseek = default_llseek, 3091 .llseek = default_llseek,
2929}; 3092};
2930 3093
3094static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
3095 char __user *user_buf,
3096 size_t count, loff_t *ppos)
3097{
3098 static const char * const column_name[] = {
3099 [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
3100 [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
3101 [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
3102 [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
3103 [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
3104 [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
3105 [RS_COLUMN_MIMO2] = "MIMO2",
3106 [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
3107 };
3108
3109 static const char * const rate_name[] = {
3110 [IWL_RATE_1M_INDEX] = "1M",
3111 [IWL_RATE_2M_INDEX] = "2M",
3112 [IWL_RATE_5M_INDEX] = "5.5M",
3113 [IWL_RATE_11M_INDEX] = "11M",
3114 [IWL_RATE_6M_INDEX] = "6M|MCS0",
3115 [IWL_RATE_9M_INDEX] = "9M",
3116 [IWL_RATE_12M_INDEX] = "12M|MCS1",
3117 [IWL_RATE_18M_INDEX] = "18M|MCS2",
3118 [IWL_RATE_24M_INDEX] = "24M|MCS3",
3119 [IWL_RATE_36M_INDEX] = "36M|MCS4",
3120 [IWL_RATE_48M_INDEX] = "48M|MCS5",
3121 [IWL_RATE_54M_INDEX] = "54M|MCS6",
3122 [IWL_RATE_MCS_7_INDEX] = "MCS7",
3123 [IWL_RATE_MCS_8_INDEX] = "MCS8",
3124 [IWL_RATE_MCS_9_INDEX] = "MCS9",
3125 };
3126
3127 char *buff, *pos, *endpos;
3128 int col, rate;
3129 ssize_t ret;
3130 struct iwl_lq_sta *lq_sta = file->private_data;
3131 struct rs_rate_stats *stats;
3132 static const size_t bufsz = 1024;
3133
3134 buff = kmalloc(bufsz, GFP_KERNEL);
3135 if (!buff)
3136 return -ENOMEM;
3137
3138 pos = buff;
3139 endpos = pos + bufsz;
3140
3141 pos += scnprintf(pos, endpos - pos, "COLUMN,");
3142 for (rate = 0; rate < IWL_RATE_COUNT; rate++)
3143 pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
3144 pos += scnprintf(pos, endpos - pos, "\n");
3145
3146 for (col = 0; col < RS_COLUMN_COUNT; col++) {
3147 pos += scnprintf(pos, endpos - pos,
3148 "%s,", column_name[col]);
3149
3150 for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
3151 stats = &(lq_sta->tx_stats[col][rate]);
3152 pos += scnprintf(pos, endpos - pos,
3153 "%llu/%llu,",
3154 stats->success,
3155 stats->total);
3156 }
3157 pos += scnprintf(pos, endpos - pos, "\n");
3158 }
3159
3160 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
3161 kfree(buff);
3162 return ret;
3163}
3164
3165static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
3166 const char __user *user_buf,
3167 size_t count, loff_t *ppos)
3168{
3169 struct iwl_lq_sta *lq_sta = file->private_data;
3170 memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
3171
3172 return count;
3173}
3174
3175static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3176 .read = rs_sta_dbgfs_drv_tx_stats_read,
3177 .write = rs_sta_dbgfs_drv_tx_stats_write,
3178 .open = simple_open,
3179 .llseek = default_llseek,
3180};
3181
2931static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) 3182static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
2932{ 3183{
2933 struct iwl_lq_sta *lq_sta = mvm_sta; 3184 struct iwl_lq_sta *lq_sta = mvm_sta;
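Note: the new drv_tx_stats entry dumps the tx_stats matrix as CSV — a COLUMN header row, then one line per rate-scaling column holding a "success/total" pair per rate index — and, per the write handler above, writing anything to the file zeroes the counters. A user-space sketch, purely illustrative, that turns one parsed cell into a percentage:

/* Illustration only: convert one "success/total" cell parsed from the
 * drv_tx_stats CSV above into a success ratio. */
#include <stdio.h>

static double cell_sr(unsigned long long success, unsigned long long total)
{
	return total ? 100.0 * (double)success / (double)total : 0.0;
}

int main(void)
{
	/* e.g. a "12/16" cell from the MIMO2 row at MCS7 */
	printf("SR = %.1f%%\n", cell_sr(12, 16));
	return 0;
}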
@@ -2937,9 +3188,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
2937 lq_sta->rs_sta_dbgfs_stats_table_file = 3188 lq_sta->rs_sta_dbgfs_stats_table_file =
2938 debugfs_create_file("rate_stats_table", S_IRUSR, dir, 3189 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2939 lq_sta, &rs_sta_dbgfs_stats_table_ops); 3190 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3191 lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
3192 debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
3193 lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
2940 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = 3194 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2941 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, 3195 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2942 &lq_sta->tx_agg_tid_en); 3196 &lq_sta->tx_agg_tid_en);
3197 lq_sta->rs_sta_dbgfs_reduced_txp_file =
3198 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
3199 &lq_sta->dbg_fixed_txp_reduction);
2943} 3200}
2944 3201
2945static void rs_remove_debugfs(void *mvm, void *mvm_sta) 3202static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3204,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
2947 struct iwl_lq_sta *lq_sta = mvm_sta; 3204 struct iwl_lq_sta *lq_sta = mvm_sta;
2948 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 3205 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2949 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 3206 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
3207 debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
2950 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); 3208 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
3209 debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
2951} 3210}
2952#endif 3211#endif
2953 3212
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 0acfac96a56c..374a83d7db25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -158,6 +158,13 @@ enum {
158#define RS_SR_FORCE_DECREASE 1920 /* 15% */ 158#define RS_SR_FORCE_DECREASE 1920 /* 15% */
159#define RS_SR_NO_DECREASE 10880 /* 85% */ 159#define RS_SR_NO_DECREASE 10880 /* 85% */
160 160
161#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
162#define TPC_SR_NO_INCREASE 10880 /* 85% */
163#define TPC_TX_POWER_STEP 3
164#define TPC_MAX_REDUCTION 15
165#define TPC_NO_REDUCTION 0
166#define TPC_INVALID 0xff
167
161#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 168#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
162#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 169#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
163#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) 170#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
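Note: the TPC thresholds reuse the fixed-point success-ratio scale of the RS_SR_* values just above, where 12800 stands for 100% (i.e. percent * 128). A quick standalone check of the two commented percentages, assuming that scale:

#include <assert.h>

#define SR_SCALE 128	/* assumed: success ratio stored as percent * 128 */

int main(void)
{
	assert(9600 == 75 * SR_SCALE);	/* TPC_SR_FORCE_INCREASE, 75% */
	assert(10880 == 85 * SR_SCALE);	/* TPC_SR_NO_INCREASE, 85% */
	return 0;
}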
@@ -266,9 +273,16 @@ enum rs_column {
266 RS_COLUMN_MIMO2_SGI, 273 RS_COLUMN_MIMO2_SGI,
267 274
268 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI, 275 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
276 RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
269 RS_COLUMN_INVALID, 277 RS_COLUMN_INVALID,
270}; 278};
271 279
280/* Packet stats per rate */
281struct rs_rate_stats {
282 u64 success;
283 u64 total;
284};
285
272/** 286/**
273 * struct iwl_scale_tbl_info -- tx params and success history for all rates 287 * struct iwl_scale_tbl_info -- tx params and success history for all rates
274 * 288 *
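Note: struct rs_rate_stats backs the new per-station tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT] matrix added below; total counts every transmission attempt at a given column/rate cell and success counts the acked ones. A minimal sketch of the bookkeeping the matrix implies (the real accounting hook lives in the rs.c tx-status path and is not part of this hunk):

#include <stdbool.h>
#include <stdint.h>

struct rs_rate_stats {	/* mirrors the definition above */
	uint64_t success;
	uint64_t total;
};

static void rs_rate_stats_update(struct rs_rate_stats *s, bool acked)
{
	s->total++;		/* one more attempt at this column/rate */
	if (acked)
		s->success++;
}

int main(void)
{
	struct rs_rate_stats s = { 0, 0 };

	rs_rate_stats_update(&s, true);
	rs_rate_stats_update(&s, false);
	return !(s.success == 1 && s.total == 2);
}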
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
280 enum rs_column column; 294 enum rs_column column;
281 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 295 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
282 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 296 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
297 /* per txpower-reduction history */
298 struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
283}; 299};
284 300
285enum { 301enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
315 bool is_vht; 331 bool is_vht;
316 enum ieee80211_band band; 332 enum ieee80211_band band;
317 333
334 struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
335
318 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 336 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
319 unsigned long active_legacy_rate; 337 unsigned long active_legacy_rate;
320 unsigned long active_siso_rate; 338 unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
334#ifdef CONFIG_MAC80211_DEBUGFS 352#ifdef CONFIG_MAC80211_DEBUGFS
335 struct dentry *rs_sta_dbgfs_scale_table_file; 353 struct dentry *rs_sta_dbgfs_scale_table_file;
336 struct dentry *rs_sta_dbgfs_stats_table_file; 354 struct dentry *rs_sta_dbgfs_stats_table_file;
355 struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
337 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 356 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
357 struct dentry *rs_sta_dbgfs_reduced_txp_file;
338 u32 dbg_fixed_rate; 358 u32 dbg_fixed_rate;
359 u8 dbg_fixed_txp_reduction;
339#endif 360#endif
340 struct iwl_mvm *drv; 361 struct iwl_mvm *drv;
341 362
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
345 u32 last_rate_n_flags; 366 u32 last_rate_n_flags;
346 /* packets destined for this STA are aggregated */ 367 /* packets destined for this STA are aggregated */
347 u8 is_agg; 368 u8 is_agg;
369
370 /* tx power reduce for this sta */
371 int tpc_reduce;
348}; 372};
349 373
350/* Initialize station's rate scaling information after adding station */ 374/* Initialize station's rate scaling information after adding station */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 6061553a5e44..cf7276967acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -60,7 +60,6 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62#include "iwl-trans.h" 62#include "iwl-trans.h"
63
64#include "mvm.h" 63#include "mvm.h"
65#include "fw-api.h" 64#include "fw-api.h"
66 65
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
130 129
131 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 130 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
132 131
133 ieee80211_rx_ni(mvm->hw, skb); 132 ieee80211_rx(mvm->hw, skb);
134}
135
136static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
137 struct iwl_rx_phy_info *phy_info,
138 struct ieee80211_rx_status *rx_status)
139{
140 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
141 u32 agc_a, agc_b;
142 u32 val;
143
144 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
145 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
146 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
147
148 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
149 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
150 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
151
152 /*
153 * dBm = rssi dB - agc dB - constant.
154 * Higher AGC (higher radio gain) means lower signal.
155 */
156 rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
157 rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
158 max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
159
160 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
161 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
162
163 rx_status->signal = max_rssi_dbm;
164 rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
165 RX_RES_PHY_FLAGS_ANTENNA)
166 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
167 rx_status->chain_signal[0] = rssi_a_dbm;
168 rx_status->chain_signal[1] = rssi_b_dbm;
169} 133}
170 134
171/* 135/*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
337 */ 301 */
338 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ 302 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
339 303
340 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API) 304 iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
341 iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
342 else
343 iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
344 305
345 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, 306 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
346 (unsigned long long)rx_status.mactime); 307 (unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
394 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; 355 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
395 rx_status.flag |= RX_FLAG_VHT; 356 rx_status.flag |= RX_FLAG_VHT;
396 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; 357 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
358 if (rate_n_flags & RATE_MCS_BF_MSK)
359 rx_status.vht_flag |= RX_VHT_FLAG_BF;
397 } else { 360 } else {
398 rx_status.rate_idx = 361 rx_status.rate_idx =
399 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, 362 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54c75d4..4b6c7d4bd199 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -306,7 +306,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
306 .id = SCAN_REQUEST_CMD, 306 .id = SCAN_REQUEST_CMD,
307 .len = { 0, }, 307 .len = { 0, },
308 .data = { mvm->scan_cmd, }, 308 .data = { mvm->scan_cmd, },
309 .flags = CMD_SYNC,
310 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 309 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
311 }; 310 };
312 struct iwl_scan_cmd *cmd = mvm->scan_cmd; 311 struct iwl_scan_cmd *cmd = mvm->scan_cmd;
@@ -319,7 +318,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
319 struct iwl_mvm_scan_params params = {}; 318 struct iwl_mvm_scan_params params = {};
320 319
321 lockdep_assert_held(&mvm->mutex); 320 lockdep_assert_held(&mvm->mutex);
322 BUG_ON(mvm->scan_cmd == NULL); 321
322 /* we should have failed registration if scan_cmd was NULL */
323 if (WARN_ON(mvm->scan_cmd == NULL))
324 return -ENOMEM;
323 325
324 IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); 326 IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
325 mvm->scan_status = IWL_MVM_SCAN_OS; 327 mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -514,7 +516,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
514 ARRAY_SIZE(scan_abort_notif), 516 ARRAY_SIZE(scan_abort_notif),
515 iwl_mvm_scan_abort_notif, NULL); 517 iwl_mvm_scan_abort_notif, NULL);
516 518
517 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 519 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
518 if (ret) { 520 if (ret) {
519 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 521 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
520 /* mac80211's state will be cleaned in the nic_restart flow */ 522 /* mac80211's state will be cleaned in the nic_restart flow */
@@ -538,9 +540,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
538 /* scan status must be locked for proper checking */ 540 /* scan status must be locked for proper checking */
539 lockdep_assert_held(&mvm->mutex); 541 lockdep_assert_held(&mvm->mutex);
540 542
541 IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n", 543 IWL_DEBUG_SCAN(mvm,
544 "Scheduled scan completed, status %s EBS status %s:%d\n",
542 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 545 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
543 "completed" : "aborted"); 546 "completed" : "aborted", scan_notif->ebs_status ==
547 IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
548 scan_notif->ebs_status);
549
544 550
545 /* only call mac80211 completion if the stop was initiated by FW */ 551 /* only call mac80211 completion if the stop was initiated by FW */
546 if (mvm->scan_status == IWL_MVM_SCAN_SCHED) { 552 if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -548,6 +554,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
548 ieee80211_sched_scan_stopped(mvm->hw); 554 ieee80211_sched_scan_stopped(mvm->hw);
549 } 555 }
550 556
557 mvm->last_ebs_successful = !scan_notif->ebs_status;
558
551 return 0; 559 return 0;
552} 560}
553 561
@@ -740,7 +748,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
740 struct iwl_scan_offload_cfg *scan_cfg; 748 struct iwl_scan_offload_cfg *scan_cfg;
741 struct iwl_host_cmd cmd = { 749 struct iwl_host_cmd cmd = {
742 .id = SCAN_OFFLOAD_CONFIG_CMD, 750 .id = SCAN_OFFLOAD_CONFIG_CMD,
743 .flags = CMD_SYNC,
744 }; 751 };
745 struct iwl_mvm_scan_params params = {}; 752 struct iwl_mvm_scan_params params = {};
746 753
@@ -798,7 +805,6 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
798 struct iwl_scan_offload_blacklist *blacklist; 805 struct iwl_scan_offload_blacklist *blacklist;
799 struct iwl_host_cmd cmd = { 806 struct iwl_host_cmd cmd = {
800 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, 807 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
801 .flags = CMD_SYNC,
802 .len[1] = sizeof(*profile_cfg), 808 .len[1] = sizeof(*profile_cfg),
803 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 809 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
804 .dataflags[1] = IWL_HCMD_DFL_NOCOPY, 810 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
@@ -884,7 +890,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
884 scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); 890 scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
885 } 891 }
886 892
887 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC, 893 if (mvm->last_ebs_successful &&
894 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
895 scan_req.flags |=
896 cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
897
898 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
888 sizeof(scan_req), &scan_req); 899 sizeof(scan_req), &scan_req);
889} 900}
890 901
@@ -893,7 +904,6 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
893 int ret; 904 int ret;
894 struct iwl_host_cmd cmd = { 905 struct iwl_host_cmd cmd = {
895 .id = SCAN_OFFLOAD_ABORT_CMD, 906 .id = SCAN_OFFLOAD_ABORT_CMD,
896 .flags = CMD_SYNC,
897 }; 907 };
898 u32 status; 908 u32 status;
899 909
@@ -922,7 +932,7 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
922 return ret; 932 return ret;
923} 933}
924 934
925int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm) 935int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
926{ 936{
927 int ret; 937 int ret;
928 struct iwl_notification_wait wait_scan_done; 938 struct iwl_notification_wait wait_scan_done;
@@ -960,5 +970,8 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
960 */ 970 */
961 mvm->scan_status = IWL_MVM_SCAN_NONE; 971 mvm->scan_status = IWL_MVM_SCAN_NONE;
962 972
973 if (notify)
974 ieee80211_sched_scan_stopped(mvm->hw);
975
963 return 0; 976 return 0;
964} 977}
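Note on the scan changes: ebs_status uses 0 (IWL_SCAN_EBS_SUCCESS) for success, so last_ebs_successful = !scan_notif->ebs_status records whether the last energy-based scan worked, and accurate mode is only requested when both that flag and firmware EBS support hold. A sketch of the gating predicate (the TLV bit value here is a placeholder, not the real one):

#include <stdbool.h>
#include <stdint.h>

#define IWL_UCODE_TLV_FLAGS_EBS_SUPPORT	(1u << 0)	/* placeholder bit */

static bool want_ebs_accurate(bool last_ebs_successful, uint32_t ucode_flags)
{
	return last_ebs_successful &&
	       (ucode_flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT);
}

int main(void)
{
	return !want_ebs_accurate(true, IWL_UCODE_TLV_FLAGS_EBS_SUPPORT);
}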
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 88809b2d1654..7edfd15efc9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
237 .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, 237 .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
238 }; 238 };
239 239
240 if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
241 return 0;
242
243 /* 240 /*
244 * Ignore the call if we are in HW Restart flow, or if the handled 241 * Ignore the call if we are in HW Restart flow, or if the handled
245 * vif is a p2p device. 242 * vif is a p2p device.
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index f339ef884250..1fb01ea2e704 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,115 +66,6 @@
66#include "sta.h" 66#include "sta.h"
67#include "rs.h" 67#include "rs.h"
68 68
69static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
70 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
71{
72 memset(cmd_v5, 0, sizeof(*cmd_v5));
73
74 cmd_v5->add_modify = cmd_v7->add_modify;
75 cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
76 cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
77 memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
78 cmd_v5->sta_id = cmd_v7->sta_id;
79 cmd_v5->modify_mask = cmd_v7->modify_mask;
80 cmd_v5->station_flags = cmd_v7->station_flags;
81 cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
82 cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
83 cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
84 cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
85 cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
86 cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
87 cmd_v5->assoc_id = cmd_v7->assoc_id;
88 cmd_v5->beamform_flags = cmd_v7->beamform_flags;
89 cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
90}
91
92static void
93iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
94 struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
95 u32 mac_id_n_color)
96{
97 memset(sta_cmd, 0, sizeof(*sta_cmd));
98
99 sta_cmd->sta_id = key_cmd->sta_id;
100 sta_cmd->add_modify = STA_MODE_MODIFY;
101 sta_cmd->modify_mask = STA_MODIFY_KEY;
102 sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
103
104 sta_cmd->key.key_offset = key_cmd->key_offset;
105 sta_cmd->key.key_flags = key_cmd->key_flags;
106 memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
107 sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
108 memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
109 sizeof(sta_cmd->key.tkip_rx_ttak));
110}
111
112static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
113 struct iwl_mvm_add_sta_cmd_v7 *cmd,
114 int *status)
115{
116 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
117
118 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
119 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
120 cmd, status);
121
122 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
123
124 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
125 &cmd_v5, status);
126}
127
128static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
129 struct iwl_mvm_add_sta_cmd_v7 *cmd)
130{
131 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
132
133 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
134 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
135 sizeof(*cmd), cmd);
136
137 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
138
139 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
140 &cmd_v5);
141}
142
143static int
144iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
145 struct iwl_mvm_add_sta_key_cmd *cmd,
146 u32 mac_id_n_color,
147 int *status)
148{
149 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
150
151 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
152 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
153 sizeof(*cmd), cmd, status);
154
155 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
156
157 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
158 &sta_cmd, status);
159}
160
161static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
162 u32 flags,
163 struct iwl_mvm_add_sta_key_cmd *cmd,
164 u32 mac_id_n_color)
165{
166 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
167
168 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
169 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
170 sizeof(*cmd), cmd);
171
172 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
173
174 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
175 &sta_cmd);
176}
177
178static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, 69static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
179 enum nl80211_iftype iftype) 70 enum nl80211_iftype iftype)
180{ 71{
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
207 bool update) 98 bool update)
208{ 99{
209 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 100 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
210 struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd; 101 struct iwl_mvm_add_sta_cmd add_sta_cmd;
211 int ret; 102 int ret;
212 u32 status; 103 u32 status;
213 u32 agg_size = 0, mpdu_dens = 0; 104 u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
295 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); 186 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
296 187
297 status = ADD_STA_SUCCESS; 188 status = ADD_STA_SUCCESS;
298 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status); 189 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
190 &add_sta_cmd, &status);
299 if (ret) 191 if (ret)
300 return ret; 192 return ret;
301 193
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
380int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 272int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
381 bool drain) 273 bool drain)
382{ 274{
383 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 275 struct iwl_mvm_add_sta_cmd cmd = {};
384 int ret; 276 int ret;
385 u32 status; 277 u32 status;
386 278
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
393 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 285 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
394 286
395 status = ADD_STA_SUCCESS; 287 status = ADD_STA_SUCCESS;
396 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 288 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
289 &cmd, &status);
397 if (ret) 290 if (ret)
398 return ret; 291 return ret;
399 292
@@ -434,7 +327,7 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
434 return -EINVAL; 327 return -EINVAL;
435 } 328 }
436 329
437 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC, 330 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
438 sizeof(rm_sta_cmd), &rm_sta_cmd); 331 sizeof(rm_sta_cmd), &rm_sta_cmd);
439 if (ret) { 332 if (ret) {
440 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); 333 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
498 sta_id); 391 sta_id);
499 continue; 392 continue;
500 } 393 }
501 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); 394 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
502 clear_bit(sta_id, mvm->sta_drained); 395 clear_bit(sta_id, mvm->sta_drained);
503 } 396 }
504 397
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
520 /* flush its queues here since we are freeing mvm_sta */ 413 /* flush its queues here since we are freeing mvm_sta */
521 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true); 414 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
522 415
523 /*
524 * Put a non-NULL since the fw station isn't removed.
525 * It will be removed after the MAC will be set as
526 * unassoc.
527 */
528 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
529 ERR_PTR(-EINVAL));
530
531 /* if we are associated - we can't remove the AP STA now */ 416 /* if we are associated - we can't remove the AP STA now */
532 if (vif->bss_conf.assoc) 417 if (vif->bss_conf.assoc)
533 return ret; 418 return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
557 } else { 442 } else {
558 spin_unlock_bh(&mvm_sta->lock); 443 spin_unlock_bh(&mvm_sta->lock);
559 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 444 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
560 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 445 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
561 } 446 }
562 447
563 return ret; 448 return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
571 456
572 lockdep_assert_held(&mvm->mutex); 457 lockdep_assert_held(&mvm->mutex);
573 458
574 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); 459 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
575 return ret; 460 return ret;
576} 461}
577 462
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
593 478
594void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) 479void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
595{ 480{
596 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 481 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
597 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 482 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
598 sta->sta_id = IWL_MVM_STATION_COUNT; 483 sta->sta_id = IWL_MVM_STATION_COUNT;
599} 484}
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
603 const u8 *addr, 488 const u8 *addr,
604 u16 mac_id, u16 color) 489 u16 mac_id, u16 color)
605{ 490{
606 struct iwl_mvm_add_sta_cmd_v7 cmd; 491 struct iwl_mvm_add_sta_cmd cmd;
607 int ret; 492 int ret;
608 u32 status; 493 u32 status;
609 494
610 lockdep_assert_held(&mvm->mutex); 495 lockdep_assert_held(&mvm->mutex);
611 496
612 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7)); 497 memset(&cmd, 0, sizeof(cmd));
613 cmd.sta_id = sta->sta_id; 498 cmd.sta_id = sta->sta_id;
614 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 499 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
615 color)); 500 color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
619 if (addr) 504 if (addr)
620 memcpy(cmd.addr, addr, ETH_ALEN); 505 memcpy(cmd.addr, addr, ETH_ALEN);
621 506
622 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 507 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
508 &cmd, &status);
623 if (ret) 509 if (ret)
624 return ret; 510 return ret;
625 511
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
753 int tid, u16 ssn, bool start) 639 int tid, u16 ssn, bool start)
754{ 640{
755 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 641 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
756 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 642 struct iwl_mvm_add_sta_cmd cmd = {};
757 int ret; 643 int ret;
758 u32 status; 644 u32 status;
759 645
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
777 STA_MODIFY_REMOVE_BA_TID; 663 STA_MODIFY_REMOVE_BA_TID;
778 664
779 status = ADD_STA_SUCCESS; 665 status = ADD_STA_SUCCESS;
780 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 666 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
667 &cmd, &status);
781 if (ret) 668 if (ret)
782 return ret; 669 return ret;
783 670
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
812 int tid, u8 queue, bool start) 699 int tid, u8 queue, bool start)
813{ 700{
814 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 701 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
815 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 702 struct iwl_mvm_add_sta_cmd cmd = {};
816 int ret; 703 int ret;
817 u32 status; 704 u32 status;
818 705
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
834 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 721 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
835 722
836 status = ADD_STA_SUCCESS; 723 status = ADD_STA_SUCCESS;
837 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 724 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
725 &cmd, &status);
838 if (ret) 726 if (ret)
839 return ret; 727 return ret;
840 728
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1129 u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, 1017 u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
1130 u32 cmd_flags) 1018 u32 cmd_flags)
1131{ 1019{
1132 __le16 key_flags;
1133 struct iwl_mvm_add_sta_key_cmd cmd = {}; 1020 struct iwl_mvm_add_sta_key_cmd cmd = {};
1021 __le16 key_flags;
1134 int ret, status; 1022 int ret, status;
1135 u16 keyidx; 1023 u16 keyidx;
1136 int i; 1024 int i;
1137 u32 mac_id_n_color = mvm_sta->mac_id_n_color;
1138 1025
1139 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 1026 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1140 STA_KEY_FLG_KEYID_MSK; 1027 STA_KEY_FLG_KEYID_MSK;
@@ -1166,13 +1053,12 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1166 cmd.sta_id = sta_id; 1053 cmd.sta_id = sta_id;
1167 1054
1168 status = ADD_STA_SUCCESS; 1055 status = ADD_STA_SUCCESS;
1169 if (cmd_flags == CMD_SYNC) 1056 if (cmd_flags & CMD_ASYNC)
1170 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd, 1057 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
1171 mac_id_n_color, 1058 sizeof(cmd), &cmd);
1172 &status);
1173 else 1059 else
1174 ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd, 1060 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1175 mac_id_n_color); 1061 &cmd, &status);
1176 1062
1177 switch (status) { 1063 switch (status) {
1178 case ADD_STA_SUCCESS: 1064 case ADD_STA_SUCCESS:
@@ -1225,7 +1111,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1225 remove_key ? "removing" : "installing", 1111 remove_key ? "removing" : "installing",
1226 igtk_cmd.sta_id); 1112 igtk_cmd.sta_id);
1227 1113
1228 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC, 1114 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1229 sizeof(igtk_cmd), &igtk_cmd); 1115 sizeof(igtk_cmd), &igtk_cmd);
1230} 1116}
1231 1117
@@ -1312,15 +1198,15 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1312 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1198 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1313 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1199 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1314 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, 1200 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
1315 seq.tkip.iv32, p1k, CMD_SYNC); 1201 seq.tkip.iv32, p1k, 0);
1316 break; 1202 break;
1317 case WLAN_CIPHER_SUITE_CCMP: 1203 case WLAN_CIPHER_SUITE_CCMP:
1318 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, 1204 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
1319 0, NULL, CMD_SYNC); 1205 0, NULL, 0);
1320 break; 1206 break;
1321 default: 1207 default:
1322 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, 1208 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
1323 sta_id, 0, NULL, CMD_SYNC); 1209 sta_id, 0, NULL, 0);
1324 } 1210 }
1325 1211
1326 if (ret) 1212 if (ret)
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1399 cmd.sta_id = sta_id; 1285 cmd.sta_id = sta_id;
1400 1286
1401 status = ADD_STA_SUCCESS; 1287 status = ADD_STA_SUCCESS;
1402 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd, 1288 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1403 mvm_sta->mac_id_n_color, 1289 &cmd, &status);
1404 &status);
1405 1290
1406 switch (status) { 1291 switch (status) {
1407 case ADD_STA_SUCCESS: 1292 case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1448 struct ieee80211_sta *sta) 1333 struct ieee80211_sta *sta)
1449{ 1334{
1450 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1335 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1451 struct iwl_mvm_add_sta_cmd_v7 cmd = { 1336 struct iwl_mvm_add_sta_cmd cmd = {
1452 .add_modify = STA_MODE_MODIFY, 1337 .add_modify = STA_MODE_MODIFY,
1453 .sta_id = mvmsta->sta_id, 1338 .sta_id = mvmsta->sta_id,
1454 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 1339 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1456 }; 1341 };
1457 int ret; 1342 int ret;
1458 1343
1459 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd); 1344 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1460 if (ret) 1345 if (ret)
1461 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1346 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1462} 1347}
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1468 bool agg) 1353 bool agg)
1469{ 1354{
1470 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1355 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1471 struct iwl_mvm_add_sta_cmd_v7 cmd = { 1356 struct iwl_mvm_add_sta_cmd cmd = {
1472 .add_modify = STA_MODE_MODIFY, 1357 .add_modify = STA_MODE_MODIFY,
1473 .sta_id = mvmsta->sta_id, 1358 .sta_id = mvmsta->sta_id,
1474 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 1359 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1538 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); 1423 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1539 } 1424 }
1540 1425
1541 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd); 1426 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1542 if (ret) 1427 if (ret)
1543 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1428 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1544} 1429}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 2ed84c421481..d98e8a2142b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
253 * This is basically (last acked packet++). 253 * This is basically (last acked packet++).
254 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 254 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
255 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 255 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
256 * @reduced_tpc: Reduced tx power. Holds the data between the
257 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
256 * @state: state of the BA agreement establishment / tear down. 258 * @state: state of the BA agreement establishment / tear down.
257 * @txq_id: Tx queue used by the BA session 259 * @txq_id: Tx queue used by the BA session
258 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 260 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
265 u16 next_reclaimed; 267 u16 next_reclaimed;
266 /* The rest is Tx AGG related */ 268 /* The rest is Tx AGG related */
267 u32 rate_n_flags; 269 u32 rate_n_flags;
270 u8 reduced_tpc;
268 enum iwl_mvm_agg_state state; 271 enum iwl_mvm_agg_state state;
269 u16 txq_id; 272 u16 txq_id;
270 u16 ssn; 273 u16 ssn;
@@ -284,8 +287,6 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
284 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for 287 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
285 * tid. 288 * tid.
286 * @max_agg_bufsize: the maximal size of the AGG buffer for this station 289 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
287 * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
288 * by debugfs.
289 * @bt_reduced_txpower: is reduced tx power enabled for this station 290 * @bt_reduced_txpower: is reduced tx power enabled for this station
290 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and 291 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
291 * we need to signal the EOSP 292 * we need to signal the EOSP
@@ -306,7 +307,6 @@ struct iwl_mvm_sta {
306 u32 mac_id_n_color; 307 u32 mac_id_n_color;
307 u16 tid_disable_agg; 308 u16 tid_disable_agg;
308 u8 max_agg_bufsize; 309 u8 max_agg_bufsize;
309 bool bt_reduced_txpower_dbg;
310 bool bt_reduced_txpower; 310 bool bt_reduced_txpower;
311 bool next_status_eosp; 311 bool next_status_eosp;
312 spinlock_t lock; 312 spinlock_t lock;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 61331245ad93..80100f6cc12a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
273 return true; 273 return true;
274} 274}
275 275
276/* used to convert from time event API v2 to v1 */
277#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
278 TE_V2_EVENT_SOCIOPATHIC)
279static inline u16 te_v2_get_notify(__le16 policy)
280{
281 return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
282}
283
284static inline u16 te_v2_get_dep_policy(__le16 policy)
285{
286 return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
287 TE_V2_PLACEMENT_POS;
288}
289
290static inline u16 te_v2_get_absence(__le16 policy)
291{
292 return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
293}
294
295static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
296 struct iwl_time_event_cmd_v1 *cmd_v1)
297{
298 cmd_v1->id_and_color = cmd_v2->id_and_color;
299 cmd_v1->action = cmd_v2->action;
300 cmd_v1->id = cmd_v2->id;
301 cmd_v1->apply_time = cmd_v2->apply_time;
302 cmd_v1->max_delay = cmd_v2->max_delay;
303 cmd_v1->depends_on = cmd_v2->depends_on;
304 cmd_v1->interval = cmd_v2->interval;
305 cmd_v1->duration = cmd_v2->duration;
306 if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
307 cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
308 else
309 cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
310 cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
311 cmd_v1->interval_reciprocal = 0; /* unused */
312
313 cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
314 cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
315 cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
316}
317
318static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
319 const struct iwl_time_event_cmd_v2 *cmd)
320{
321 struct iwl_time_event_cmd_v1 cmd_v1;
322
323 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
324 return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
325 sizeof(*cmd), cmd);
326
327 iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
328 return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
329 sizeof(cmd_v1), &cmd_v1);
330}
331
332
333static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, 276static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
334 struct ieee80211_vif *vif, 277 struct ieee80211_vif *vif,
335 struct iwl_mvm_time_event_data *te_data, 278 struct iwl_mvm_time_event_data *te_data,
336 struct iwl_time_event_cmd_v2 *te_cmd) 279 struct iwl_time_event_cmd *te_cmd)
337{ 280{
338 static const u8 time_event_response[] = { TIME_EVENT_CMD }; 281 static const u8 time_event_response[] = { TIME_EVENT_CMD };
339 struct iwl_notification_wait wait_time_event; 282 struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
369 ARRAY_SIZE(time_event_response), 312 ARRAY_SIZE(time_event_response),
370 iwl_mvm_time_event_response, te_data); 313 iwl_mvm_time_event_response, te_data);
371 314
372 ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd); 315 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
316 sizeof(*te_cmd), te_cmd);
373 if (ret) { 317 if (ret) {
374 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); 318 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
375 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 319 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
397{ 341{
398 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
399 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 343 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
400 struct iwl_time_event_cmd_v2 time_cmd = {}; 344 struct iwl_time_event_cmd time_cmd = {};
401 345
402 lockdep_assert_held(&mvm->mutex); 346 lockdep_assert_held(&mvm->mutex);
403 347
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
453 struct iwl_mvm_vif *mvmvif, 397 struct iwl_mvm_vif *mvmvif,
454 struct iwl_mvm_time_event_data *te_data) 398 struct iwl_mvm_time_event_data *te_data)
455{ 399{
456 struct iwl_time_event_cmd_v2 time_cmd = {}; 400 struct iwl_time_event_cmd time_cmd = {};
457 u32 id, uid; 401 u32 id, uid;
458 int ret; 402 int ret;
459 403
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
490 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 434 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
491 435
492 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); 436 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
493 ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd); 437 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
438 sizeof(time_cmd), &time_cmd);
494 if (WARN_ON(ret)) 439 if (WARN_ON(ret))
495 return; 440 return;
496} 441}
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
510{ 455{
511 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 456 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
512 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 457 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
513 struct iwl_time_event_cmd_v2 time_cmd = {}; 458 struct iwl_time_event_cmd time_cmd = {};
514 459
515 lockdep_assert_held(&mvm->mutex); 460 lockdep_assert_held(&mvm->mutex);
516 if (te_data->running) { 461 if (te_data->running) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 7a99fa361954..868561512783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -409,7 +409,6 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
409 .id = REPLY_THERMAL_MNG_BACKOFF, 409 .id = REPLY_THERMAL_MNG_BACKOFF,
410 .len = { sizeof(u32), }, 410 .len = { sizeof(u32), },
411 .data = { &backoff, }, 411 .data = { &backoff, },
412 .flags = CMD_SYNC,
413 }; 412 };
414 413
415 backoff = max(backoff, mvm->thermal_throttle.min_backoff); 414 backoff = max(backoff, mvm->thermal_throttle.min_backoff);
@@ -468,13 +467,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
468 } 467 }
469 468
470 if (params->support_tx_backoff) { 469 if (params->support_tx_backoff) {
471 tx_backoff = 0; 470 tx_backoff = tt->min_backoff;
472 for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { 471 for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
473 if (temperature < params->tx_backoff[i].temperature) 472 if (temperature < params->tx_backoff[i].temperature)
474 break; 473 break;
475 tx_backoff = params->tx_backoff[i].backoff; 474 tx_backoff = max(tt->min_backoff,
475 params->tx_backoff[i].backoff);
476 } 476 }
477 if (tx_backoff != 0) 477 if (tx_backoff != tt->min_backoff)
478 throttle_enable = true; 478 throttle_enable = true;
479 if (tt->tx_backoff != tx_backoff) 479 if (tt->tx_backoff != tx_backoff)
480 iwl_mvm_tt_tx_backoff(mvm, tx_backoff); 480 iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +484,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
484 IWL_WARN(mvm, 484 IWL_WARN(mvm,
485 "Due to high temperature thermal throttling initiated\n"); 485 "Due to high temperature thermal throttling initiated\n");
486 tt->throttle = true; 486 tt->throttle = true;
487 } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 && 487 } else if (tt->throttle && !tt->dynamic_smps &&
488 tt->tx_backoff == tt->min_backoff &&
488 temperature <= params->tx_protection_exit) { 489 temperature <= params->tx_protection_exit) {
489 IWL_WARN(mvm, 490 IWL_WARN(mvm,
490 "Temperature is back to normal thermal throttling stopped\n"); 491 "Temperature is back to normal thermal throttling stopped\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 879aeac46cc1..3846a6c41eb1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
636 seq_ctl = le16_to_cpu(hdr->seq_ctrl); 636 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
637 } 637 }
638 638
639 ieee80211_tx_status_ni(mvm->hw, skb); 639 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
640 info->status.status_driver_data[0] =
641 (void *)(uintptr_t)tx_resp->reduced_tpc;
642
643 ieee80211_tx_status(mvm->hw, skb);
640 } 644 }
641 645
642 if (txq_id >= mvm->first_agg_queue) { 646 if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
815 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 819 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
816 mvmsta->tid_data[tid].rate_n_flags = 820 mvmsta->tid_data[tid].rate_n_flags =
817 le32_to_cpu(tx_resp->initial_rate); 821 le32_to_cpu(tx_resp->initial_rate);
822 mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
818 } 823 }
819 824
820 rcu_read_unlock(); 825 rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
928 info->status.ampdu_len = ba_notif->txed; 933 info->status.ampdu_len = ba_notif->txed;
929 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, 934 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
930 info); 935 info);
936 info->status.status_driver_data[0] =
937 (void *)(uintptr_t)tid_data->reduced_tpc;
931 } 938 }
932 } 939 }
933 940
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
937 944
938 while (!skb_queue_empty(&reclaimed_skbs)) { 945 while (!skb_queue_empty(&reclaimed_skbs)) {
939 skb = __skb_dequeue(&reclaimed_skbs); 946 skb = __skb_dequeue(&reclaimed_skbs);
940 ieee80211_tx_status_ni(mvm->hw, skb); 947 ieee80211_tx_status(mvm->hw, skb);
941 } 948 }
942 949
943 return 0; 950 return 0;
@@ -951,7 +958,7 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
951 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), 958 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
952 }; 959 };
953 960
954 u32 flags = sync ? CMD_SYNC : CMD_ASYNC; 961 u32 flags = sync ? 0 : CMD_ASYNC;
955 962
956 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, 963 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
957 sizeof(flush_cmd), &flush_cmd); 964 sizeof(flush_cmd), &flush_cmd);
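Note: reduced_tpc travels back to the rate-scaling code inside the skb's info->status.status_driver_data[0], laundered through void * via uintptr_t; the consumer just reverses the cast. The round trip in miniature:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t reduced_tpc = 3;
	void *slot;

	/* store, as in iwl_mvm_rx_tx_cmd_single() above */
	slot = (void *)(uintptr_t)reduced_tpc;
	/* load, as the rate-scaling consumer would */
	assert((uint8_t)(uintptr_t)slot == 3);
	return 0;
}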
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902266ae..aa9fc77e8413 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -64,6 +64,7 @@
64 64
65#include "iwl-debug.h" 65#include "iwl-debug.h"
66#include "iwl-io.h" 66#include "iwl-io.h"
67#include "iwl-prph.h"
67 68
68#include "mvm.h" 69#include "mvm.h"
69#include "fw-api-rs.h" 70#include "fw-api-rs.h"
@@ -143,7 +144,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
143 "cmd flags %x", cmd->flags)) 144 "cmd flags %x", cmd->flags))
144 return -EINVAL; 145 return -EINVAL;
145 146
146 cmd->flags |= CMD_SYNC | CMD_WANT_SKB; 147 cmd->flags |= CMD_WANT_SKB;
147 148
148 ret = iwl_trans_send_cmd(mvm->trans, cmd); 149 ret = iwl_trans_send_cmd(mvm->trans, cmd);
149 if (ret == -ERFKILL) { 150 if (ret == -ERFKILL) {
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
469 mvm->status, table.valid); 470 mvm->status, table.valid);
470 } 471 }
471 472
473 /* Do not change this output - scripts rely on it */
474
472 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); 475 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
473 476
474 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 477 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -516,13 +519,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
516 iwl_mvm_dump_umac_error_log(mvm); 519 iwl_mvm_dump_umac_error_log(mvm);
517} 520}
518 521
522#ifdef CONFIG_IWLWIFI_DEBUGFS
519void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm) 523void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
520{ 524{
521 const struct fw_img *img; 525 const struct fw_img *img;
522 u32 ofs, sram_len; 526 u32 ofs, sram_len;
523 void *sram; 527 void *sram;
524 528
525 if (!mvm->ucode_loaded || mvm->fw_error_sram) 529 if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
526 return; 530 return;
527 531
528 img = &mvm->fw->img[mvm->cur_ucode]; 532 img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +542,48 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
538 mvm->fw_error_sram_len = sram_len; 542 mvm->fw_error_sram_len = sram_len;
539} 543}
540 544
545void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
546{
547 int i, reg_val;
548 unsigned long flags;
549
550 if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
551 return;
552
553 /* reading buffer size */
554 reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
555 mvm->fw_error_rxf_len =
556 (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
557
558 /* the register holds the value divided by 128 */
559 mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
560
561 if (!mvm->fw_error_rxf_len)
562 return;
563
564 mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
565 if (!mvm->fw_error_rxf) {
566 mvm->fw_error_rxf_len = 0;
567 return;
568 }
569
570 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
571 kfree(mvm->fw_error_rxf);
572 mvm->fw_error_rxf = NULL;
573 mvm->fw_error_rxf_len = 0;
574 return;
575 }
576
577 for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
578 iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
579 i * sizeof(u32));
580 mvm->fw_error_rxf[i] =
581 iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
582 }
583 iwl_trans_release_nic_access(mvm->trans, &flags);
584}
585#endif
586
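Note: RXF_SIZE_ADDR reports the FIFO length in 128-byte units, which is why the code above shifts the masked field left by 7 to recover bytes. Worked example with a hypothetical field value:

#include <assert.h>

int main(void)
{
	unsigned int reg_field = 0x22;		/* hypothetical masked value */
	unsigned int len = reg_field << 7;	/* x128 -> bytes */

	assert(len == 34 * 128);		/* 4352 bytes */
	return 0;
}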
541/** 587/**
542 * iwl_mvm_send_lq_cmd() - Send link quality command 588 * iwl_mvm_send_lq_cmd() - Send link quality command
543 * @init: This command is sent as part of station initialization right 589 * @init: This command is sent as part of station initialization right
@@ -553,7 +599,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
553 struct iwl_host_cmd cmd = { 599 struct iwl_host_cmd cmd = {
554 .id = LQ_CMD, 600 .id = LQ_CMD,
555 .len = { sizeof(struct iwl_lq_cmd), }, 601 .len = { sizeof(struct iwl_lq_cmd), },
556 .flags = init ? CMD_SYNC : CMD_ASYNC, 602 .flags = init ? 0 : CMD_ASYNC,
557 .data = { lq, }, 603 .data = { lq, },
558 }; 604 };
559 605
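Note: as everywhere in this series, CMD_SYNC is gone — a flags value of 0 now means synchronous, and callers opt into CMD_ASYNC explicitly, as the init ? 0 : CMD_ASYNC selector above shows. A sketch against the driver's own API (illustration only, using the iwl_mvm_send_cmd_pdu() signature seen throughout this diff):

/* 0 == synchronous by default, CMD_ASYNC to fire and forget. */
static int send_lq(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
	return iwl_mvm_send_cmd_pdu(mvm, LQ_CMD, init ? 0 : CMD_ASYNC,
				    sizeof(*lq), lq);
}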
@@ -604,6 +650,39 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
604 ieee80211_request_smps(vif, smps_mode); 650 ieee80211_request_smps(vif, smps_mode);
605} 651}
606 652
653static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
654 struct ieee80211_vif *vif)
655{
656 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
657 bool *result = _data;
658 int i;
659
660 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
661 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
662 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
663 *result = false;
664 }
665}
666
667bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
668{
669 bool result = true;
670
671 lockdep_assert_held(&mvm->mutex);
672
673 if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
674 return false;
675
676 if (!mvm->cfg->rx_with_siso_diversity)
677 return false;
678
679 ieee80211_iterate_active_interfaces_atomic(
680 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
681 iwl_mvm_diversity_iter, &result);
682
683 return result;
684}
685
607int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 686int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
608 bool value) 687 bool value)
609{ 688{
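Note: iwl_mvm_rx_diversity_allowed() vetoes RX diversity as soon as any active vif carries a static or dynamic SMPS request, since SMPS and listening on both chains pull the antenna setup in opposite directions. Callers must hold mvm->mutex; usage would look roughly like this (enable_rx_diversity() is a hypothetical helper, not driver API):

lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_rx_diversity_allowed(mvm))
	enable_rx_diversity(mvm);	/* hypothetical helper */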
@@ -623,7 +702,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
623 702
624 iwl_mvm_bt_coex_vif_change(mvm); 703 iwl_mvm_bt_coex_vif_change(mvm);
625 704
626 return iwl_mvm_power_update_mac(mvm, vif); 705 return iwl_mvm_power_update_mac(mvm);
627} 706}
628 707
629static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) 708static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3d1d57f9f5bc..7091a18d5a72 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -417,7 +417,7 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
417 splx->package.count != 2 || 417 splx->package.count != 2 ||
418 splx->package.elements[0].type != ACPI_TYPE_INTEGER || 418 splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
419 splx->package.elements[0].integer.value != 0) { 419 splx->package.elements[0].integer.value != 0) {
420 IWL_ERR(trans, "Unsupported splx structure"); 420 IWL_ERR(trans, "Unsupported splx structure\n");
421 return 0; 421 return 0;
422 } 422 }
423 423
@@ -426,14 +426,14 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
426 limits->package.count < 2 || 426 limits->package.count < 2 ||
427 limits->package.elements[0].type != ACPI_TYPE_INTEGER || 427 limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
428 limits->package.elements[1].type != ACPI_TYPE_INTEGER) { 428 limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
429 IWL_ERR(trans, "Invalid limits element"); 429 IWL_ERR(trans, "Invalid limits element\n");
430 return 0; 430 return 0;
431 } 431 }
432 432
433 domain_type = &limits->package.elements[0]; 433 domain_type = &limits->package.elements[0];
434 power_limit = &limits->package.elements[1]; 434 power_limit = &limits->package.elements[1];
435 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { 435 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
436 IWL_DEBUG_INFO(trans, "WiFi power is not limited"); 436 IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
437 return 0; 437 return 0;
438 } 438 }
439 439
@@ -450,26 +450,26 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
450 pxsx_handle = ACPI_HANDLE(&pdev->dev); 450 pxsx_handle = ACPI_HANDLE(&pdev->dev);
451 if (!pxsx_handle) { 451 if (!pxsx_handle) {
452 IWL_DEBUG_INFO(trans, 452 IWL_DEBUG_INFO(trans,
453 "Could not retrieve root port ACPI handle"); 453 "Could not retrieve root port ACPI handle\n");
454 return; 454 return;
455 } 455 }
456 456
457 /* Get the method's handle */ 457 /* Get the method's handle */
458 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); 458 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
459 if (ACPI_FAILURE(status)) { 459 if (ACPI_FAILURE(status)) {
460 IWL_DEBUG_INFO(trans, "SPL method not found"); 460 IWL_DEBUG_INFO(trans, "SPL method not found\n");
461 return; 461 return;
462 } 462 }
463 463
464 /* Call SPLC with no arguments */ 464 /* Call SPLC with no arguments */
465 status = acpi_evaluate_object(handle, NULL, NULL, &splx); 465 status = acpi_evaluate_object(handle, NULL, NULL, &splx);
466 if (ACPI_FAILURE(status)) { 466 if (ACPI_FAILURE(status)) {
467 IWL_ERR(trans, "SPLC invocation failed (0x%x)", status); 467 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
468 return; 468 return;
469 } 469 }
470 470
471 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); 471 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
472 IWL_DEBUG_INFO(trans, "Default power limit set to %lld", 472 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
473 trans->dflt_pwr_limit); 473 trans->dflt_pwr_limit);
474 kfree(splx.pointer); 474 kfree(splx.pointer);
475} 475}
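All the changes in drv.c above are the same one-character fix: kernel log output is line oriented, and a printk-style message without a trailing "\n" can have the next message glued onto the same line (or force an implicit flush, depending on kernel version). A tiny userspace demo of the symptom, with a hypothetical LOG() macro standing in for IWL_ERR()/IWL_DEBUG_INFO():

#include <stdio.h>

/* hypothetical stand-in for the driver's logging macros */
#define LOG(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

int main(void)
{
        LOG("Unsupported splx structure");      /* no newline ... */
        LOG("SPL method not found\n");          /* ... so this runs on */
        /* stderr: "Unsupported splx structureSPL method not found" */
        return 0;
}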
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9091513ea738..6c22b23a2845 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
102 u32 write_actual; 102 u32 write_actual;
103 struct list_head rx_free; 103 struct list_head rx_free;
104 struct list_head rx_used; 104 struct list_head rx_used;
105 int need_update; 105 bool need_update;
106 struct iwl_rb_status *rb_stts; 106 struct iwl_rb_status *rb_stts;
107 dma_addr_t rb_stts_dma; 107 dma_addr_t rb_stts_dma;
108 spinlock_t lock; 108 spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
117/** 117/**
118 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 118 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
119 * @index -- current index 119 * @index -- current index
120 * @n_bd -- total number of entries in queue (must be power of 2)
121 */ 120 */
122static inline int iwl_queue_inc_wrap(int index, int n_bd) 121static inline int iwl_queue_inc_wrap(int index)
123{ 122{
124 return ++index & (n_bd - 1); 123 return ++index & (TFD_QUEUE_SIZE_MAX - 1);
125} 124}
126 125
127/** 126/**
128 * iwl_queue_dec_wrap - decrement queue index, wrap back to end 127 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
129 * @index -- current index 128 * @index -- current index
130 * @n_bd -- total number of entries in queue (must be power of 2)
131 */ 129 */
132static inline int iwl_queue_dec_wrap(int index, int n_bd) 130static inline int iwl_queue_dec_wrap(int index)
133{ 131{
134 return --index & (n_bd - 1); 132 return --index & (TFD_QUEUE_SIZE_MAX - 1);
135} 133}
136 134
137struct iwl_cmd_meta { 135struct iwl_cmd_meta {
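Dropping the n_bd argument from the wrap helpers works because the queue depth is now the compile-time constant TFD_QUEUE_SIZE_MAX (256, per the comment in the next hunk), and because masking with size-1 only wraps correctly for power-of-two sizes; the decrement case additionally relies on two's-complement arithmetic, since (-1) & 255 == 255. A standalone sketch of both helpers with the power-of-two invariant checked at compile time:

#include <assert.h>
#include <stdio.h>

#define QUEUE_SIZE 256  /* stands in for TFD_QUEUE_SIZE_MAX */

/* must be a power of two, or the masking below is broken */
_Static_assert((QUEUE_SIZE & (QUEUE_SIZE - 1)) == 0, "not a power of 2");

static int inc_wrap(int index) { return ++index & (QUEUE_SIZE - 1); }
static int dec_wrap(int index) { return --index & (QUEUE_SIZE - 1); }

int main(void)
{
        assert(inc_wrap(255) == 0);     /* wraps forward to the start */
        assert(dec_wrap(0) == 255);     /* -1 & 0xff == 255: wraps back */
        printf("ok\n");
        return 0;
}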
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
145 * 143 *
146 * Contains common data for Rx and Tx queues. 144 * Contains common data for Rx and Tx queues.
147 * 145 *
148 * Note the difference between n_bd and n_window: the hardware 146 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
149 * always assumes 256 descriptors, so n_bd is always 256 (unless 147 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
150 * there might be HW changes in the future). For the normal TX 148 * there might be HW changes in the future). For the normal TX
151 * queues, n_window, which is the size of the software queue data 149 * queues, n_window, which is the size of the software queue data
152 * is also 256; however, for the command queue, n_window is only 150 * is also 256; however, for the command queue, n_window is only
153 * 32 since we don't need so many commands pending. Since the HW 151 * 32 since we don't need so many commands pending. Since the HW
154 * still uses 256 BDs for DMA though, n_bd stays 256. As a result, 152 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
155 * the software buffers (in the variables @meta, @txb in struct 153 * the software buffers (in the variables @meta, @txb in struct
156 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in 154 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
157 * the same struct) have 256. 155 * the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
162 * data is a window overlaid over the HW queue. 160 * data is a window overlaid over the HW queue.
163 */ 161 */
164struct iwl_queue { 162struct iwl_queue {
165 int n_bd; /* number of BDs in this queue */
166 int write_ptr; /* 1-st empty entry (index) host_w*/ 163 int write_ptr; /* 1-st empty entry (index) host_w*/
167 int read_ptr; /* last used entry (index) host_r*/ 164 int read_ptr; /* last used entry (index) host_r*/
168 /* use for monitoring and recovering the stuck queue */ 165 /* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
231 spinlock_t lock; 228 spinlock_t lock;
232 struct timer_list stuck_timer; 229 struct timer_list stuck_timer;
233 struct iwl_trans_pcie *trans_pcie; 230 struct iwl_trans_pcie *trans_pcie;
234 u8 need_update; 231 bool need_update;
235 u8 active; 232 u8 active;
236 bool ampdu; 233 bool ampdu;
237}; 234};
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
270 struct iwl_trans *trans; 267 struct iwl_trans *trans;
271 struct iwl_drv *drv; 268 struct iwl_drv *drv;
272 269
270 struct net_device napi_dev;
271 struct napi_struct napi;
272
273 /* INT ICT Table */ 273 /* INT ICT Table */
274 __le32 *ict_tbl; 274 __le32 *ict_tbl;
275 dma_addr_t ict_tbl_dma; 275 dma_addr_t ict_tbl_dma;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
362void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); 362void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
363int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 363int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
364 struct iwl_device_cmd *dev_cmd, int txq_id); 364 struct iwl_device_cmd *dev_cmd, int txq_id);
365void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); 365void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
366int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 366int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
367void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 367void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
368 struct iwl_rx_cmd_buffer *rxb, int handler_status); 368 struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
370 struct sk_buff_head *skbs); 370 struct sk_buff_head *skbs);
371void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); 371void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
372 372
373static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
374{
375 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
376
377 return le16_to_cpu(tb->hi_n_len) >> 4;
378}
379
373/***************************************************** 380/*****************************************************
374* Error handling 381* Error handling
375******************************************************/ 382******************************************************/
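The iwl_pcie_tfd_tb_get_len() helper moved into this header decodes iwlwifi's packed TB descriptor word: hi_n_len is a little-endian u16 that appears to carry the upper 4 bits of the 36-bit DMA address in its low nibble and the 12-bit buffer length above that, which is why the length comes out via >> 4 (the layout is inferred from this shift and the matching iwl_pcie_tfd_tb_get_addr() in tx.c). A host-endian sketch of the pack/unpack:

#include <assert.h>
#include <stdint.h>

/* assumed hi_n_len layout: bits [15:4] = length, [3:0] = addr bits 35:32 */
static uint16_t pack_hi_n_len(uint8_t addr_hi, uint16_t len)
{
        assert(addr_hi < 16 && len < 4096);
        return (uint16_t)((len << 4) | addr_hi);
}

static uint16_t tb_get_len(uint16_t hi_n_len)     { return hi_n_len >> 4; }
static uint8_t  tb_get_addr_hi(uint16_t hi_n_len) { return hi_n_len & 0xf; }

int main(void)
{
        uint16_t v = pack_hi_n_len(0x3, 100);

        assert(tb_get_len(v) == 100);
        assert(tb_get_addr_hi(v) == 0x3);
        return 0;
}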
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fdfa3969cac9..a2698e5e062c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
145/* 145/*
146 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 146 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
147 */ 147 */
148static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 148static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
149 struct iwl_rxq *rxq)
150{ 149{
150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
151 struct iwl_rxq *rxq = &trans_pcie->rxq;
151 u32 reg; 152 u32 reg;
152 153
153 spin_lock(&rxq->lock); 154 lockdep_assert_held(&rxq->lock);
154
155 if (rxq->need_update == 0)
156 goto exit_unlock;
157 155
158 /* 156 /*
159 * explicitly wake up the NIC if: 157 * explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
169 reg); 167 reg);
170 iwl_set_bit(trans, CSR_GP_CNTRL, 168 iwl_set_bit(trans, CSR_GP_CNTRL,
171 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 169 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
172 goto exit_unlock; 170 rxq->need_update = true;
171 return;
173 } 172 }
174 } 173 }
175 174
176 rxq->write_actual = round_down(rxq->write, 8); 175 rxq->write_actual = round_down(rxq->write, 8);
177 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 176 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
178 rxq->need_update = 0; 177}
178
179static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
180{
181 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
182 struct iwl_rxq *rxq = &trans_pcie->rxq;
183
184 spin_lock(&rxq->lock);
185
186 if (!rxq->need_update)
187 goto exit_unlock;
188
189 iwl_pcie_rxq_inc_wr_ptr(trans);
190 rxq->need_update = false;
179 191
180 exit_unlock: 192 exit_unlock:
181 spin_unlock(&rxq->lock); 193 spin_unlock(&rxq->lock);
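The rewrite above splits the old update function in two: iwl_pcie_rxq_inc_wr_ptr() now assumes rxq->lock is held (lockdep_assert_held) and, when the NIC may be asleep, merely records need_update instead of touching the register; iwl_pcie_rxq_check_wrptr(), called from the wakeup interrupt later in this patch, takes the lock and replays the deferred write. A userspace model of the flag-and-replay pattern, functions only, with a pthread mutex standing in for the spinlock (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>

struct rxq {
        pthread_mutex_t lock;
        bool need_update;
        bool nic_awake;         /* models the MAC_ACCESS_REQ check */
        int write, write_actual;
};

/* caller must hold q->lock - the driver asserts this with lockdep */
static void rxq_inc_wr_ptr(struct rxq *q)
{
        if (!q->nic_awake) {            /* registers unreachable now */
                q->need_update = true;  /* defer the update */
                return;
        }
        q->write_actual = q->write & ~0x7;      /* "write" the register */
}

/* e.g. from the wakeup interrupt: replay any deferred update */
static void rxq_check_wrptr(struct rxq *q)
{
        pthread_mutex_lock(&q->lock);
        if (q->need_update) {
                q->need_update = false;
                rxq_inc_wr_ptr(q);
        }
        pthread_mutex_unlock(&q->lock);
}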
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
236 * Increment device's write pointer in multiples of 8. */ 248 * Increment device's write pointer in multiples of 8. */
237 if (rxq->write_actual != (rxq->write & ~0x7)) { 249 if (rxq->write_actual != (rxq->write & ~0x7)) {
238 spin_lock(&rxq->lock); 250 spin_lock(&rxq->lock);
239 rxq->need_update = 1; 251 iwl_pcie_rxq_inc_wr_ptr(trans);
240 spin_unlock(&rxq->lock); 252 spin_unlock(&rxq->lock);
241 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
242 } 253 }
243} 254}
244 255
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
362 * Also restock the Rx queue via iwl_pcie_rxq_restock. 373 * Also restock the Rx queue via iwl_pcie_rxq_restock.
363 * This is called as a scheduled work item (except for during initialization) 374 * This is called as a scheduled work item (except for during initialization)
364 */ 375 */
365static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 376static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
366{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368
369 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
370
371 spin_lock(&trans_pcie->irq_lock);
372 iwl_pcie_rxq_restock(trans);
373 spin_unlock(&trans_pcie->irq_lock);
374}
375
376static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
377{ 377{
378 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); 378 iwl_pcie_rxq_alloc_rbs(trans, gfp);
379 379
380 iwl_pcie_rxq_restock(trans); 380 iwl_pcie_rxq_restock(trans);
381} 381}
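iwl_pcie_rx_replenish() and iwl_pcie_rx_replenish_now() differed only in the GFP flags handed to the buffer allocator, so they collapse into one function taking gfp_t: GFP_KERNEL from process context (init and the work item below) and GFP_ATOMIC from the RX path. A sketch of the same parameterization with the allocation context made an explicit argument (the enum is a stand-in for gfp_t):

#include <stdlib.h>

/* models gfp_t: whether the allocation may sleep */
typedef enum { CTX_MAY_SLEEP, CTX_ATOMIC } alloc_ctx_t;

static void *alloc_rb(alloc_ctx_t ctx)
{
        (void)ctx;      /* a kernel allocator behaves differently here */
        return malloc(4096);
}

static void replenish(alloc_ctx_t ctx)
{
        void *rb = alloc_rb(ctx);       /* one path; caller picks context */

        free(rb);       /* the driver would restock the RX queue instead */
}

int main(void)
{
        replenish(CTX_MAY_SLEEP);       /* init / work item: GFP_KERNEL */
        replenish(CTX_ATOMIC);          /* RX path: GFP_ATOMIC */
        return 0;
}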
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
385 struct iwl_trans_pcie *trans_pcie = 385 struct iwl_trans_pcie *trans_pcie =
386 container_of(data, struct iwl_trans_pcie, rx_replenish); 386 container_of(data, struct iwl_trans_pcie, rx_replenish);
387 387
388 iwl_pcie_rx_replenish(trans_pcie->trans); 388 iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
389} 389}
390 390
391static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 391static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
521 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 521 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
522 spin_unlock(&rxq->lock); 522 spin_unlock(&rxq->lock);
523 523
524 iwl_pcie_rx_replenish(trans); 524 iwl_pcie_rx_replenish(trans, GFP_KERNEL);
525 525
526 iwl_pcie_rx_hw_init(trans, rxq); 526 iwl_pcie_rx_hw_init(trans, rxq);
527 527
528 spin_lock(&trans_pcie->irq_lock); 528 spin_lock(&rxq->lock);
529 rxq->need_update = 1; 529 iwl_pcie_rxq_inc_wr_ptr(trans);
530 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 530 spin_unlock(&rxq->lock);
531 spin_unlock(&trans_pcie->irq_lock);
532 531
533 return 0; 532 return 0;
534} 533}
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
673 /* Reuse the page if possible. For notification packets and 672 /* Reuse the page if possible. For notification packets and
674 * SKBs that fail to Rx correctly, add them back into the 673 * SKBs that fail to Rx correctly, add them back into the
675 * rx_free list for reuse later. */ 674 * rx_free list for reuse later. */
676 spin_lock(&rxq->lock);
677 if (rxb->page != NULL) { 675 if (rxb->page != NULL) {
678 rxb->page_dma = 676 rxb->page_dma =
679 dma_map_page(trans->dev, rxb->page, 0, 677 dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
694 } 692 }
695 } else 693 } else
696 list_add_tail(&rxb->list, &rxq->rx_used); 694 list_add_tail(&rxb->list, &rxq->rx_used);
697 spin_unlock(&rxq->lock);
698} 695}
699 696
700/* 697/*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
709 u32 count = 8; 706 u32 count = 8;
710 int total_empty; 707 int total_empty;
711 708
709restart:
710 spin_lock(&rxq->lock);
712 /* uCode's read index (stored in shared DRAM) indicates the last Rx 711 /* uCode's read index (stored in shared DRAM) indicates the last Rx
713 * buffer that the driver may process (last buffer filled by ucode). */ 712 * buffer that the driver may process (last buffer filled by ucode). */
714 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 713 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
743 count++; 742 count++;
744 if (count >= 8) { 743 if (count >= 8) {
745 rxq->read = i; 744 rxq->read = i;
746 iwl_pcie_rx_replenish_now(trans); 745 spin_unlock(&rxq->lock);
746 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
747 count = 0; 747 count = 0;
748 goto restart;
748 } 749 }
749 } 750 }
750 } 751 }
751 752
752 /* Backtrack one entry */ 753 /* Backtrack one entry */
753 rxq->read = i; 754 rxq->read = i;
755 spin_unlock(&rxq->lock);
756
754 if (fill_rx) 757 if (fill_rx)
755 iwl_pcie_rx_replenish_now(trans); 758 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
756 else 759 else
757 iwl_pcie_rxq_restock(trans); 760 iwl_pcie_rxq_restock(trans);
761
762 if (trans_pcie->napi.poll)
763 napi_gro_flush(&trans_pcie->napi, false);
758} 764}
759 765
760/* 766/*
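With the RX loop now running under rxq->lock, the mid-loop replenish first drops the lock, allocates with GFP_ATOMIC, and then jumps to the new restart label, where the lock is retaken and the hardware's closed_rb_num is re-read instead of trusting a stale snapshot. A compact userspace sketch of this release-work-revalidate loop (compile with -pthread; names are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int hw_index = 20;       /* models closed_rb_num in shared DRAM */

static void slow_replenish(void) { /* GFP_ATOMIC allocation goes here */ }

static void rx_handle(void)
{
        int i = 0, r;

restart:
        pthread_mutex_lock(&lock);
        r = hw_index;                   /* re-read after every restart */
        while (i != r) {
                i++;                    /* "process" one buffer */
                if ((i & 7) == 0) {     /* every 8 buffers... */
                        pthread_mutex_unlock(&lock);
                        slow_replenish();       /* ...refill, unlocked */
                        goto restart;   /* retake lock, revalidate r */
                }
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        rx_handle();
        return 0;
}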
@@ -844,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
844 trans_pcie->ict_index, read); 850 trans_pcie->ict_index, read);
845 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 851 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
846 trans_pcie->ict_index = 852 trans_pcie->ict_index =
847 iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); 853 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
848 854
849 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 855 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
850 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 856 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
876 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 882 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
877 u32 inta = 0; 883 u32 inta = 0;
878 u32 handled = 0; 884 u32 handled = 0;
879 u32 i;
880 885
881 lock_map_acquire(&trans->sync_cmd_lockdep_map); 886 lock_map_acquire(&trans->sync_cmd_lockdep_map);
882 887
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1028 /* uCode wakes up after power-down sleep */ 1033 /* uCode wakes up after power-down sleep */
1029 if (inta & CSR_INT_BIT_WAKEUP) { 1034 if (inta & CSR_INT_BIT_WAKEUP) {
1030 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1035 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1031 iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq); 1036 iwl_pcie_rxq_check_wrptr(trans);
1032 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 1037 iwl_pcie_txq_check_wrptrs(trans);
1033 iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
1034 1038
1035 isr_stats->wakeup++; 1039 isr_stats->wakeup++;
1036 1040
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1068 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1072 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1069 CSR_INT_PERIODIC_DIS); 1073 CSR_INT_PERIODIC_DIS);
1070 1074
1071 iwl_pcie_rx_handle(trans);
1072
1073 /* 1075 /*
1074 * Enable periodic interrupt in 8 msec only if we received 1076 * Enable periodic interrupt in 8 msec only if we received
1075 * real RX interrupt (instead of just periodic int), to catch 1077 * real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1082 CSR_INT_PERIODIC_ENA); 1084 CSR_INT_PERIODIC_ENA);
1083 1085
1084 isr_stats->rx++; 1086 isr_stats->rx++;
1087
1088 local_bh_disable();
1089 iwl_pcie_rx_handle(trans);
1090 local_bh_enable();
1085 } 1091 }
1086 1092
1087 /* This "Tx" DMA channel is used only for loading uCode */ 1093 /* This "Tx" DMA channel is used only for loading uCode */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553f1ef7..788085bc65d7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -73,6 +73,7 @@
73#include "iwl-csr.h" 73#include "iwl-csr.h"
74#include "iwl-prph.h" 74#include "iwl-prph.h"
75#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "iwl-fw-error-dump.h"
76#include "internal.h" 77#include "internal.h"
77 78
78static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) 79static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -103,7 +104,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
103 104
104/* PCI registers */ 105/* PCI registers */
105#define PCI_CFG_RETRY_TIMEOUT 0x041 106#define PCI_CFG_RETRY_TIMEOUT 0x041
106#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
107 107
108static void iwl_pcie_apm_config(struct iwl_trans *trans) 108static void iwl_pcie_apm_config(struct iwl_trans *trans)
109{ 109{
@@ -454,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
454{ 454{
455 int ret; 455 int ret;
456 int t = 0; 456 int t = 0;
457 int iter;
457 458
458 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 459 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
459 460
@@ -462,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
462 if (ret >= 0) 463 if (ret >= 0)
463 return 0; 464 return 0;
464 465
465 /* If HW is not ready, prepare the conditions to check again */ 466 for (iter = 0; iter < 10; iter++) {
466 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 467 /* If HW is not ready, prepare the conditions to check again */
467 CSR_HW_IF_CONFIG_REG_PREPARE); 468 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
469 CSR_HW_IF_CONFIG_REG_PREPARE);
470
471 do {
472 ret = iwl_pcie_set_hw_ready(trans);
473 if (ret >= 0)
474 return 0;
468 475
469 do { 476 usleep_range(200, 1000);
470 ret = iwl_pcie_set_hw_ready(trans); 477 t += 200;
471 if (ret >= 0) 478 } while (t < 150000);
472 return 0; 479 msleep(25);
480 }
473 481
474 usleep_range(200, 1000); 482 IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
475 t += 200;
476 } while (t < 150000);
477 483
478 return ret; 484 return ret;
479} 485}
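iwl_pcie_prepare_card_hw() becomes two nested loops: up to 10 attempts, each re-asserting the PREPARE bit, polling the ready flag, and pausing 25 ms between attempts. One thing worth noting: t is declared at function scope and never reset, so once the first attempt spends its ~150 ms budget, every later attempt's do-while polls exactly once before the msleep(25). A generic sketch of poll-with-retries; unlike the driver, it resets the polling budget on each attempt (timings and names are illustrative):

#include <stdbool.h>
#include <time.h>

static void sleep_us(long us)
{
        struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };

        nanosleep(&ts, NULL);
}

/* up to 10 attempts, each polling ready() for ~150 ms in 200 us steps */
static bool prepare_hw(bool (*ready)(void))
{
        for (int iter = 0; iter < 10; iter++) {
                /* re-assert the "prepare" request here on real hardware */
                for (long t = 0; t < 150000; t += 200) {
                        if (ready())
                                return true;
                        sleep_us(200);
                }
                sleep_us(25000);        /* back off between attempts */
        }
        return false;
}

static bool always_ready(void) { return true; }

int main(void)
{
        return prepare_hw(always_ready) ? 0 : 1;
}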
@@ -1053,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1053 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1059 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1054} 1060}
1055 1061
1062static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1063{
1064 WARN_ON(1);
1065 return 0;
1066}
1067
1056static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1068static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1057 const struct iwl_trans_config *trans_cfg) 1069 const struct iwl_trans_config *trans_cfg)
1058{ 1070{
@@ -1079,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1079 1091
1080 trans_pcie->command_names = trans_cfg->command_names; 1092 trans_pcie->command_names = trans_cfg->command_names;
1081 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; 1093 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1094
1095 /* Initialize NAPI here - it must happen before registering with
1096 * mac80211 in the opmode, but after the HW struct is allocated.
1097 * As this function may be called again in some corner cases, don't
1098 * do anything if NAPI was already initialized.
1099 */
1100 if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
1101 init_dummy_netdev(&trans_pcie->napi_dev);
1102 iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
1103 &trans_pcie->napi_dev,
1104 iwl_pcie_dummy_napi_poll, 64);
1105 }
1082} 1106}
1083 1107
1084void iwl_trans_pcie_free(struct iwl_trans *trans) 1108void iwl_trans_pcie_free(struct iwl_trans *trans)
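NAPI must hang off a struct net_device and the PCIe transport has none, so the patch embeds a dummy one (init_dummy_netdev()) and registers the WARN_ON poll stub above, which is never supposed to run since the RX path calls napi_gro_flush() directly. The napi.poll pointer doubles as the "already initialized" guard because configure() can be invoked more than once. A sketch of guarding one-time setup with the callback pointer itself:

#include <stddef.h>

struct napi { int (*poll)(int budget); };

static int dummy_poll(int budget)
{
        (void)budget;           /* must never actually run */
        return 0;
}

static void configure(struct napi *n)
{
        if (n->poll)            /* already initialized: do nothing */
                return;
        n->poll = dummy_poll;   /* one-time setup happens once, here */
}

int main(void)
{
        struct napi n = { NULL };

        configure(&n);
        configure(&n);          /* second call is a harmless no-op */
        return n.poll != dummy_poll;
}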
@@ -1099,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1099 pci_disable_device(trans_pcie->pci_dev); 1123 pci_disable_device(trans_pcie->pci_dev);
1100 kmem_cache_destroy(trans->dev_cmd_pool); 1124 kmem_cache_destroy(trans->dev_cmd_pool);
1101 1125
1126 if (trans_pcie->napi.poll)
1127 netif_napi_del(&trans_pcie->napi);
1128
1102 kfree(trans); 1129 kfree(trans);
1103} 1130}
1104 1131
@@ -1237,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
1237 1264
1238#define IWL_FLUSH_WAIT_MS 2000 1265#define IWL_FLUSH_WAIT_MS 2000
1239 1266
1240static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) 1267static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
1241{ 1268{
1242 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1269 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1243 struct iwl_txq *txq; 1270 struct iwl_txq *txq;
@@ -1250,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1250 1277
1251 /* waiting for all the tx frames complete might take a while */ 1278 /* waiting for all the tx frames complete might take a while */
1252 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 1279 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1280 u8 wr_ptr;
1281
1253 if (cnt == trans_pcie->cmd_queue) 1282 if (cnt == trans_pcie->cmd_queue)
1254 continue; 1283 continue;
1284 if (!test_bit(cnt, trans_pcie->queue_used))
1285 continue;
1286 if (!(BIT(cnt) & txq_bm))
1287 continue;
1288
1289 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
1255 txq = &trans_pcie->txq[cnt]; 1290 txq = &trans_pcie->txq[cnt];
1256 q = &txq->q; 1291 q = &txq->q;
1257 while (q->read_ptr != q->write_ptr && !time_after(jiffies, 1292 wr_ptr = ACCESS_ONCE(q->write_ptr);
1258 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) 1293
1294 while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
1295 !time_after(jiffies,
1296 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
1297 u8 write_ptr = ACCESS_ONCE(q->write_ptr);
1298
1299 if (WARN_ONCE(wr_ptr != write_ptr,
1300 "WR pointer moved while flushing %d -> %d\n",
1301 wr_ptr, write_ptr))
1302 return -ETIMEDOUT;
1259 msleep(1); 1303 msleep(1);
1304 }
1260 1305
1261 if (q->read_ptr != q->write_ptr) { 1306 if (q->read_ptr != q->write_ptr) {
1262 IWL_ERR(trans, 1307 IWL_ERR(trans,
@@ -1264,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1264 ret = -ETIMEDOUT; 1309 ret = -ETIMEDOUT;
1265 break; 1310 break;
1266 } 1311 }
1312 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
1267 } 1313 }
1268 1314
1269 if (!ret) 1315 if (!ret)
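iwl_trans_pcie_wait_txq_empty() now takes a queue bitmap so callers can flush a subset, skips queues that are not in use, and snapshots the write pointer before polling: the caller is expected to have quiesced the queue, so a moving write_ptr is a bug and the wait aborts with a warning rather than chasing a moving target. A sketch of the drain-with-stability-check (busy-polling where the driver msleep()s):

#include <stdbool.h>
#include <stdio.h>

struct q { volatile int read_ptr, write_ptr; };

/* drain q; the caller promises no new writes while we wait */
static bool wait_empty(struct q *q, int max_polls)
{
        int wr_snapshot = q->write_ptr;

        while (q->read_ptr != q->write_ptr && max_polls-- > 0) {
                if (q->write_ptr != wr_snapshot) {
                        fprintf(stderr, "WR pointer moved %d -> %d\n",
                                wr_snapshot, q->write_ptr);
                        return false;   /* -ETIMEDOUT in the driver */
                }
                /* the driver msleep(1)s here */
        }
        return q->read_ptr == q->write_ptr;
}

int main(void)
{
        struct q q = { .read_ptr = 3, .write_ptr = 3 };

        return wait_empty(&q, 1000) ? 0 : 1;
}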
@@ -1298,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1298 IWL_ERR(trans, 1344 IWL_ERR(trans,
1299 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 1345 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
1300 cnt, active ? "" : "in", fifo, tbl_dw, 1346 cnt, active ? "" : "in", fifo, tbl_dw,
1301 iwl_read_prph(trans, 1347 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
1302 SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1), 1348 (TFD_QUEUE_SIZE_MAX - 1),
1303 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); 1349 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
1304 } 1350 }
1305 1351
@@ -1630,6 +1676,61 @@ err:
1630 IWL_ERR(trans, "failed to create the trans debugfs entry\n"); 1676 IWL_ERR(trans, "failed to create the trans debugfs entry\n");
1631 return -ENOMEM; 1677 return -ENOMEM;
1632} 1678}
1679
1680static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
1681{
1682 u32 cmdlen = 0;
1683 int i;
1684
1685 for (i = 0; i < IWL_NUM_OF_TBS; i++)
1686 cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
1687
1688 return cmdlen;
1689}
1690
1691static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
1692 void *buf, u32 buflen)
1693{
1694 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1695 struct iwl_fw_error_dump_data *data;
1696 struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
1697 struct iwl_fw_error_dump_txcmd *txcmd;
1698 u32 len;
1699 int i, ptr;
1700
1701 if (!buf)
1702 return sizeof(*data) +
1703 cmdq->q.n_window * (sizeof(*txcmd) +
1704 TFD_MAX_PAYLOAD_SIZE);
1705
1706 len = 0;
1707 data = buf;
1708 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
1709 txcmd = (void *)data->data;
1710 spin_lock_bh(&cmdq->lock);
1711 ptr = cmdq->q.write_ptr;
1712 for (i = 0; i < cmdq->q.n_window; i++) {
1713 u8 idx = get_cmd_index(&cmdq->q, ptr);
1714 u32 caplen, cmdlen;
1715
1716 cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
1717 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
1718
1719 if (cmdlen) {
1720 len += sizeof(*txcmd) + caplen;
1721 txcmd->cmdlen = cpu_to_le32(cmdlen);
1722 txcmd->caplen = cpu_to_le32(caplen);
1723 memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
1724 txcmd = (void *)((u8 *)txcmd->data + caplen);
1725 }
1726
1727 ptr = iwl_queue_dec_wrap(ptr);
1728 }
1729 spin_unlock_bh(&cmdq->lock);
1730
1731 data->len = cpu_to_le32(len);
1732 return sizeof(*data) + len;
1733}
1633#else 1734#else
1634static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 1735static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1635 struct dentry *dir) 1736 struct dentry *dir)
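iwl_trans_pcie_dump_data() follows the common size-then-fill contract: called with buf == NULL it returns a worst-case size (n_window slots of sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE), so the caller can allocate once and call again to fill; the second pass returns the bytes actually written. A minimal sketch of the same two-pass API:

#include <stdlib.h>
#include <string.h>

/* returns bytes needed when buf is NULL, bytes written otherwise */
static size_t dump(void *buf, const char *items[], size_t n)
{
        size_t len = 0;

        for (size_t i = 0; i < n; i++) {
                size_t l = strlen(items[i]) + 1;

                if (buf)
                        memcpy((char *)buf + len, items[i], l);
                len += l;
        }
        return len;
}

int main(void)
{
        const char *items[] = { "cmd0", "cmd1" };
        size_t need = dump(NULL, items, 2);     /* pass 1: size only */
        void *buf = malloc(need);

        dump(buf, items, 2);                    /* pass 2: fill */
        free(buf);
        return 0;
}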
@@ -1672,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
1672 .grab_nic_access = iwl_trans_pcie_grab_nic_access, 1773 .grab_nic_access = iwl_trans_pcie_grab_nic_access,
1673 .release_nic_access = iwl_trans_pcie_release_nic_access, 1774 .release_nic_access = iwl_trans_pcie_release_nic_access,
1674 .set_bits_mask = iwl_trans_pcie_set_bits_mask, 1775 .set_bits_mask = iwl_trans_pcie_set_bits_mask,
1776
1777#ifdef CONFIG_IWLWIFI_DEBUGFS
1778 .dump_data = iwl_trans_pcie_dump_data,
1779#endif
1675}; 1780};
1676 1781
1677struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 1782struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3b0c72c10054..038940afbdc5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q)
70 70
71 /* 71 /*
72 * To avoid ambiguity between empty and completely full queues, there 72 * To avoid ambiguity between empty and completely full queues, there
73 * should always be less than q->n_bd elements in the queue. 73 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
74 * If q->n_window is smaller than q->n_bd, there is no need to reserve 74 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
75 * any queue entries for this purpose. 75 * to reserve any queue entries for this purpose.
76 */ 76 */
77 if (q->n_window < q->n_bd) 77 if (q->n_window < TFD_QUEUE_SIZE_MAX)
78 max = q->n_window; 78 max = q->n_window;
79 else 79 else
80 max = q->n_bd - 1; 80 max = TFD_QUEUE_SIZE_MAX - 1;
81 81
82 /* 82 /*
83 * q->n_bd is a power of 2, so the following is equivalent to modulo by 83 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
84 * q->n_bd and is well defined for negative dividends. 84 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
85 */ 85 */
86 used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1); 86 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
87 87
88 if (WARN_ON(used > max)) 88 if (WARN_ON(used > max))
89 return 0; 89 return 0;
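iwl_queue_space() keeps the classic ring-buffer rule: with read == write meaning empty, a ring of N slots may hold at most N - 1 entries, or full and empty would be indistinguishable; and the masked subtraction (write - read) & (N - 1) yields the used count even after write_ptr has wrapped below read_ptr. A worked example:

#include <assert.h>

#define N 256   /* a power of two, like TFD_QUEUE_SIZE_MAX */

static int used(int r, int w)  { return (w - r) & (N - 1); }
static int space(int r, int w) { return (N - 1) - used(r, w); }

int main(void)
{
        assert(used(0, 0) == 0);        /* read == write: empty */
        assert(used(250, 4) == 10);     /* wrapped: (4 - 250) & 255 */
        assert(space(5, 4) == 0);       /* 255 used: the ring is full */
        return 0;
}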
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q)
94/* 94/*
95 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 95 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
96 */ 96 */
97static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) 97static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
98{ 98{
99 q->n_bd = count;
100 q->n_window = slots_num; 99 q->n_window = slots_num;
101 q->id = id; 100 q->id = id;
102 101
103 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
104 * and iwl_queue_dec_wrap are broken. */
105 if (WARN_ON(!is_power_of_2(count)))
106 return -EINVAL;
107
108 /* slots_num must be power-of-two size, otherwise 102 /* slots_num must be power-of-two size, otherwise
109 * get_cmd_index is broken. */ 103 * get_cmd_index is broken. */
110 if (WARN_ON(!is_power_of_2(slots_num))) 104 if (WARN_ON(!is_power_of_2(slots_num)))
@@ -197,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
197 IWL_ERR(trans, 191 IWL_ERR(trans,
198 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 192 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
199 i, active ? "" : "in", fifo, tbl_dw, 193 i, active ? "" : "in", fifo, tbl_dw,
200 iwl_read_prph(trans, 194 iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
201 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), 195 (TFD_QUEUE_SIZE_MAX - 1),
202 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); 196 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
203 } 197 }
204 198
205 for (i = q->read_ptr; i != q->write_ptr; 199 for (i = q->read_ptr; i != q->write_ptr;
206 i = iwl_queue_inc_wrap(i, q->n_bd)) 200 i = iwl_queue_inc_wrap(i))
207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 201 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
208 le32_to_cpu(txq->scratchbufs[i].scratch)); 202 le32_to_cpu(txq->scratchbufs[i].scratch));
209 203
210 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 204 iwl_force_nmi(trans);
211} 205}
212 206
213/* 207/*
@@ -287,14 +281,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
287/* 281/*
288 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 282 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
289 */ 283 */
290void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) 284static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
285 struct iwl_txq *txq)
291{ 286{
292 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 287 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
293 u32 reg = 0; 288 u32 reg = 0;
294 int txq_id = txq->q.id; 289 int txq_id = txq->q.id;
295 290
296 if (txq->need_update == 0) 291 lockdep_assert_held(&txq->lock);
297 return;
298 292
299 /* 293 /*
300 * explicitly wake up the NIC if: 294 * explicitly wake up the NIC if:
@@ -317,6 +311,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
317 txq_id, reg); 311 txq_id, reg);
318 iwl_set_bit(trans, CSR_GP_CNTRL, 312 iwl_set_bit(trans, CSR_GP_CNTRL,
319 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 313 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
314 txq->need_update = true;
320 return; 315 return;
321 } 316 }
322 } 317 }
@@ -327,8 +322,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
327 */ 322 */
328 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); 323 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
329 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 324 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
325}
326
327void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
328{
329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
330 int i;
331
332 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
333 struct iwl_txq *txq = &trans_pcie->txq[i];
330 334
331 txq->need_update = 0; 335 spin_lock_bh(&txq->lock);
336 if (trans_pcie->txq[i].need_update) {
337 iwl_pcie_txq_inc_wr_ptr(trans, txq);
338 trans_pcie->txq[i].need_update = false;
339 }
340 spin_unlock_bh(&txq->lock);
341 }
332} 342}
333 343
334static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 344static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -343,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
343 return addr; 353 return addr;
344} 354}
345 355
346static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
347{
348 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
349
350 return le16_to_cpu(tb->hi_n_len) >> 4;
351}
352
353static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, 356static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
354 dma_addr_t addr, u16 len) 357 dma_addr_t addr, u16 len)
355{ 358{
@@ -409,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
409{ 412{
410 struct iwl_tfd *tfd_tmp = txq->tfds; 413 struct iwl_tfd *tfd_tmp = txq->tfds;
411 414
412 /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ 415 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
416 * idx is bounded by n_window
417 */
413 int rd_ptr = txq->q.read_ptr; 418 int rd_ptr = txq->q.read_ptr;
414 int idx = get_cmd_index(&txq->q, rd_ptr); 419 int idx = get_cmd_index(&txq->q, rd_ptr);
415 420
416 lockdep_assert_held(&txq->lock); 421 lockdep_assert_held(&txq->lock);
417 422
418 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 423 /* We have only q->n_window txq->entries, but we use
424 * TFD_QUEUE_SIZE_MAX tfds
425 */
419 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); 426 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
420 427
421 /* free SKB */ 428 /* free SKB */
@@ -436,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
436} 443}
437 444
438static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 445static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
439 dma_addr_t addr, u16 len, u8 reset) 446 dma_addr_t addr, u16 len, bool reset)
440{ 447{
441 struct iwl_queue *q; 448 struct iwl_queue *q;
442 struct iwl_tfd *tfd, *tfd_tmp; 449 struct iwl_tfd *tfd, *tfd_tmp;
@@ -542,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
542{ 549{
543 int ret; 550 int ret;
544 551
545 txq->need_update = 0; 552 txq->need_update = false;
546 553
547 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 554 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
548 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 555 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
549 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 556 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
550 557
551 /* Initialize queue's high/low-water marks, and head/tail indexes */ 558 /* Initialize queue's high/low-water marks, and head/tail indexes */
552 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, 559 ret = iwl_queue_init(&txq->q, slots_num, txq_id);
553 txq_id);
554 if (ret) 560 if (ret)
555 return ret; 561 return ret;
556 562
@@ -575,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
575 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 581 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
576 struct iwl_queue *q = &txq->q; 582 struct iwl_queue *q = &txq->q;
577 583
578 if (!q->n_bd)
579 return;
580
581 spin_lock_bh(&txq->lock); 584 spin_lock_bh(&txq->lock);
582 while (q->write_ptr != q->read_ptr) { 585 while (q->write_ptr != q->read_ptr) {
583 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 586 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
584 txq_id, q->read_ptr); 587 txq_id, q->read_ptr);
585 iwl_pcie_txq_free_tfd(trans, txq); 588 iwl_pcie_txq_free_tfd(trans, txq);
586 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 589 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
587 } 590 }
588 txq->active = false; 591 txq->active = false;
589 spin_unlock_bh(&txq->lock); 592 spin_unlock_bh(&txq->lock);
@@ -620,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
620 } 623 }
621 624
622 /* De-alloc circular buffer of TFDs */ 625 /* De-alloc circular buffer of TFDs */
623 if (txq->q.n_bd) { 626 if (txq->tfds) {
624 dma_free_coherent(dev, sizeof(struct iwl_tfd) * 627 dma_free_coherent(dev,
625 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 628 sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
629 txq->tfds, txq->q.dma_addr);
626 txq->q.dma_addr = 0; 630 txq->q.dma_addr = 0;
631 txq->tfds = NULL;
627 632
628 dma_free_coherent(dev, 633 dma_free_coherent(dev,
629 sizeof(*txq->scratchbufs) * txq->q.n_window, 634 sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -680,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
680 /* The chain extension of the SCD doesn't work well. This feature is 685 /* The chain extension of the SCD doesn't work well. This feature is
681 * enabled by default by the HW, so we need to disable it manually. 686 * enabled by default by the HW, so we need to disable it manually.
682 */ 687 */
683 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 688 if (trans->cfg->base_params->scd_chain_ext_wa)
689 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
684 690
685 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 691 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
686 trans_pcie->cmd_fifo); 692 trans_pcie->cmd_fifo);
@@ -931,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
931{ 937{
932 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 938 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
933 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 939 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
934 /* n_bd is usually 256 => n_bd - 1 = 0xff */ 940 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
935 int tfd_num = ssn & (txq->q.n_bd - 1);
936 struct iwl_queue *q = &txq->q; 941 struct iwl_queue *q = &txq->q;
937 int last_to_free; 942 int last_to_free;
938 943
@@ -956,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
956 961
957 /*Since we free until index _not_ inclusive, the one before index is 962 /*Since we free until index _not_ inclusive, the one before index is
958 * the last we will free. This one must be used */ 963 * the last we will free. This one must be used */
959 last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); 964 last_to_free = iwl_queue_dec_wrap(tfd_num);
960 965
961 if (!iwl_queue_used(q, last_to_free)) { 966 if (!iwl_queue_used(q, last_to_free)) {
962 IWL_ERR(trans, 967 IWL_ERR(trans,
963 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 968 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
964 __func__, txq_id, last_to_free, q->n_bd, 969 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
965 q->write_ptr, q->read_ptr); 970 q->write_ptr, q->read_ptr);
966 goto out; 971 goto out;
967 } 972 }
@@ -971,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
971 976
972 for (; 977 for (;
973 q->read_ptr != tfd_num; 978 q->read_ptr != tfd_num;
974 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 979 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
975 980
976 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 981 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
977 continue; 982 continue;
@@ -1010,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1010 1015
1011 lockdep_assert_held(&txq->lock); 1016 lockdep_assert_held(&txq->lock);
1012 1017
1013 if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { 1018 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
1014 IWL_ERR(trans, 1019 IWL_ERR(trans,
1015 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1020 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1016 __func__, txq_id, idx, q->n_bd, 1021 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
1017 q->write_ptr, q->read_ptr); 1022 q->write_ptr, q->read_ptr);
1018 return; 1023 return;
1019 } 1024 }
1020 1025
1021 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 1026 for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
1022 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1027 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
1023 1028
1024 if (nfreed++ > 0) { 1029 if (nfreed++ > 0) {
1025 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1030 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1026 idx, q->write_ptr, q->read_ptr); 1031 idx, q->write_ptr, q->read_ptr);
1027 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 1032 iwl_force_nmi(trans);
1028 } 1033 }
1029 } 1034 }
1030 1035
1031 if (q->read_ptr == q->write_ptr) { 1036 if (trans->cfg->base_params->apmg_wake_up_wa &&
1037 q->read_ptr == q->write_ptr) {
1032 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1038 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1033 WARN_ON(!trans_pcie->cmd_in_flight); 1039 WARN_ON(!trans_pcie->cmd_in_flight);
1034 trans_pcie->cmd_in_flight = false; 1040 trans_pcie->cmd_in_flight = false;
@@ -1309,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1309 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1315 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1310 copy_size = sizeof(out_cmd->hdr); 1316 copy_size = sizeof(out_cmd->hdr);
1311 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1317 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1312 int copy = 0; 1318 int copy;
1313 1319
1314 if (!cmd->len[i]) 1320 if (!cmd->len[i])
1315 continue; 1321 continue;
1316 1322
1317 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1318 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1319 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1320
1321 if (copy > cmd->len[i])
1322 copy = cmd->len[i];
1323 }
1324
1325 /* copy everything if not nocopy/dup */ 1323 /* copy everything if not nocopy/dup */
1326 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1324 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1327 IWL_HCMD_DFL_DUP))) 1325 IWL_HCMD_DFL_DUP))) {
1328 copy = cmd->len[i]; 1326 copy = cmd->len[i];
1329 1327
1330 if (copy) {
1331 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1328 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1332 cmd_pos += copy; 1329 cmd_pos += copy;
1333 copy_size += copy; 1330 copy_size += copy;
1331 continue;
1332 }
1333
1334 /*
1335 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
1336 * in total (for the scratchbuf handling), but copy up to what
1337 * we can fit into the payload for debug dump purposes.
1338 */
1339 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1340
1341 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1342 cmd_pos += copy;
1343
1344 /* However, keep copy_size accurate - it is needed below */
1345 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1346 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1347
1348 if (copy > cmd->len[i])
1349 copy = cmd->len[i];
1350 copy_size += copy;
1334 } 1351 }
1335 } 1352 }
1336 1353
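The rewritten copy loop above maintains two counters with different meanings: cmd_pos counts the bytes actually placed in the command payload (nocopy/dup fragments are now partially copied there too, solely so the new error-dump code can capture them), while copy_size still counts only what the scratch-buffer DMA path consumes, capped at IWL_HCMD_SCRATCHBUF_SIZE. A sketch of copy-for-visibility with separate transport accounting (the sizes are illustrative, not the real constants):

#include <string.h>

#define PAYLOAD_MAX 320 /* models TFD_MAX_PAYLOAD_SIZE */
#define SCRATCH_SZ   16 /* models IWL_HCMD_SCRATCHBUF_SIZE */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* copy a "nocopy" fragment into the payload for debug visibility only */
static void add_nocopy_frag(char *out, size_t *cmd_pos, size_t *copy_size,
                            const char *frag, size_t len)
{
        size_t copy = min_sz(PAYLOAD_MAX - *cmd_pos, len);

        memcpy(out + *cmd_pos, frag, copy);     /* seen by the dump */
        *cmd_pos += copy;

        /* the DMA path only ever consumes the first SCRATCH_SZ bytes */
        if (*copy_size < SCRATCH_SZ)
                *copy_size += min_sz(SCRATCH_SZ - *copy_size, len);
}

int main(void)
{
        char out[PAYLOAD_MAX];
        size_t cmd_pos = 0, copy_size = 0;

        add_nocopy_frag(out, &cmd_pos, &copy_size,
                        "0123456789abcdef0123", 20);
        return !(cmd_pos == 20 && copy_size == 16);
}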
@@ -1345,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1345 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); 1362 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1346 iwl_pcie_txq_build_tfd(trans, txq, 1363 iwl_pcie_txq_build_tfd(trans, txq,
1347 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), 1364 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1348 scratch_size, 1); 1365 scratch_size, true);
1349 1366
1350 /* map first command fragment, if any remains */ 1367 /* map first command fragment, if any remains */
1351 if (copy_size > scratch_size) { 1368 if (copy_size > scratch_size) {
@@ -1361,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1361 } 1378 }
1362 1379
1363 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1380 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1364 copy_size - scratch_size, 0); 1381 copy_size - scratch_size, false);
1365 } 1382 }
1366 1383
1367 /* map the remaining (adjusted) nocopy/dup fragments */ 1384 /* map the remaining (adjusted) nocopy/dup fragments */
@@ -1384,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1384 goto out; 1401 goto out;
1385 } 1402 }
1386 1403
1387 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0); 1404 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1388 } 1405 }
1389 1406
1390 out_meta->flags = cmd->flags; 1407 out_meta->flags = cmd->flags;
@@ -1392,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1392 kfree(txq->entries[idx].free_buf); 1409 kfree(txq->entries[idx].free_buf);
1393 txq->entries[idx].free_buf = dup_buf; 1410 txq->entries[idx].free_buf = dup_buf;
1394 1411
1395 txq->need_update = 1;
1396
1397 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); 1412 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1398 1413
1399 /* start timer if queue currently empty */ 1414 /* start timer if queue currently empty */
@@ -1405,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1405 /* 1420 /*
1406 * wake up the NIC to make sure that the firmware will see the host 1421 * wake up the NIC to make sure that the firmware will see the host
1407 * command - we will let the NIC sleep once all the host commands 1422 * command - we will let the NIC sleep once all the host commands
1408 * returned. 1423 * returned. This needs to be done only on NICs that have
1424 * apmg_wake_up_wa set.
1409 */ 1425 */
1410 if (!trans_pcie->cmd_in_flight) { 1426 if (trans->cfg->base_params->apmg_wake_up_wa &&
1427 !trans_pcie->cmd_in_flight) {
1411 trans_pcie->cmd_in_flight = true; 1428 trans_pcie->cmd_in_flight = true;
1412 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1429 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1413 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1430 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1427,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1427 } 1444 }
1428 1445
1429 /* Increment and update queue's write index */ 1446 /* Increment and update queue's write index */
1430 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1447 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1431 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1448 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1432 1449
1433 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1450 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1583,7 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1583 get_cmd_string(trans_pcie, cmd->id)); 1600 get_cmd_string(trans_pcie, cmd->id));
1584 ret = -ETIMEDOUT; 1601 ret = -ETIMEDOUT;
1585 1602
1586 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 1603 iwl_force_nmi(trans);
1587 iwl_trans_fw_error(trans); 1604 iwl_trans_fw_error(trans);
1588 1605
1589 goto cancel; 1606 goto cancel;
@@ -1661,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1661 dma_addr_t tb0_phys, tb1_phys, scratch_phys; 1678 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1662 void *tb1_addr; 1679 void *tb1_addr;
1663 u16 len, tb1_len, tb2_len; 1680 u16 len, tb1_len, tb2_len;
1664 u8 wait_write_ptr = 0; 1681 bool wait_write_ptr;
1665 __le16 fc = hdr->frame_control; 1682 __le16 fc = hdr->frame_control;
1666 u8 hdr_len = ieee80211_hdrlen(fc); 1683 u8 hdr_len = ieee80211_hdrlen(fc);
1667 u16 wifi_seq; 1684 u16 wifi_seq;
@@ -1722,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1722 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, 1739 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
1723 IWL_HCMD_SCRATCHBUF_SIZE); 1740 IWL_HCMD_SCRATCHBUF_SIZE);
1724 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 1741 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1725 IWL_HCMD_SCRATCHBUF_SIZE, 1); 1742 IWL_HCMD_SCRATCHBUF_SIZE, true);
1726 1743
1727 /* there must be data left over for TB1 or this code must be changed */ 1744 /* there must be data left over for TB1 or this code must be changed */
1728 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); 1745 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1732,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1732 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 1749 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1733 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 1750 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1734 goto out_err; 1751 goto out_err;
1735 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); 1752 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1736 1753
1737 /* 1754 /*
1738 * Set up TFD's third entry to point directly to remainder 1755 * Set up TFD's third entry to point directly to remainder
@@ -1748,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1748 &txq->tfds[q->write_ptr]); 1765 &txq->tfds[q->write_ptr]);
1749 goto out_err; 1766 goto out_err;
1750 } 1767 }
1751 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0); 1768 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1752 } 1769 }
1753 1770
1754 /* Set up entry for this TFD in Tx byte-count array */ 1771 /* Set up entry for this TFD in Tx byte-count array */
@@ -1762,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1762 trace_iwlwifi_dev_tx_data(trans->dev, skb, 1779 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1763 skb->data + hdr_len, tb2_len); 1780 skb->data + hdr_len, tb2_len);
1764 1781
1765 if (!ieee80211_has_morefrags(fc)) { 1782 wait_write_ptr = ieee80211_has_morefrags(fc);
1766 txq->need_update = 1;
1767 } else {
1768 wait_write_ptr = 1;
1769 txq->need_update = 0;
1770 }
1771 1783
1772 /* start timer if queue currently empty */ 1784 /* start timer if queue currently empty */
1773 if (txq->need_update && q->read_ptr == q->write_ptr && 1785 if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1775,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1775 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1787 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1776 1788
1777 /* Tell device the write index *just past* this latest filled TFD */ 1789 /* Tell device the write index *just past* this latest filled TFD */
1778 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1790 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1779 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1791 if (!wait_write_ptr)
1792 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1780 1793
1781 /* 1794 /*
1782 * At this point the frame is "transmitted" successfully 1795 * At this point the frame is "transmitted" successfully
1783 * and we will get a TX status notification eventually, 1796 * and we will get a TX status notification eventually.
1784 * regardless of the value of ret. "ret" only indicates
1785 * whether or not we should update the write pointer.
1786 */ 1797 */
1787 if (iwl_queue_space(q) < q->high_mark) { 1798 if (iwl_queue_space(q) < q->high_mark) {
1788 if (wait_write_ptr) { 1799 if (wait_write_ptr)
1789 txq->need_update = 1;
1790 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1800 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1791 } else { 1801 else
1792 iwl_stop_queue(trans, txq); 1802 iwl_stop_queue(trans, txq);
1793 }
1794 } 1803 }
1795 spin_unlock(&txq->lock); 1804 spin_unlock(&txq->lock);
1796 return 0; 1805 return 0;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 54e344aed6e0..47a998d8f99e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1006,9 +1006,8 @@ struct cmd_key_material {
1006} __packed; 1006} __packed;
1007 1007
1008static int lbs_set_key_material(struct lbs_private *priv, 1008static int lbs_set_key_material(struct lbs_private *priv,
1009 int key_type, 1009 int key_type, int key_info,
1010 int key_info, 1010 const u8 *key, u16 key_len)
1011 u8 *key, u16 key_len)
1012{ 1011{
1013 struct cmd_key_material cmd; 1012 struct cmd_key_material cmd;
1014 int ret; 1013 int ret;
@@ -1610,7 +1609,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
1610 */ 1609 */
1611 1610
1612static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, 1611static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1613 u8 *mac, struct station_info *sinfo) 1612 const u8 *mac, struct station_info *sinfo)
1614{ 1613{
1615 struct lbs_private *priv = wiphy_priv(wiphy); 1614 struct lbs_private *priv = wiphy_priv(wiphy);
1616 s8 signal, noise; 1615 s8 signal, noise;
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index ab966f08024a..407784aca627 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -90,7 +90,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args) 90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
91 91
92#ifdef DEBUG 92#ifdef DEBUG
93static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) 93static inline void lbs_deb_hex(unsigned int grp, const char *prompt,
94 const u8 *buf, int len)
94{ 95{
95 int i = 0; 96 int i = 0;
96 97
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index c7366b07b568..e446fed7b345 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -71,8 +71,10 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
71 71
72 skb->ip_summed = CHECKSUM_NONE; 72 skb->ip_summed = CHECKSUM_NONE;
73 73
74 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) 74 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
75 return process_rxed_802_11_packet(priv, skb); 75 ret = process_rxed_802_11_packet(priv, skb);
76 goto done;
77 }
76 78
77 p_rx_pd = (struct rxpd *) skb->data; 79 p_rx_pd = (struct rxpd *) skb->data;
78 p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd + 80 p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
@@ -86,7 +88,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
86 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { 88 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
87 lbs_deb_rx("rx err: frame received with bad length\n"); 89 lbs_deb_rx("rx err: frame received with bad length\n");
88 dev->stats.rx_length_errors++; 90 dev->stats.rx_length_errors++;
89 ret = 0; 91 ret = -EINVAL;
90 dev_kfree_skb(skb); 92 dev_kfree_skb(skb);
91 goto done; 93 goto done;
92 } 94 }
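Two behavioral fixes in this hunk: the monitor-mode path now funnels through the common done: label instead of returning directly, giving the function a single exit for tracing and cleanup, and a too-short frame is reported as -EINVAL instead of a silent 0. The kernel's single-exit goto idiom in miniature:

#include <errno.h>
#include <stdio.h>

static int process_packet(int len, int is_monitor)
{
        int ret = 0;

        if (is_monitor) {
                ret = 0;        /* handled by the monitor path */
                goto done;      /* but still take the common exit */
        }
        if (len < 14) {
                ret = -EINVAL;  /* was: ret = 0, which hid the error */
                goto done;
        }
        /* normal receive path ... */
done:
        printf("leave: ret=%d\n", ret); /* one place for tracing */
        return ret;
}

int main(void)
{
        return process_packet(8, 0) == -EINVAL ? 0 : 1;
}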
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d7a52f5a410..a312c653d116 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
1676 return 0; 1676 return 0;
1677} 1677}
1678 1678
1679static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1679static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
1680 struct ieee80211_vif *vif,
1681 u32 queues, bool drop)
1680{ 1682{
1681 /* Not implemented, queues only on kernel side */ 1683 /* Not implemented, queues only on kernel side */
1682} 1684}
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2056 WIPHY_FLAG_AP_UAPSD | 2058 WIPHY_FLAG_AP_UAPSD |
2057 WIPHY_FLAG_HAS_CHANNEL_SWITCH; 2059 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
2058 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 2060 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
2061 hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
2059 2062
2060 /* ask mac80211 to reserve space for magic */ 2063 /* ask mac80211 to reserve space for magic */
2061 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 2064 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
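
The flush signature change above tracks a mac80211 API update that passes the target vif to the driver. A hedged sketch of a callback on the new signature; only the prototype comes from the diff, the body and the mydrv_ name are illustrative:

	#include <net/mac80211.h>

	/* vif is the interface being flushed, or NULL for all interfaces;
	 * queues is a bitmap of hardware queues; drop == true asks the
	 * driver to discard pending frames rather than wait for them. */
	static void mydrv_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
				u32 queues, bool drop)
	{
		/* hwsim keeps its queues on the kernel side, so it no-ops;
		 * real hardware would drain or drop its TX FIFOs here. */
	}
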
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index c92f27aa71ed..706831df1fa2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
212 sizeof(struct mwifiex_ie_types_header)); 212 sizeof(struct mwifiex_ie_types_header));
213 memcpy((u8 *)vht_op + 213 memcpy((u8 *)vht_op +
214 sizeof(struct mwifiex_ie_types_header), 214 sizeof(struct mwifiex_ie_types_header),
215 (u8 *)bss_desc->bcn_vht_oper + 215 (u8 *)bss_desc->bcn_vht_oper,
216 sizeof(struct ieee_types_header),
217 le16_to_cpu(vht_op->header.len)); 216 le16_to_cpu(vht_op->header.len));
218 217
219 /* negotiate the channel width and central freq 218 /* negotiate the channel width and central freq
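
The memcpy fix above (mirrored for HT in 11n.c below) drops a doubled header offset: the stored pointer appears to already reference the bytes being copied, so adding sizeof(struct ieee_types_header) again skipped real payload and read past the element. A small illustrative sketch of the off-by-a-header pitfall, with stand-in types:

	#include <stdint.h>
	#include <string.h>

	struct ie_hdr { uint8_t id; uint8_t len; };	/* 802.11 element header */

	/* 'payload' already points past the 2-byte header.  Copying from
	 * payload + sizeof(struct ie_hdr), the old code's mistake, would
	 * drop the first two payload bytes and overrun the element. */
	static void copy_element_payload(uint8_t *dst, const uint8_t *payload,
					 uint8_t len)
	{
		memcpy(dst, payload, len);		/* len excludes the header */
	}
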
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index d14ead8beca8..e1c2f67ae85e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
345 345
346 memcpy((u8 *) ht_info + 346 memcpy((u8 *) ht_info +
347 sizeof(struct mwifiex_ie_types_header), 347 sizeof(struct mwifiex_ie_types_header),
348 (u8 *) bss_desc->bcn_ht_oper + 348 (u8 *)bss_desc->bcn_ht_oper,
349 sizeof(struct ieee_types_header),
350 le16_to_cpu(ht_info->header.len)); 349 le16_to_cpu(ht_info->header.len));
351 350
352 if (!(sband->ht_cap.cap & 351 if (!(sband->ht_cap.cap &
@@ -750,3 +749,45 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
750 749
751 return; 750 return;
752} 751}
752
753u8 mwifiex_get_sec_chan_offset(int chan)
754{
755 u8 sec_offset;
756
757 switch (chan) {
758 case 36:
759 case 44:
760 case 52:
761 case 60:
762 case 100:
763 case 108:
764 case 116:
765 case 124:
766 case 132:
767 case 140:
768 case 149:
769 case 157:
770 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
771 break;
772 case 40:
773 case 48:
774 case 56:
775 case 64:
776 case 104:
777 case 112:
778 case 120:
779 case 128:
780 case 136:
781 case 144:
782 case 153:
783 case 161:
784 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
785 break;
786 case 165:
787 default:
788 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
789 break;
790 }
791
792 return sec_offset;
793}
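
The channel map in mwifiex_get_sec_chan_offset() follows the 5 GHz 40 MHz pairing rule: the lower channel of each (36,40)-style pair extends upward, the upper one downward, and 165 has no partner. A standalone cross-check of that rule, assuming the usual IEEE80211_HT_PARAM_CHA_SEC_* values (0, 1, 3); the arithmetic form is illustrative, not the driver's code:

	#include <stdio.h>

	enum { SEC_NONE = 0, SEC_ABOVE = 1, SEC_BELOW = 3 };

	static int sec_chan_offset(int chan)
	{
		if (chan == 165)
			return SEC_NONE;	/* no 40 MHz partner */
		/* pairs start at 36 and repeat every 8 channel numbers */
		return ((chan - 36) / 4) % 2 ? SEC_BELOW : SEC_ABOVE;
	}

	int main(void)
	{
		int chans[] = { 36, 40, 100, 144, 149, 161, 165 };

		for (unsigned int i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
			printf("chan %3d -> offset %d\n", chans[i],
			       sec_chan_offset(chans[i]));
		return 0;
	}
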
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 40b007a00f4b..0b73fa08f5d4 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -63,6 +63,7 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
63 int cmd_action, 63 int cmd_action,
64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); 64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra); 65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
66u8 mwifiex_get_sec_chan_offset(int chan);
66 67
67static inline u8 68static inline u8
68mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv, 69mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
@@ -199,7 +200,7 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
199} 200}
200 201
201static inline u8 202static inline u8
202mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra) 203mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, const u8 *ra)
203{ 204{
204 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra); 205 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
205 if (node) 206 if (node)
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 63211707f939..5b32106182f8 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -100,6 +100,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
100 struct sk_buff *skb) 100 struct sk_buff *skb)
101{ 101{
102 struct txpd *local_tx_pd; 102 struct txpd *local_tx_pd;
103 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
103 104
104 skb_push(skb, sizeof(*local_tx_pd)); 105 skb_push(skb, sizeof(*local_tx_pd));
105 106
@@ -118,6 +119,9 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
118 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - 119 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
119 sizeof(*local_tx_pd)); 120 sizeof(*local_tx_pd));
120 121
122 if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
123 local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
124
121 if (local_tx_pd->tx_control == 0) 125 if (local_tx_pd->tx_control == 0)
122 /* TxCtrl set by user or default */ 126 /* TxCtrl set by user or default */
123 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); 127 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
@@ -160,6 +164,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 164 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 165 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 166 struct txpd *ptx_pd = NULL;
167 struct timeval tv;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN; 168 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
164 169
165 skb_src = skb_peek(&pra_list->skb_head); 170 skb_src = skb_peek(&pra_list->skb_head);
@@ -182,8 +187,14 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
182 187
183 tx_info_aggr->bss_type = tx_info_src->bss_type; 188 tx_info_aggr->bss_type = tx_info_src->bss_type;
184 tx_info_aggr->bss_num = tx_info_src->bss_num; 189 tx_info_aggr->bss_num = tx_info_src->bss_num;
190
191 if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
192 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
185 skb_aggr->priority = skb_src->priority; 193 skb_aggr->priority = skb_src->priority;
186 194
195 do_gettimeofday(&tv);
196 skb_aggr->tstamp = timeval_to_ktime(tv);
197
187 do { 198 do {
188 /* Check if AMSDU can accommodate this MSDU */ 199 /* Check if AMSDU can accommodate this MSDU */
189 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) 200 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +247,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
236 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, 247 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
237 skb_aggr, NULL); 248 skb_aggr, NULL);
238 } else { 249 } else {
239 /* 250 if (skb_src)
240 * Padding per MSDU will affect the length of next 251 tx_param.next_pkt_len =
241 * packet and hence the exact length of next packet 252 skb_src->len + sizeof(struct txpd);
242 * is uncertain here. 253 else
243 * 254 tx_param.next_pkt_len = 0;
244 * Also, aggregation of transmission buffer, while
245 * downloading the data to the card, wont gain much
246 * on the AMSDU packets as the AMSDU packets utilizes
247 * the transmission buffer space to the maximum
248 * (adapter->tx_buf_size).
249 */
250 tx_param.next_pkt_len = 0;
251 255
252 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 256 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
253 skb_aggr, &tx_param); 257 skb_aggr, &tx_param);
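
The last hunk above replaces the always-zero hint with the length of the next queued MSDU plus its txpd, letting the bus driver keep aggregating instead of flushing after every AMSDU. A hedged sketch of how such a hint is typically consumed; tx_param mirrors the shape of mwifiex_tx_param, everything else is illustrative:

	#include <stddef.h>
	#include <stdint.h>

	struct tx_param { uint32_t next_pkt_len; };	/* shaped like mwifiex_tx_param */

	static int buffer_packet(const void *buf, size_t len) { return 0; }
	static int flush_to_bus(const void *buf, size_t len) { return 0; }

	static int host_to_card(const void *buf, size_t len,
				const struct tx_param *tp)
	{
		/* A non-zero hint says another packet of that size follows,
		 * so the bus layer may hold this one in its aggregation
		 * buffer and send both in one transaction; 0 means flush. */
		if (tp && tp->next_pkt_len)
			return buffer_packet(buf, len);
		return flush_to_bus(buf, len);
	}
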
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b9242c3dca43..3b55ce5690a5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -200,4 +200,11 @@ getlog
200 200
201 cat getlog 201 cat getlog
202 202
203fw_dump
204 This command is used to dump firmware memory into files.
205	A separate file will be created for each memory segment.
206 Usage:
207
208 cat fw_dump
209
203=============================================================================== 210===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 21ee27ab7b74..e95dec91a561 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -994,7 +994,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
994 */ 994 */
995static int 995static int
996mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, 996mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
997 u8 *mac, struct station_info *sinfo) 997 const u8 *mac, struct station_info *sinfo)
998{ 998{
999 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 999 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1000 1000
@@ -1270,7 +1270,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
1270 */ 1270 */
1271static int 1271static int
1272mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, 1272mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1273 u8 *mac) 1273 const u8 *mac)
1274{ 1274{
1275 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1275 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1276 struct mwifiex_sta_node *sta_node; 1276 struct mwifiex_sta_node *sta_node;
@@ -2629,7 +2629,7 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
2629 */ 2629 */
2630static int 2630static int
2631mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, 2631mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2632 u8 *peer, u8 action_code, u8 dialog_token, 2632 const u8 *peer, u8 action_code, u8 dialog_token,
2633 u16 status_code, u32 peer_capability, 2633 u16 status_code, u32 peer_capability,
2634 const u8 *extra_ies, size_t extra_ies_len) 2634 const u8 *extra_ies, size_t extra_ies_len)
2635{ 2635{
@@ -2701,7 +2701,7 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2701 2701
2702static int 2702static int
2703mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, 2703mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2704 u8 *peer, enum nl80211_tdls_operation action) 2704 const u8 *peer, enum nl80211_tdls_operation action)
2705{ 2705{
2706 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2706 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2707 2707
@@ -2748,9 +2748,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2748} 2748}
2749 2749
2750static int 2750static int
2751mwifiex_cfg80211_add_station(struct wiphy *wiphy, 2751mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2752 struct net_device *dev, 2752 const u8 *mac, struct station_parameters *params)
2753 u8 *mac, struct station_parameters *params)
2754{ 2753{
2755 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2754 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2756 2755
@@ -2765,9 +2764,9 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy,
2765} 2764}
2766 2765
2767static int 2766static int
2768mwifiex_cfg80211_change_station(struct wiphy *wiphy, 2767mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
2769 struct net_device *dev, 2768 const u8 *mac,
2770 u8 *mac, struct station_parameters *params) 2769 struct station_parameters *params)
2771{ 2770{
2772 int ret; 2771 int ret;
2773 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2772 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1062c918a7bf..8dee6c86f4f1 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
955 adapter->cmd_wait_q.status = -ETIMEDOUT; 955 adapter->cmd_wait_q.status = -ETIMEDOUT;
956 wake_up_interruptible(&adapter->cmd_wait_q.wait); 956 wake_up_interruptible(&adapter->cmd_wait_q.wait);
957 mwifiex_cancel_pending_ioctl(adapter); 957 mwifiex_cancel_pending_ioctl(adapter);
958 /* reset cmd_sent flag to unblock new commands */
959 adapter->cmd_sent = false;
960 } 958 }
961 } 959 }
962 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) 960 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index b8a49aad12fd..7b419bbcd544 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -257,6 +257,29 @@ free_and_exit:
257} 257}
258 258
259/* 259/*
260 * Proc firmware dump read handler.
261 *
262 * This function is called when the 'fw_dump' file is opened for
263 * reading.
 264 * It dumps firmware memory into separate files
 265 * (e.g. DTCM, ITCM, SQRAM), one per segment, for
266 * debugging.
267 */
268static ssize_t
269mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
270 size_t count, loff_t *ppos)
271{
272 struct mwifiex_private *priv = file->private_data;
273
274 if (!priv->adapter->if_ops.fw_dump)
275 return -EIO;
276
277 priv->adapter->if_ops.fw_dump(priv->adapter);
278
279 return 0;
280}
281
282/*
260 * Proc getlog file read handler. 283 * Proc getlog file read handler.
261 * 284 *
262 * This function is called when the 'getlog' file is opened for reading 285 * This function is called when the 'getlog' file is opened for reading
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
699MWIFIEX_DFS_FILE_READ_OPS(info); 722MWIFIEX_DFS_FILE_READ_OPS(info);
700MWIFIEX_DFS_FILE_READ_OPS(debug); 723MWIFIEX_DFS_FILE_READ_OPS(debug);
701MWIFIEX_DFS_FILE_READ_OPS(getlog); 724MWIFIEX_DFS_FILE_READ_OPS(getlog);
725MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
702MWIFIEX_DFS_FILE_OPS(regrdwr); 726MWIFIEX_DFS_FILE_OPS(regrdwr);
703MWIFIEX_DFS_FILE_OPS(rdeeprom); 727MWIFIEX_DFS_FILE_OPS(rdeeprom);
704 728
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
722 MWIFIEX_DFS_ADD_FILE(getlog); 746 MWIFIEX_DFS_ADD_FILE(getlog);
723 MWIFIEX_DFS_ADD_FILE(regrdwr); 747 MWIFIEX_DFS_ADD_FILE(regrdwr);
724 MWIFIEX_DFS_ADD_FILE(rdeeprom); 748 MWIFIEX_DFS_ADD_FILE(rdeeprom);
749 MWIFIEX_DFS_ADD_FILE(fw_dump);
725} 750}
726 751
727/* 752/*
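
The fw_dump file above is wired up through the driver's MWIFIEX_DFS_FILE_READ_OPS and MWIFIEX_DFS_ADD_FILE macros. Underneath, that is the standard debugfs pattern; a hedged sketch with illustrative names (mydrv_*), using the real debugfs_create_file() and simple_open() helpers:

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static void mydrv_trigger_dump(void *priv) { }	/* illustrative action */

	static ssize_t mydrv_dump_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *ppos)
	{
		/* reading the file only triggers the dump; returning 0 (EOF)
		 * matches mwifiex_fw_dump_read() above */
		mydrv_trigger_dump(file->private_data);
		return 0;
	}

	static const struct file_operations mydrv_dump_fops = {
		.owner = THIS_MODULE,
		.open = simple_open,	/* copies inode private data into the file */
		.read = mydrv_dump_read,
	};

	/* at init time:
	 *	debugfs_create_file("fw_dump", 0444, parent_dentry, priv,
	 *			    &mydrv_dump_fops);
	 */
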
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e7b3e16e5d34..38da6ff6f416 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -42,12 +42,12 @@
42#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 42#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
43#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 43#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
44 44
45#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16 45#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
46#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32 46#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
47#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32 47#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
48#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16 48#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
49#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32 49#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64
50#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48 50#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64
51#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48 51#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
52#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32 52#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
53 53
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index b485dc1ae5eb..3175dd04834b 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,6 +169,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
169#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) 169#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
170#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) 170#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
171#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) 171#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
172#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
172#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199) 173#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
173 174
174#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 175#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -229,6 +230,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
229#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8)) 230#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
230#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22)) 231#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
231#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30)) 232#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
233#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
232 234
233/* httxcfg bitmap 235/* httxcfg bitmap
234 * 0 reserved 236 * 0 reserved
@@ -403,7 +405,7 @@ enum P2P_MODES {
403#define HS_CFG_CANCEL 0xffffffff 405#define HS_CFG_CANCEL 0xffffffff
404#define HS_CFG_COND_DEF 0x00000000 406#define HS_CFG_COND_DEF 0x00000000
405#define HS_CFG_GPIO_DEF 0xff 407#define HS_CFG_GPIO_DEF 0xff
406#define HS_CFG_GAP_DEF 0 408#define HS_CFG_GAP_DEF 0xff
407#define HS_CFG_COND_BROADCAST_DATA 0x00000001 409#define HS_CFG_COND_BROADCAST_DATA 0x00000001
408#define HS_CFG_COND_UNICAST_DATA 0x00000002 410#define HS_CFG_COND_UNICAST_DATA 0x00000002
409#define HS_CFG_COND_MAC_EVENT 0x00000004 411#define HS_CFG_COND_MAC_EVENT 0x00000004
@@ -487,6 +489,7 @@ enum P2P_MODES {
487#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 489#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
488#define EVENT_HOSTWAKE_STAIE 0x0000004d 490#define EVENT_HOSTWAKE_STAIE 0x0000004d
489#define EVENT_CHANNEL_SWITCH_ANN 0x00000050 491#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
492#define EVENT_TDLS_GENERIC_EVENT 0x00000052
490#define EVENT_EXT_SCAN_REPORT 0x00000058 493#define EVENT_EXT_SCAN_REPORT 0x00000058
491#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 494#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
492 495
@@ -519,6 +522,7 @@ enum P2P_MODES {
519#define ACT_TDLS_DELETE 0x00 522#define ACT_TDLS_DELETE 0x00
520#define ACT_TDLS_CREATE 0x01 523#define ACT_TDLS_CREATE 0x01
521#define ACT_TDLS_CONFIG 0x02 524#define ACT_TDLS_CONFIG 0x02
525#define TDLS_EVENT_LINK_TEAR_DOWN 3
522 526
523#define MWIFIEX_FW_V15 15 527#define MWIFIEX_FW_V15 15
524 528
@@ -535,6 +539,7 @@ struct mwifiex_ie_types_data {
535#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01 539#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
536#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08 540#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
537#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10 541#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
542#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
538 543
539struct txpd { 544struct txpd {
540 u8 bss_type; 545 u8 bss_type;
@@ -577,7 +582,7 @@ struct rxpd {
577 * [Bit 7] Reserved 582 * [Bit 7] Reserved
578 */ 583 */
579 u8 ht_info; 584 u8 ht_info;
580 u8 reserved; 585 u8 flags;
581} __packed; 586} __packed;
582 587
583struct uap_txpd { 588struct uap_txpd {
@@ -708,6 +713,13 @@ struct mwifiex_ie_types_vendor_param_set {
708 u8 ie[MWIFIEX_MAX_VSIE_LEN]; 713 u8 ie[MWIFIEX_MAX_VSIE_LEN];
709}; 714};
710 715
716#define MWIFIEX_TDLS_IDLE_TIMEOUT 60
717
718struct mwifiex_ie_types_tdls_idle_timeout {
719 struct mwifiex_ie_types_header header;
720 __le16 value;
721} __packed;
722
711struct mwifiex_ie_types_rsn_param_set { 723struct mwifiex_ie_types_rsn_param_set {
712 struct mwifiex_ie_types_header header; 724 struct mwifiex_ie_types_header header;
713 u8 rsn_ie[1]; 725 u8 rsn_ie[1];
@@ -1745,6 +1757,15 @@ struct host_cmd_ds_802_11_subsc_evt {
1745 __le16 events; 1757 __le16 events;
1746} __packed; 1758} __packed;
1747 1759
1760struct mwifiex_tdls_generic_event {
1761 __le16 type;
1762 u8 peer_mac[ETH_ALEN];
1763 union {
1764 __le16 reason_code;
1765 __le16 reserved;
1766 } u;
1767} __packed;
1768
1748struct mwifiex_ie { 1769struct mwifiex_ie {
1749 __le16 ie_index; 1770 __le16 ie_index;
1750 __le16 mgmt_subtype_mask; 1771 __le16 mgmt_subtype_mask;
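
TLV_TYPE_TDLS_IDLE_TIMEOUT and its two-byte payload struct above follow the firmware's generic type/length/value layout: both header fields little-endian, with the length counting the payload only. A standalone sketch of appending such a TLV to a command buffer; the struct shapes mirror the diff, while the type constant and helper are illustrative:

	#include <stdint.h>
	#include <string.h>

	/* userspace stand-ins; kernel code would use __le16 + cpu_to_le16() */
	struct tlv_header { uint16_t type; uint16_t len; };
	struct tlv_idle_timeout { struct tlv_header header; uint16_t value; };

	#define TLV_TYPE_IDLE_TIMEOUT	0x01c2	/* illustrative value */
	#define IDLE_TIMEOUT_SECONDS	60

	static size_t append_idle_timeout_tlv(uint8_t *pos)
	{
		struct tlv_idle_timeout tlv;

		tlv.header.type = TLV_TYPE_IDLE_TIMEOUT;
		tlv.header.len = sizeof(tlv.value);	/* payload only */
		tlv.value = IDLE_TIMEOUT_SECONDS;
		memcpy(pos, &tlv, sizeof(tlv));
		return sizeof(tlv);		/* caller advances its cursor */
	}
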
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index ee494db54060..1b576722671d 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
303 u32 rx_ant; 303 u32 rx_ant;
304}; 304};
305 305
306#define MWIFIEX_NUM_OF_CMD_BUFFER 20 306#define MWIFIEX_NUM_OF_CMD_BUFFER 50
307#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048 307#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
308 308
309enum { 309enum {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c771b3e9918..cbabc12fbda3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -521,7 +521,6 @@ done:
521 release_firmware(adapter->firmware); 521 release_firmware(adapter->firmware);
522 adapter->firmware = NULL; 522 adapter->firmware = NULL;
523 } 523 }
524 complete(&adapter->fw_load);
525 if (init_failed) 524 if (init_failed)
526 mwifiex_free_adapter(adapter); 525 mwifiex_free_adapter(adapter);
527 up(sem); 526 up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
535{ 534{
536 int ret; 535 int ret;
537 536
538 init_completion(&adapter->fw_load);
539 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, 537 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
540 adapter->dev, GFP_KERNEL, adapter, 538 adapter->dev, GFP_KERNEL, adapter,
541 mwifiex_fw_dpc); 539 mwifiex_fw_dpc);
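
With the fw_load completion gone, the driver leans entirely on the asynchronous request_firmware_nowait() contract: the call returns immediately and the callback runs exactly once, with fw == NULL on failure. A hedged sketch of that pattern; my_adapter and my_fw_done() are illustrative, the API call is real:

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/module.h>

	struct my_adapter {			/* illustrative context */
		struct device *dev;
		const struct firmware *fw;
	};

	static void my_fw_done(const struct firmware *fw, void *context)
	{
		struct my_adapter *adapter = context;

		if (!fw) {			/* load failed or was aborted */
			dev_err(adapter->dev, "firmware not available\n");
			return;
		}
		adapter->fw = fw;		/* release_firmware() when done */
	}

	static int my_start_fw(struct my_adapter *adapter, const char *name)
	{
		/* returns at once; my_fw_done() fires when the image arrives */
		return request_firmware_nowait(THIS_MODULE, 1, name, adapter->dev,
					       GFP_KERNEL, adapter, my_fw_done);
	}
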
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d53e1e8c9467..1398afa84064 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
672 int (*init_fw_port) (struct mwifiex_adapter *); 672 int (*init_fw_port) (struct mwifiex_adapter *);
673 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); 673 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
674 void (*card_reset) (struct mwifiex_adapter *); 674 void (*card_reset) (struct mwifiex_adapter *);
675 void (*fw_dump)(struct mwifiex_adapter *);
675 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); 676 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
676}; 677};
677 678
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
787 struct mwifiex_wait_queue cmd_wait_q; 788 struct mwifiex_wait_queue cmd_wait_q;
788 u8 scan_wait_q_woken; 789 u8 scan_wait_q_woken;
789 spinlock_t queue_lock; /* lock for tx queues */ 790 spinlock_t queue_lock; /* lock for tx queues */
790 struct completion fw_load;
791 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 791 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
792 u16 max_mgmt_ie_index; 792 u16 max_mgmt_ie_index;
793 u8 scan_delay_cnt; 793 u8 scan_delay_cnt;
@@ -910,8 +910,6 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
910 struct sk_buff *skb); 910 struct sk_buff *skb);
911int mwifiex_process_sta_event(struct mwifiex_private *); 911int mwifiex_process_sta_event(struct mwifiex_private *);
912int mwifiex_process_uap_event(struct mwifiex_private *); 912int mwifiex_process_uap_event(struct mwifiex_private *);
913struct mwifiex_sta_node *
914mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
915void mwifiex_delete_all_station_list(struct mwifiex_private *priv); 913void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
916void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); 914void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
917void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); 915void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
@@ -1101,7 +1099,7 @@ mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
1101 return 0; 1099 return 0;
1102 1100
1103 /* Clear csa channel, if DFS channel move time has passed */ 1101 /* Clear csa channel, if DFS channel move time has passed */
1104 if (jiffies > priv->csa_expire_time) { 1102 if (time_after(jiffies, priv->csa_expire_time)) {
1105 priv->csa_chan = 0; 1103 priv->csa_chan = 0;
1106 priv->csa_expire_time = 0; 1104 priv->csa_expire_time = 0;
1107 } 1105 }
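
The time_after() conversion above matters because jiffies wraps: once the counter overflows, a raw 'jiffies > expiry' comparison is false for every expiry stored before the wrap. A runnable userspace rendition of the kernel macro (include/linux/jiffies.h defines it the same way, plus type checks):

	#include <stdio.h>

	/* wrap-safe: subtract, then test the sign of the difference */
	#define time_after(a, b)	((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long jiffies = 10;			/* just wrapped */
		unsigned long expiry = (unsigned long)-5;	/* set pre-wrap */

		printf("raw '>'    says expired: %d\n", jiffies > expiry);	/* 0 */
		printf("time_after says expired: %d\n",
		       time_after(jiffies, expiry));				/* 1 */
		return 0;
	}
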
@@ -1220,26 +1218,26 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
1220extern const struct ethtool_ops mwifiex_ethtool_ops; 1218extern const struct ethtool_ops mwifiex_ethtool_ops;
1221 1219
1222void mwifiex_del_all_sta_list(struct mwifiex_private *priv); 1220void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
1223void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac); 1221void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1224void 1222void
1225mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, 1223mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
1226 int ies_len, struct mwifiex_sta_node *node); 1224 int ies_len, struct mwifiex_sta_node *node);
1227struct mwifiex_sta_node * 1225struct mwifiex_sta_node *
1228mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac); 1226mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1229struct mwifiex_sta_node * 1227struct mwifiex_sta_node *
1230mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac); 1228mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1231int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer, 1229int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
1232 u8 action_code, u8 dialog_token, 1230 u8 action_code, u8 dialog_token,
1233 u16 status_code, const u8 *extra_ies, 1231 u16 status_code, const u8 *extra_ies,
1234 size_t extra_ies_len); 1232 size_t extra_ies_len);
1235int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, 1233int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
1236 u8 *peer, u8 action_code, u8 dialog_token, 1234 u8 action_code, u8 dialog_token,
1237 u16 status_code, const u8 *extra_ies, 1235 u16 status_code, const u8 *extra_ies,
1238 size_t extra_ies_len); 1236 size_t extra_ies_len);
1239void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, 1237void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1240 u8 *buf, int len); 1238 u8 *buf, int len);
1241int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action); 1239int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
1242int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac); 1240int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
1243void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv); 1241void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
1244bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv); 1242bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
1245u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, 1243u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a7e8b96b2d90..574d4b597468 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
221 if (!adapter || !adapter->priv_num) 221 if (!adapter || !adapter->priv_num)
222 return; 222 return;
223 223
224 /* In case driver is removed when asynchronous FW load is in progress */
225 wait_for_completion(&adapter->fw_load);
226
227 if (user_rmmod) { 224 if (user_rmmod) {
228#ifdef CONFIG_PM_SLEEP 225#ifdef CONFIG_PM_SLEEP
229 if (adapter->is_suspended) 226 if (adapter->is_suspended)
@@ -1074,6 +1071,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
1074 * is mapped to PCI device memory. Tx ring pointers are advanced accordingly. 1071 * is mapped to PCI device memory. Tx ring pointers are advanced accordingly.
1075 * Download ready interrupt to FW is deferred if Tx ring is not full and 1072 * Download ready interrupt to FW is deferred if Tx ring is not full and
1076 * additional payload can be accommodated. 1073 * additional payload can be accommodated.
 1074 * The caller must ensure the tx_param parameter to this function is not NULL.
1077 */ 1075 */
1078static int 1076static int
1079mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, 1077mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 7b3af3d29ded..45c5b3450cf5 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -29,9 +29,6 @@
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14 29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30 30
31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4 31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
32#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
33#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
34#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
35 32
36/* Memory needed to store a max sized Channel List TLV for a firmware scan */ 33/* Memory needed to store a max sized Channel List TLV for a firmware scan */
37#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \ 34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
1055 1052
1056 /* 1053 /*
1057 * In associated state we will reduce the number of channels scanned per 1054 * In associated state we will reduce the number of channels scanned per
1058 * scan command to avoid any traffic delay/loss. This number is decided 1055 * scan command to 1 to avoid any traffic delay/loss.
1059 * based on total number of channels to be scanned due to constraints
1060 * of command buffers.
1061 */ 1056 */
1062 if (priv->media_connected) { 1057 if (priv->media_connected)
1063 if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
1064 *max_chan_per_scan = 1; 1058 *max_chan_per_scan = 1;
1065 else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
1066 *max_chan_per_scan = 2;
1067 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
1068 *max_chan_per_scan = 3;
1069 else
1070 *max_chan_per_scan = 4;
1071 }
1072} 1059}
1073 1060
1074/* 1061/*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1353 bss_entry->beacon_buf); 1340 bss_entry->beacon_buf);
1354 break; 1341 break;
1355 case WLAN_EID_BSS_COEX_2040: 1342 case WLAN_EID_BSS_COEX_2040:
1356 bss_entry->bcn_bss_co_2040 = current_ptr + 1343 bss_entry->bcn_bss_co_2040 = current_ptr;
1357 sizeof(struct ieee_types_header); 1344 bss_entry->bss_co_2040_offset =
1358 bss_entry->bss_co_2040_offset = (u16) (current_ptr + 1345 (u16) (current_ptr - bss_entry->beacon_buf);
1359 sizeof(struct ieee_types_header) -
1360 bss_entry->beacon_buf);
1361 break; 1346 break;
1362 case WLAN_EID_EXT_CAPABILITY: 1347 case WLAN_EID_EXT_CAPABILITY:
1363 bss_entry->bcn_ext_cap = current_ptr + 1348 bss_entry->bcn_ext_cap = current_ptr;
1364 sizeof(struct ieee_types_header); 1349 bss_entry->ext_cap_offset =
1365 bss_entry->ext_cap_offset = (u16) (current_ptr + 1350 (u16) (current_ptr - bss_entry->beacon_buf);
1366 sizeof(struct ieee_types_header) -
1367 bss_entry->beacon_buf);
1368 break; 1351 break;
1369 case WLAN_EID_OPMODE_NOTIF: 1352 case WLAN_EID_OPMODE_NOTIF:
1370 bss_entry->oper_mode = 1353 bss_entry->oper_mode = (void *)current_ptr;
1371 (void *)(current_ptr +
1372 sizeof(struct ieee_types_header));
1373 bss_entry->oper_mode_offset = 1354 bss_entry->oper_mode_offset =
1374 (u16)((u8 *)bss_entry->oper_mode - 1355 (u16)((u8 *)bss_entry->oper_mode -
1375 bss_entry->beacon_buf); 1356 bss_entry->beacon_buf);
@@ -1757,6 +1738,19 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
1757 return 0; 1738 return 0;
1758} 1739}
1759 1740
1741static void mwifiex_complete_scan(struct mwifiex_private *priv)
1742{
1743 struct mwifiex_adapter *adapter = priv->adapter;
1744
1745 if (adapter->curr_cmd->wait_q_enabled) {
1746 adapter->cmd_wait_q.status = 0;
1747 if (!priv->scan_request) {
1748 dev_dbg(adapter->dev, "complete internal scan\n");
1749 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
1750 }
1751 }
1752}
1753
1760static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) 1754static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
1761{ 1755{
1762 struct mwifiex_adapter *adapter = priv->adapter; 1756 struct mwifiex_adapter *adapter = priv->adapter;
@@ -1770,16 +1764,9 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
1770 adapter->scan_processing = false; 1764 adapter->scan_processing = false;
1771 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 1765 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1772 1766
1773 /* Need to indicate IOCTL complete */ 1767 if (!adapter->ext_scan)
1774 if (adapter->curr_cmd->wait_q_enabled) { 1768 mwifiex_complete_scan(priv);
1775 adapter->cmd_wait_q.status = 0; 1769
1776 if (!priv->scan_request) {
1777 dev_dbg(adapter->dev,
1778 "complete internal scan\n");
1779 mwifiex_complete_cmd(adapter,
1780 adapter->curr_cmd);
1781 }
1782 }
1783 if (priv->report_scan_result) 1770 if (priv->report_scan_result)
1784 priv->report_scan_result = false; 1771 priv->report_scan_result = false;
1785 1772
@@ -1984,6 +1971,9 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
1984int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv) 1971int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
1985{ 1972{
1986 dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n"); 1973 dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
1974
1975 mwifiex_complete_scan(priv);
1976
1987 return 0; 1977 return 0;
1988} 1978}
1989 1979
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d206f04d4994..4ce3d7b33991 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
85 card->supports_sdio_new_mode = data->supports_sdio_new_mode; 85 card->supports_sdio_new_mode = data->supports_sdio_new_mode;
86 card->has_control_mask = data->has_control_mask; 86 card->has_control_mask = data->has_control_mask;
87 card->tx_buf_size = data->tx_buf_size; 87 card->tx_buf_size = data->tx_buf_size;
88 card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
89 card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
88 } 90 }
89 91
90 sdio_claim_host(func); 92 sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
177 if (!adapter || !adapter->priv_num) 179 if (!adapter || !adapter->priv_num)
178 return; 180 return;
179 181
180 /* In case driver is removed when asynchronous FW load is in progress */
181 wait_for_completion(&adapter->fw_load);
182
183 if (user_rmmod) { 182 if (user_rmmod) {
184 if (adapter->is_suspended) 183 if (adapter->is_suspended)
185 mwifiex_sdio_resume(adapter->dev); 184 mwifiex_sdio_resume(adapter->dev);
@@ -1679,8 +1678,12 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1679 if (ret) { 1678 if (ret) {
1680 if (type == MWIFIEX_TYPE_CMD) 1679 if (type == MWIFIEX_TYPE_CMD)
1681 adapter->cmd_sent = false; 1680 adapter->cmd_sent = false;
1682 if (type == MWIFIEX_TYPE_DATA) 1681 if (type == MWIFIEX_TYPE_DATA) {
1683 adapter->data_sent = false; 1682 adapter->data_sent = false;
1683 /* restore curr_wr_port in error cases */
1684 card->curr_wr_port = port;
1685 card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
1686 }
1684 } else { 1687 } else {
1685 if (type == MWIFIEX_TYPE_DATA) { 1688 if (type == MWIFIEX_TYPE_DATA) {
1686 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port))) 1689 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
@@ -1842,8 +1845,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1842 card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) * 1845 card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
1843 card->mp_agg_pkt_limit, GFP_KERNEL); 1846 card->mp_agg_pkt_limit, GFP_KERNEL);
1844 ret = mwifiex_alloc_sdio_mpa_buffers(adapter, 1847 ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
1845 SDIO_MP_TX_AGGR_DEF_BUF_SIZE, 1848 card->mp_tx_agg_buf_size,
1846 SDIO_MP_RX_AGGR_DEF_BUF_SIZE); 1849 card->mp_rx_agg_buf_size);
1847 if (ret) { 1850 if (ret) {
1848 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n"); 1851 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
1849 kfree(card->mp_regs); 1852 kfree(card->mp_regs);
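
The error path added above undoes the port bookkeeping a failed write left behind: curr_wr_port had already advanced and the port's bit had been claimed from mp_wr_bitmap. A minimal sketch of that claim/restore pairing with illustrative names:

	#include <stdint.h>

	struct port_state {
		uint32_t wr_bitmap;	/* bit n set: port n free for writes */
		uint32_t curr_wr_port;
	};

	static void claim_port(struct port_state *s, uint32_t port)
	{
		s->wr_bitmap &= ~(1u << port);		/* port now in flight */
		s->curr_wr_port = (port + 1) % 32;	/* advance the cursor */
	}

	static void restore_port(struct port_state *s, uint32_t port)
	{
		s->curr_wr_port = port;		/* rewind to the failed port */
		s->wr_bitmap |= 1u << port;	/* mark it writable again */
	}
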
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c71201b2e2a3..6eea30b43ed7 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -64,10 +64,8 @@
64#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U) 64#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
65#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U) 65#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
66 66
67#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */ 67#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
68 68#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
69/* Multi port RX aggregation buffer size */
70#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */
71 69
72/* Misc. Config Register : Auto Re-enable interrupts */ 70/* Misc. Config Register : Auto Re-enable interrupts */
73#define AUTO_RE_ENABLE_INT BIT(4) 71#define AUTO_RE_ENABLE_INT BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
234 bool supports_sdio_new_mode; 232 bool supports_sdio_new_mode;
235 bool has_control_mask; 233 bool has_control_mask;
236 u16 tx_buf_size; 234 u16 tx_buf_size;
235 u32 mp_tx_agg_buf_size;
236 u32 mp_rx_agg_buf_size;
237 237
238 u32 mp_rd_bitmap; 238 u32 mp_rd_bitmap;
239 u32 mp_wr_bitmap; 239 u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
258 bool supports_sdio_new_mode; 258 bool supports_sdio_new_mode;
259 bool has_control_mask; 259 bool has_control_mask;
260 u16 tx_buf_size; 260 u16 tx_buf_size;
261 u32 mp_tx_agg_buf_size;
262 u32 mp_rx_agg_buf_size;
261}; 263};
262 264
263static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { 265static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
315 .supports_sdio_new_mode = false, 317 .supports_sdio_new_mode = false,
316 .has_control_mask = true, 318 .has_control_mask = true,
317 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, 319 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
320 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
321 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
318}; 322};
319 323
320static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { 324static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
325 .supports_sdio_new_mode = false, 329 .supports_sdio_new_mode = false,
326 .has_control_mask = true, 330 .has_control_mask = true,
327 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, 331 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
332 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
333 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
328}; 334};
329 335
330static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { 336static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
335 .supports_sdio_new_mode = false, 341 .supports_sdio_new_mode = false,
336 .has_control_mask = true, 342 .has_control_mask = true,
337 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, 343 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
344 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
345 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
338}; 346};
339 347
340static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { 348static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
345 .supports_sdio_new_mode = true, 353 .supports_sdio_new_mode = true,
346 .has_control_mask = false, 354 .has_control_mask = false,
347 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 355 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
356 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
357 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
348}; 358};
349 359
350/* 360/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index e3cac1495cc7..88202ce0c139 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1546,6 +1546,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
1546 struct mwifiex_ie_types_extcap *extcap; 1546 struct mwifiex_ie_types_extcap *extcap;
1547 struct mwifiex_ie_types_vhtcap *vht_capab; 1547 struct mwifiex_ie_types_vhtcap *vht_capab;
1548 struct mwifiex_ie_types_aid *aid; 1548 struct mwifiex_ie_types_aid *aid;
1549 struct mwifiex_ie_types_tdls_idle_timeout *timeout;
1549 u8 *pos, qos_info; 1550 u8 *pos, qos_info;
1550 u16 config_len = 0; 1551 u16 config_len = 0;
1551 struct station_parameters *params = priv->sta_params; 1552 struct station_parameters *params = priv->sta_params;
@@ -1643,6 +1644,12 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
1643 config_len += sizeof(struct mwifiex_ie_types_aid); 1644 config_len += sizeof(struct mwifiex_ie_types_aid);
1644 } 1645 }
1645 1646
1647 timeout = (void *)(pos + config_len);
1648 timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
1649 timeout->header.len = cpu_to_le16(sizeof(timeout->value));
1650 timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
1651 config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
1652
1646 break; 1653 break;
1647 default: 1654 default:
1648 dev_err(priv->adapter->dev, "Unknown TDLS operation\n"); 1655 dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index bfebb0144df5..577f2979ed8f 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -865,14 +865,20 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
865 865
866 switch (action) { 866 switch (action) {
867 case ACT_TDLS_DELETE: 867 case ACT_TDLS_DELETE:
868 if (reason) 868 if (reason) {
869 dev_err(priv->adapter->dev, 869 if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
870 "TDLS link delete for %pM failed: reason %d\n", 870 dev_dbg(priv->adapter->dev,
871 cmd_tdls_oper->peer_mac, reason); 871 "TDLS link delete for %pM failed: reason %d\n",
872 else 872 cmd_tdls_oper->peer_mac, reason);
873 else
874 dev_err(priv->adapter->dev,
875 "TDLS link delete for %pM failed: reason %d\n",
876 cmd_tdls_oper->peer_mac, reason);
877 } else {
873 dev_dbg(priv->adapter->dev, 878 dev_dbg(priv->adapter->dev,
874 "TDLS link config for %pM successful\n", 879 "TDLS link delete for %pM successful\n",
875 cmd_tdls_oper->peer_mac); 880 cmd_tdls_oper->peer_mac);
881 }
876 break; 882 break;
877 case ACT_TDLS_CREATE: 883 case ACT_TDLS_CREATE:
878 if (reason) { 884 if (reason) {
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 368450cc56c7..f6395ef11a72 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -134,6 +134,46 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
134 netif_carrier_off(priv->netdev); 134 netif_carrier_off(priv->netdev);
135} 135}
136 136
137static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
138 struct sk_buff *event_skb)
139{
140 int ret = 0;
141 struct mwifiex_adapter *adapter = priv->adapter;
142 struct mwifiex_sta_node *sta_ptr;
143 struct mwifiex_tdls_generic_event *tdls_evt =
144 (void *)event_skb->data + sizeof(adapter->event_cause);
145
 146	/* the reserved 2 bytes are not mandatory in a TDLS event */
147 if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
148 sizeof(u16) - sizeof(adapter->event_cause))) {
149 dev_err(adapter->dev, "Invalid event length!\n");
150 return -1;
151 }
152
153 sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
154 if (!sta_ptr) {
155 dev_err(adapter->dev, "cannot get sta entry!\n");
156 return -1;
157 }
158
159 switch (le16_to_cpu(tdls_evt->type)) {
160 case TDLS_EVENT_LINK_TEAR_DOWN:
161 cfg80211_tdls_oper_request(priv->netdev,
162 tdls_evt->peer_mac,
163 NL80211_TDLS_TEARDOWN,
164 le16_to_cpu(tdls_evt->u.reason_code),
165 GFP_KERNEL);
166 ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
167 MWIFIEX_TDLS_DISABLE_LINK);
168 queue_work(adapter->workqueue, &adapter->main_work);
169 break;
170 default:
171 break;
172 }
173
174 return ret;
175}
176
137/* 177/*
138 * This function handles events generated by firmware. 178 * This function handles events generated by firmware.
139 * 179 *
@@ -459,6 +499,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
459 false); 499 false);
460 break; 500 break;
461 501
502 case EVENT_TDLS_GENERIC_EVENT:
503 ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
504 break;
505
462 default: 506 default:
463 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 507 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
464 eventcause); 508 eventcause);
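
mwifiex_parse_tdls_event() above first bounds-checks the event (the trailing reason/reserved u16 may be absent on the wire), then resolves the peer and forwards the teardown to cfg80211. A small sketch of the length-validation idiom for such optional-tail events; the struct is a stand-in for the firmware layout:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>

	struct generic_event {		/* stand-in for the firmware event */
		uint16_t type;
		uint8_t peer_mac[6];
		uint16_t reason_code;	/* optional: may be missing */
	};

	static int validate_event(const uint8_t *buf, size_t len)
	{
		/* accept events that omit the optional trailing u16 */
		const size_t min_len =
			sizeof(struct generic_event) - sizeof(uint16_t);

		if (len < min_len)
			return -EINVAL;	/* truncated: refuse to parse */
		return 0;		/* safe to read the fixed fields */
	}
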
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index ed26387eccf5..8b639d7fe6df 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -183,6 +183,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
183 struct rx_packet_hdr *rx_pkt_hdr; 183 struct rx_packet_hdr *rx_pkt_hdr;
184 u8 ta[ETH_ALEN]; 184 u8 ta[ETH_ALEN];
185 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num; 185 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
186 struct mwifiex_sta_node *sta_ptr;
186 187
187 local_rx_pd = (struct rxpd *) (skb->data); 188 local_rx_pd = (struct rxpd *) (skb->data);
188 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type); 189 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -213,14 +214,25 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
213 * If the packet is not a unicast packet then send the packet 214 * If the packet is not a unicast packet then send the packet
214 * directly to the OS. Don't pass it through RX reordering. 215 * directly to the OS. Don't pass it through RX reordering.
215 */ 216 */
216 if (!IS_11N_ENABLED(priv) || 217 if ((!IS_11N_ENABLED(priv) &&
218 !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
219 !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
217 !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) { 220 !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
218 mwifiex_process_rx_packet(priv, skb); 221 mwifiex_process_rx_packet(priv, skb);
219 return ret; 222 return ret;
220 } 223 }
221 224
222 if (mwifiex_queuing_ra_based(priv)) { 225 if (mwifiex_queuing_ra_based(priv) ||
226 (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
227 local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
223 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); 228 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
229 if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
230 local_rx_pd->priority < MAX_NUM_TID) {
231 sta_ptr = mwifiex_get_sta_entry(priv, ta);
232 if (sta_ptr)
233 sta_ptr->rx_seq[local_rx_pd->priority] =
234 le16_to_cpu(local_rx_pd->seq_num);
235 }
224 } else { 236 } else {
225 if (rx_pkt_type != PKT_TYPE_BAR) 237 if (rx_pkt_type != PKT_TYPE_BAR)
226 priv->rx_seq[local_rx_pd->priority] = seq_num; 238 priv->rx_seq[local_rx_pd->priority] = seq_num;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 1236a5de7bca..5fce7e78a36e 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -128,6 +128,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
128{ 128{
129 struct mwifiex_adapter *adapter = priv->adapter; 129 struct mwifiex_adapter *adapter = priv->adapter;
130 struct txpd *local_tx_pd; 130 struct txpd *local_tx_pd;
131 struct mwifiex_tx_param tx_param;
131/* sizeof(struct txpd) + Interface specific header */ 132/* sizeof(struct txpd) + Interface specific header */
132#define NULL_PACKET_HDR 64 133#define NULL_PACKET_HDR 64
133 u32 data_len = NULL_PACKET_HDR; 134 u32 data_len = NULL_PACKET_HDR;
@@ -168,8 +169,9 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
168 skb, NULL); 169 skb, NULL);
169 } else { 170 } else {
170 skb_push(skb, INTF_HEADER_LEN); 171 skb_push(skb, INTF_HEADER_LEN);
172 tx_param.next_pkt_len = 0;
171 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 173 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
172 skb, NULL); 174 skb, &tx_param);
173 } 175 }
174 switch (ret) { 176 switch (ret) {
175 case -EBUSY: 177 case -EBUSY:
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 97662a1ba58c..e73034fbbde9 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -25,8 +25,8 @@
25#define TDLS_RESP_FIX_LEN 8 25#define TDLS_RESP_FIX_LEN 8
26#define TDLS_CONFIRM_FIX_LEN 6 26#define TDLS_CONFIRM_FIX_LEN 6
27 27
28static void 28static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
29mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status) 29 const u8 *mac, u8 status)
30{ 30{
31 struct mwifiex_ra_list_tbl *ra_list; 31 struct mwifiex_ra_list_tbl *ra_list;
32 struct list_head *tid_list; 32 struct list_head *tid_list;
@@ -84,7 +84,8 @@ mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
84 return; 84 return;
85} 85}
86 86
87static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac) 87static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
88 const u8 *mac)
88{ 89{
89 struct mwifiex_ra_list_tbl *ra_list; 90 struct mwifiex_ra_list_tbl *ra_list;
90 struct list_head *ra_list_head; 91 struct list_head *ra_list_head;
@@ -185,8 +186,50 @@ static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
185 return 0; 186 return 0;
186} 187}
187 188
189static int
190mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
191 u8 vht_enabled, struct sk_buff *skb)
192{
193 struct ieee80211_ht_operation *ht_oper;
194 struct mwifiex_sta_node *sta_ptr;
195 struct mwifiex_bssdescriptor *bss_desc =
196 &priv->curr_bss_params.bss_descriptor;
197 u8 *pos;
198
199 sta_ptr = mwifiex_get_sta_entry(priv, mac);
200 if (unlikely(!sta_ptr)) {
201 dev_warn(priv->adapter->dev,
202 "TDLS peer station not found in list\n");
203 return -1;
204 }
205
206 pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
207 *pos++ = WLAN_EID_HT_OPERATION;
208 *pos++ = sizeof(struct ieee80211_ht_operation);
209 ht_oper = (void *)pos;
210
211 ht_oper->primary_chan = bss_desc->channel;
212
213 /* follow AP's channel bandwidth */
214 if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
215 bss_desc->bcn_ht_cap &&
216 ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
217 ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
218
219 if (vht_enabled) {
220 ht_oper->ht_param =
221 mwifiex_get_sec_chan_offset(bss_desc->channel);
222 ht_oper->ht_param |= BIT(2);
223 }
224
225 memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
226 sizeof(struct ieee80211_ht_operation));
227
228 return 0;
229}
230
188static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv, 231static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
189 u8 *mac, struct sk_buff *skb) 232 const u8 *mac, struct sk_buff *skb)
190{ 233{
191 struct mwifiex_bssdescriptor *bss_desc; 234 struct mwifiex_bssdescriptor *bss_desc;
192 struct ieee80211_vht_operation *vht_oper; 235 struct ieee80211_vht_operation *vht_oper;
@@ -325,8 +368,9 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
325} 368}
326 369
327static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, 370static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
328 u8 *peer, u8 action_code, u8 dialog_token, 371 const u8 *peer, u8 action_code,
329 u16 status_code, struct sk_buff *skb) 372 u8 dialog_token,
373 u16 status_code, struct sk_buff *skb)
330{ 374{
331 struct ieee80211_tdls_data *tf; 375 struct ieee80211_tdls_data *tf;
332 int ret; 376 int ret;
@@ -428,6 +472,17 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
428 dev_kfree_skb_any(skb); 472 dev_kfree_skb_any(skb);
429 return ret; 473 return ret;
430 } 474 }
475 ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
476 if (ret) {
477 dev_kfree_skb_any(skb);
478 return ret;
479 }
480 } else {
481 ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
482 if (ret) {
483 dev_kfree_skb_any(skb);
484 return ret;
485 }
431 } 486 }
432 break; 487 break;
433 488
@@ -453,7 +508,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
453} 508}
454 509
455static void 510static void
456mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid) 511mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
512 const u8 *peer, const u8 *bssid)
457{ 513{
458 struct ieee80211_tdls_lnkie *lnkid; 514 struct ieee80211_tdls_lnkie *lnkid;
459 515
@@ -467,8 +523,8 @@ mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
467 memcpy(lnkid->resp_sta, peer, ETH_ALEN); 523 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
468} 524}
469 525
470int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, 526int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
471 u8 *peer, u8 action_code, u8 dialog_token, 527 u8 action_code, u8 dialog_token,
472 u16 status_code, const u8 *extra_ies, 528 u16 status_code, const u8 *extra_ies,
473 size_t extra_ies_len) 529 size_t extra_ies_len)
474{ 530{
@@ -560,7 +616,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
560} 616}
561 617
562static int 618static int
563mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer, 619mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
620 const u8 *peer,
564 u8 action_code, u8 dialog_token, 621 u8 action_code, u8 dialog_token,
565 u16 status_code, struct sk_buff *skb) 622 u16 status_code, struct sk_buff *skb)
566{ 623{
@@ -638,10 +695,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
638 return 0; 695 return 0;
639} 696}
640 697
641int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, 698int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
642 u8 *peer, u8 action_code, u8 dialog_token, 699 u8 action_code, u8 dialog_token,
643 u16 status_code, const u8 *extra_ies, 700 u16 status_code, const u8 *extra_ies,
644 size_t extra_ies_len) 701 size_t extra_ies_len)
645{ 702{
646 struct sk_buff *skb; 703 struct sk_buff *skb;
647 struct mwifiex_txinfo *tx_info; 704 struct mwifiex_txinfo *tx_info;
@@ -848,7 +905,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
848} 905}
849 906
850static int 907static int
851mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer) 908mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
852{ 909{
853 struct mwifiex_sta_node *sta_ptr; 910 struct mwifiex_sta_node *sta_ptr;
854 struct mwifiex_ds_tdls_oper tdls_oper; 911 struct mwifiex_ds_tdls_oper tdls_oper;
@@ -869,7 +926,7 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
869} 926}
870 927
871static int 928static int
872mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer) 929mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
873{ 930{
874 struct mwifiex_sta_node *sta_ptr; 931 struct mwifiex_sta_node *sta_ptr;
875 struct mwifiex_ds_tdls_oper tdls_oper; 932 struct mwifiex_ds_tdls_oper tdls_oper;
@@ -896,7 +953,7 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
896} 953}
897 954
898static int 955static int
899mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer) 956mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
900{ 957{
901 struct mwifiex_sta_node *sta_ptr; 958 struct mwifiex_sta_node *sta_ptr;
902 struct mwifiex_ds_tdls_oper tdls_oper; 959 struct mwifiex_ds_tdls_oper tdls_oper;
@@ -925,7 +982,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
925} 982}
926 983
927static int 984static int
928mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer) 985mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
929{ 986{
930 struct mwifiex_sta_node *sta_ptr; 987 struct mwifiex_sta_node *sta_ptr;
931 struct ieee80211_mcs_info mcs; 988 struct ieee80211_mcs_info mcs;
@@ -982,7 +1039,7 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
982 return 0; 1039 return 0;
983} 1040}
984 1041
985int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action) 1042int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
986{ 1043{
987 switch (action) { 1044 switch (action) {
988 case MWIFIEX_TDLS_ENABLE_LINK: 1045 case MWIFIEX_TDLS_ENABLE_LINK:
@@ -997,7 +1054,7 @@ int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
997 return 0; 1054 return 0;
998} 1055}
999 1056
1000int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac) 1057int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
1001{ 1058{
1002 struct mwifiex_sta_node *sta_ptr; 1059 struct mwifiex_sta_node *sta_ptr;
1003 1060
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 9be6544bdded..32643555dd2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
175 switch (GET_RXSTBC(cap_info)) { 175 switch (GET_RXSTBC(cap_info)) {
176 case MWIFIEX_RX_STBC1: 176 case MWIFIEX_RX_STBC1:
177 /* HT_CAP 1X1 mode */ 177 /* HT_CAP 1X1 mode */
178 memset(&bss_cfg->ht_cap.mcs, 0xff, 1); 178 bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
179 break; 179 break;
180 case MWIFIEX_RX_STBC12: /* fall through */ 180 case MWIFIEX_RX_STBC12: /* fall through */
181 case MWIFIEX_RX_STBC123: 181 case MWIFIEX_RX_STBC123:
182 /* HT_CAP 2X2 mode */ 182 /* HT_CAP 2X2 mode */
183 memset(&bss_cfg->ht_cap.mcs, 0xff, 2); 183 bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
184 bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
184 break; 185 break;
185 default: 186 default:
186 dev_warn(priv->adapter->dev, 187 dev_warn(priv->adapter->dev,
187 "Unsupported RX-STBC, default to 2x2\n"); 188 "Unsupported RX-STBC, default to 2x2\n");
188 memset(&bss_cfg->ht_cap.mcs, 0xff, 2); 189 bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
190 bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
189 break; 191 break;
190 } 192 }
191 priv->ap_11n_enabled = 1; 193 priv->ap_11n_enabled = 1;
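The uap_cmd.c hunk above swaps memset() calls on &bss_cfg->ht_cap.mcs for explicit writes to mcs.rx_mask[]. The old code only worked because rx_mask happens to be the first member of struct ieee80211_mcs_info; indexing the array states the intent and survives any future reordering of the struct. A minimal standalone sketch (the layout below mirrors the mac80211 struct, trimmed for illustration):

struct mcs_info_sketch {                /* mirrors struct ieee80211_mcs_info */
        unsigned char rx_mask[10];      /* RX MCS bitmask, 1 byte per 8 MCS */
        unsigned short rx_highest;      /* more members follow rx_mask... */
        unsigned char tx_params;
};

static void set_rx_streams(struct mcs_info_sketch *mcs, int nstreams)
{
        /* explicit per-stream writes: MCS 0-7 always, MCS 8-15 for 2x2 */
        mcs->rx_mask[0] = 0xff;
        if (nstreams == 2)
                mcs->rx_mask[1] = 0xff;
        /* the removed memset(&...->mcs, 0xff, n) relied on rx_mask being
         * the first member: correct today, fragile tomorrow */
}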
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edbe4aff00d8..a8ce8130cfae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,9 +22,9 @@
22 22
23#define USB_VERSION "1.0" 23#define USB_VERSION "1.0"
24 24
25static u8 user_rmmod;
25static struct mwifiex_if_ops usb_ops; 26static struct mwifiex_if_ops usb_ops;
26static struct semaphore add_remove_card_sem; 27static struct semaphore add_remove_card_sem;
27static struct usb_card_rec *usb_card;
28 28
29static struct usb_device_id mwifiex_usb_table[] = { 29static struct usb_device_id mwifiex_usb_table[] = {
30 /* 8797 */ 30 /* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
532static void mwifiex_usb_disconnect(struct usb_interface *intf) 532static void mwifiex_usb_disconnect(struct usb_interface *intf)
533{ 533{
534 struct usb_card_rec *card = usb_get_intfdata(intf); 534 struct usb_card_rec *card = usb_get_intfdata(intf);
535 struct mwifiex_adapter *adapter;
535 536
536 if (!card) { 537 if (!card || !card->adapter) {
537 pr_err("%s: card is NULL\n", __func__); 538 pr_err("%s: card or card->adapter is NULL\n", __func__);
538 return; 539 return;
539 } 540 }
540 541
541 mwifiex_usb_free(card); 542 adapter = card->adapter;
543 if (!adapter->priv_num)
544 return;
542 545
543 if (card->adapter) { 546 if (user_rmmod) {
544 struct mwifiex_adapter *adapter = card->adapter; 547#ifdef CONFIG_PM
548 if (adapter->is_suspended)
549 mwifiex_usb_resume(intf);
550#endif
545 551
546 if (!adapter->priv_num) 552 mwifiex_deauthenticate_all(adapter);
547 return;
548 553
549 dev_dbg(adapter->dev, "%s: removing card\n", __func__); 554 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
550 mwifiex_remove_card(adapter, &add_remove_card_sem); 555 MWIFIEX_BSS_ROLE_ANY),
556 MWIFIEX_FUNC_SHUTDOWN);
551 } 557 }
552 558
559 mwifiex_usb_free(card);
560
561 dev_dbg(adapter->dev, "%s: removing card\n", __func__);
562 mwifiex_remove_card(adapter, &add_remove_card_sem);
563
553 usb_set_intfdata(intf, NULL); 564 usb_set_intfdata(intf, NULL);
554 usb_put_dev(interface_to_usbdev(intf)); 565 usb_put_dev(interface_to_usbdev(intf));
555 kfree(card); 566 kfree(card);
556 usb_card = NULL;
557 567
558 return; 568 return;
559} 569}
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
565 .id_table = mwifiex_usb_table, 575 .id_table = mwifiex_usb_table,
566 .suspend = mwifiex_usb_suspend, 576 .suspend = mwifiex_usb_suspend,
567 .resume = mwifiex_usb_resume, 577 .resume = mwifiex_usb_resume,
578 .soft_unbind = 1,
568}; 579};
569 580
570static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) 581static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
762 773
763 card->adapter = adapter; 774 card->adapter = adapter;
764 adapter->dev = &card->udev->dev; 775 adapter->dev = &card->udev->dev;
765 usb_card = card;
766 776
767 switch (le16_to_cpu(card->udev->descriptor.idProduct)) { 777 switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
768 case USB8897_PID_1: 778 case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
1025 if (!down_interruptible(&add_remove_card_sem)) 1035 if (!down_interruptible(&add_remove_card_sem))
1026 up(&add_remove_card_sem); 1036 up(&add_remove_card_sem);
1027 1037
1028 if (usb_card && usb_card->adapter) { 1038 /* set the flag as the user is removing this module */
1029 struct mwifiex_adapter *adapter = usb_card->adapter; 1039 user_rmmod = 1;
1030
1031 /* In case driver is removed when asynchronous FW downloading is
1032 * in progress
1033 */
1034 wait_for_completion(&adapter->fw_load);
1035
1036#ifdef CONFIG_PM
1037 if (adapter->is_suspended)
1038 mwifiex_usb_resume(usb_card->intf);
1039#endif
1040
1041 mwifiex_deauthenticate_all(adapter);
1042
1043 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
1044 MWIFIEX_BSS_ROLE_ANY),
1045 MWIFIEX_FUNC_SHUTDOWN);
1046 }
1047 1040
1048 usb_deregister(&mwifiex_usb_driver); 1041 usb_deregister(&mwifiex_usb_driver);
1049} 1042}
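Two patterns carry the usb.c rework above. First, firmware shutdown moves out of module cleanup and into disconnect(), gated by a user_rmmod flag that cleanup sets just before usb_deregister(): on a physical unplug the flag is clear and no commands are sent to hardware that is already gone, while rmmod shuts the firmware down cleanly. Second, .soft_unbind = 1 asks the USB core to leave the device usable while disconnect() runs, so those shutdown URBs can still go out. A condensed sketch of the handshake, with illustrative names:

static u8 user_rmmod;                       /* set only on driver unload */

static void sketch_disconnect(struct usb_interface *intf)
{
        if (user_rmmod) {
                /* rmmod path: device still present, so deauthenticate
                 * and tell the firmware to shut down */
        }
        /* unplug path (flag clear): the hardware is gone, skip firmware
         * commands and only tear down host-side state */
}

static struct usb_driver sketch_driver = {
        .name        = "sketch",
        .disconnect  = sketch_disconnect,
        .soft_unbind = 1,   /* keep the device reachable inside disconnect() */
};

static void __exit sketch_cleanup(void)
{
        user_rmmod = 1;                     /* before deregistering... */
        usb_deregister(&sketch_driver);     /* ...which calls disconnect() */
}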
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index c3824e37f3f2..6da5abf52e61 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -259,7 +259,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
259 * NULL is returned if station entry is not found in associated STA list. 259 * NULL is returned if station entry is not found in associated STA list.
260 */ 260 */
261struct mwifiex_sta_node * 261struct mwifiex_sta_node *
262mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac) 262mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
263{ 263{
264 struct mwifiex_sta_node *node; 264 struct mwifiex_sta_node *node;
265 265
@@ -280,7 +280,7 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
280 * If received mac address is NULL, NULL is returned. 280 * If received mac address is NULL, NULL is returned.
281 */ 281 */
282struct mwifiex_sta_node * 282struct mwifiex_sta_node *
283mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac) 283mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
284{ 284{
285 struct mwifiex_sta_node *node; 285 struct mwifiex_sta_node *node;
286 unsigned long flags; 286 unsigned long flags;
@@ -332,7 +332,7 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
332} 332}
333 333
334/* This function will delete a station entry from station list */ 334/* This function will delete a station entry from station list */
335void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac) 335void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
336{ 336{
337 struct mwifiex_sta_node *node; 337 struct mwifiex_sta_node *node;
338 unsigned long flags; 338 unsigned long flags;
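These util.c hunks belong to the same constification sweep as the tdls.c changes earlier: once cfg80211 hands the driver a const u8 *mac, every helper along the lookup path has to accept const too, or casts pile up. Pure lookups qualify naturally, since they only compare the address. A self-contained sketch of the idiom:

#include <stddef.h>
#include <string.h>

#define ETH_ALEN 6

struct sta_node {
        struct sta_node *next;
        unsigned char mac[ETH_ALEN];
};

/* const on `mac` documents, and the compiler enforces, that a lookup
 * never writes through the pointer */
static struct sta_node *get_sta_entry(struct sta_node *list,
                                      const unsigned char *mac)
{
        for (; list; list = list->next)
                if (!memcmp(list->mac, mac, ETH_ALEN))
                        return list;
        return NULL;
}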
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0a7cc742aed7..d3671d009f6c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -92,7 +92,7 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
92 * The function also initializes the list with the provided RA. 92 * The function also initializes the list with the provided RA.
93 */ 93 */
94static struct mwifiex_ra_list_tbl * 94static struct mwifiex_ra_list_tbl *
95mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra) 95mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
96{ 96{
97 struct mwifiex_ra_list_tbl *ra_list; 97 struct mwifiex_ra_list_tbl *ra_list;
98 98
@@ -139,8 +139,7 @@ static u8 mwifiex_get_random_ba_threshold(void)
139 * This function allocates and adds a RA list for all TIDs 139 * This function allocates and adds a RA list for all TIDs
140 * with the given RA. 140 * with the given RA.
141 */ 141 */
142void 142void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
143mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
144{ 143{
145 int i; 144 int i;
146 struct mwifiex_ra_list_tbl *ra_list; 145 struct mwifiex_ra_list_tbl *ra_list;
@@ -164,6 +163,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
164 if (!mwifiex_queuing_ra_based(priv)) { 163 if (!mwifiex_queuing_ra_based(priv)) {
165 if (mwifiex_get_tdls_link_status(priv, ra) == 164 if (mwifiex_get_tdls_link_status(priv, ra) ==
166 TDLS_SETUP_COMPLETE) { 165 TDLS_SETUP_COMPLETE) {
166 ra_list->tdls_link = true;
167 ra_list->is_11n_enabled = 167 ra_list->is_11n_enabled =
168 mwifiex_tdls_peer_11n_enabled(priv, ra); 168 mwifiex_tdls_peer_11n_enabled(priv, ra);
169 } else { 169 } else {
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
426 priv->tos_to_tid_inv[i]; 426 priv->tos_to_tid_inv[i];
427 } 427 }
428 428
429 priv->aggr_prio_tbl[6].amsdu
430 = priv->aggr_prio_tbl[6].ampdu_ap
431 = priv->aggr_prio_tbl[6].ampdu_user
432 = BA_STREAM_NOT_ALLOWED;
433
434 priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
435 = priv->aggr_prio_tbl[7].ampdu_user
436 = BA_STREAM_NOT_ALLOWED;
437
438 mwifiex_set_ba_params(priv); 429 mwifiex_set_ba_params(priv);
439 mwifiex_reset_11n_rx_seq_num(priv); 430 mwifiex_reset_11n_rx_seq_num(priv);
440 431
@@ -575,7 +566,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
575 */ 566 */
576static struct mwifiex_ra_list_tbl * 567static struct mwifiex_ra_list_tbl *
577mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, 568mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
578 u8 *ra_addr) 569 const u8 *ra_addr)
579{ 570{
580 struct mwifiex_ra_list_tbl *ra_list; 571 struct mwifiex_ra_list_tbl *ra_list;
581 572
@@ -596,7 +587,8 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
596 * retrieved. 587 * retrieved.
597 */ 588 */
598struct mwifiex_ra_list_tbl * 589struct mwifiex_ra_list_tbl *
599mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr) 590mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
591 const u8 *ra_addr)
600{ 592{
601 struct mwifiex_ra_list_tbl *ra_list; 593 struct mwifiex_ra_list_tbl *ra_list;
602 594
@@ -657,7 +649,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
657 if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS) 649 if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
658 dev_dbg(adapter->dev, 650 dev_dbg(adapter->dev,
659 "TDLS setup packet for %pM. Don't block\n", ra); 651 "TDLS setup packet for %pM. Don't block\n", ra);
660 else 652 else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
661 tdls_status = mwifiex_get_tdls_link_status(priv, ra); 653 tdls_status = mwifiex_get_tdls_link_status(priv, ra);
662 } 654 }
663 655
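Two distinct fixes sit in the wmm.c hunks. The deleted aggr_prio_tbl block stops force-disabling AMSDU/AMPDU aggregation on TIDs 6 and 7, the voice-priority queues. And the new memcmp() in the TX-queue path skips the TDLS link-status lookup whenever the receiver address is the associated AP itself: frames for the BSSID always go through the AP, so walking the TDLS station list for it is wasted locked work. The memcmp idiom reads inverted at first glance; a compilable miniature:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char ra[ETH_ALEN]    = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

        /* memcmp() != 0 means "differs", so the kernel hunk reads:
         * "if ra is not the AP, check whether it is a TDLS peer" */
        if (memcmp(bssid, ra, ETH_ALEN))
                puts("ra is not the AP: consult TDLS state");
        else
                puts("ra is the AP: the frame goes through the AP anyway");
        return 0;
}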
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 83e42083ebff..eca56e371a57 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,7 +99,7 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
99 99
100void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, 100void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
101 struct sk_buff *skb); 101 struct sk_buff *skb);
102void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra); 102void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
103void mwifiex_rotate_priolists(struct mwifiex_private *priv, 103void mwifiex_rotate_priolists(struct mwifiex_private *priv,
104 struct mwifiex_ra_list_tbl *ra, int tid); 104 struct mwifiex_ra_list_tbl *ra, int tid);
105 105
@@ -123,7 +123,8 @@ void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
123int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, 123int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
124 const struct host_cmd_ds_command *resp); 124 const struct host_cmd_ds_command *resp);
125struct mwifiex_ra_list_tbl * 125struct mwifiex_ra_list_tbl *
126mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr); 126mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
127 const u8 *ra_addr);
127u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid); 128u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
128 129
129#endif /* !_MWIFIEX_WMM_H_ */ 130#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 49300d04efdf..e27e32851f1e 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -988,8 +988,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
988 * tsc must be NULL or up to 8 bytes 988 * tsc must be NULL or up to 8 bytes
989 */ 989 */
990int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, 990int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
991 int set_tx, u8 *key, u8 *rsc, size_t rsc_len, 991 int set_tx, const u8 *key, const u8 *rsc,
992 u8 *tsc, size_t tsc_len) 992 size_t rsc_len, const u8 *tsc, size_t tsc_len)
993{ 993{
994 struct { 994 struct {
995 __le16 idx; 995 __le16 idx;
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8f6831f4e328..466d1ede76f1 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -38,8 +38,8 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
38int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv); 38int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
39int __orinoco_hw_setup_enc(struct orinoco_private *priv); 39int __orinoco_hw_setup_enc(struct orinoco_private *priv);
40int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, 40int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
41 int set_tx, u8 *key, u8 *rsc, size_t rsc_len, 41 int set_tx, const u8 *key, const u8 *rsc,
42 u8 *tsc, size_t tsc_len); 42 size_t rsc_len, const u8 *tsc, size_t tsc_len);
43int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx); 43int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
44int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, 44int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
45 struct net_device *dev, 45 struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 3ac71339d040..c90939ced0e4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1673,7 +1673,7 @@ static int ezusb_probe(struct usb_interface *interface,
1673 firmware.code = fw_entry->data; 1673 firmware.code = fw_entry->data;
1674 } 1674 }
1675 if (firmware.size && firmware.code) { 1675 if (firmware.size && firmware.code) {
1676 if (ezusb_firmware_download(upriv, &firmware)) 1676 if (ezusb_firmware_download(upriv, &firmware) < 0)
1677 goto error; 1677 goto error;
1678 } else { 1678 } else {
1679 err("No firmware to download"); 1679 err("No firmware to download");
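The one-character orinoco_usb change is a return-convention fix: judging from the diff, ezusb_firmware_download() can return a nonzero value that does not signal failure, so only a negative, errno-style result should take the error path. The kernel idiom in miniature (helper name hypothetical):

        int ret = do_firmware_download();       /* hypothetical helper */

        if (ret < 0)            /* negative errno: a real failure */
                goto error;
        /* zero or positive: success, possibly with status information */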
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index b7a867b50b94..6abdaf0aa052 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -52,9 +52,9 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
52 priv->keys[index].seq_len = seq_len; 52 priv->keys[index].seq_len = seq_len;
53 53
54 if (key_len) 54 if (key_len)
55 memcpy(priv->keys[index].key, key, key_len); 55 memcpy((void *)priv->keys[index].key, key, key_len);
56 if (seq_len) 56 if (seq_len)
57 memcpy(priv->keys[index].seq, seq, seq_len); 57 memcpy((void *)priv->keys[index].seq, seq, seq_len);
58 58
59 switch (alg) { 59 switch (alg) {
60 case ORINOCO_ALG_TKIP: 60 case ORINOCO_ALG_TKIP:
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index eede90b63f84..7be3a4839640 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
669 return total; 669 return total;
670} 670}
671 671
672static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop) 672static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
673 u32 queues, bool drop)
673{ 674{
674 struct p54_common *priv = dev->priv; 675 struct p54_common *priv = dev->priv;
675 unsigned int total, i; 676 unsigned int total, i;
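p54 here, and rt2x00 and rtlwifi below, all adapt to the same mac80211 API change: ieee80211_ops->flush() now receives the ieee80211_vif being flushed, which lets a driver restrict the flush to one interface's queues. Drivers that only flush globally, like these, may simply ignore the new argument. The handler shape under the new prototype:

static void sketch_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         u32 queues, bool drop)
{
        /* vif may be ignored by globally-flushing drivers; `queues` is a
         * bitmap of queues to flush, and `drop` selects discarding the
         * pending frames instead of waiting for them to drain */
}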
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cbf0a589d32a..8330fa33e50b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
343 ray_release(link); 343 ray_release(link);
344 344
345 local = netdev_priv(dev); 345 local = netdev_priv(dev);
346 del_timer(&local->timer); 346 del_timer_sync(&local->timer);
347 347
348 if (link->priv) { 348 if (link->priv) {
349 unregister_netdev(dev); 349 unregister_netdev(dev);
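The ray_cs change swaps del_timer() for del_timer_sync() in the detach path. del_timer() only removes a timer that has not fired yet; on SMP the handler may already be running on another CPU, still dereferencing `local` while the device gets unregistered and freed. del_timer_sync() additionally waits for a running handler to finish, which is the right call immediately before teardown (provided the caller holds no lock the handler also takes):

        /* CPU0 (detach)              CPU1 (timer softirq)
         *   del_timer(&t);             handler already entered...
         *   kfree(local);              ...still touching local: use-after-free
         *
         * del_timer_sync() returns only once any running handler has
         * completed, closing that window. */
        del_timer_sync(&local->timer);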
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 39d22a154341..d2a9a08210be 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -517,7 +517,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
517 u8 key_index, bool unicast, bool multicast); 517 u8 key_index, bool unicast, bool multicast);
518 518
519static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, 519static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
520 u8 *mac, struct station_info *sinfo); 520 const u8 *mac, struct station_info *sinfo);
521 521
522static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, 522static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
523 int idx, u8 *mac, struct station_info *sinfo); 523 int idx, u8 *mac, struct station_info *sinfo);
@@ -2490,7 +2490,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
2490} 2490}
2491 2491
2492static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, 2492static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
2493 u8 *mac, struct station_info *sinfo) 2493 const u8 *mac, struct station_info *sinfo)
2494{ 2494{
2495 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2495 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2496 struct usbnet *usbdev = priv->usbdev; 2496 struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 84164747ace0..54aaeb09debf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -656,6 +656,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
656 case IEEE80211_AMPDU_TX_START: 656 case IEEE80211_AMPDU_TX_START:
657 common->vif_info[ii].seq_start = seq_no; 657 common->vif_info[ii].seq_start = seq_no;
658 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 658 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
659 status = 0;
659 break; 660 break;
660 661
661 case IEEE80211_AMPDU_TX_STOP_CONT: 662 case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1b28cda6ca88..2eefbf159bc0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
1083{ 1083{
1084 if (status) { 1084 if (status) {
1085 rsi_hal_send_sta_notify_frame(common, 1085 rsi_hal_send_sta_notify_frame(common,
1086 NL80211_IFTYPE_STATION, 1086 RSI_IFTYPE_STATION,
1087 STA_CONNECTED, 1087 STA_CONNECTED,
1088 bssid, 1088 bssid,
1089 qos_enable, 1089 qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
1092 rsi_send_auto_rate_request(common); 1092 rsi_send_auto_rate_request(common);
1093 } else { 1093 } else {
1094 rsi_hal_send_sta_notify_frame(common, 1094 rsi_hal_send_sta_notify_frame(common,
1095 NL80211_IFTYPE_STATION, 1095 RSI_IFTYPE_STATION,
1096 STA_DISCONNECTED, 1096 STA_DISCONNECTED,
1097 bssid, 1097 bssid,
1098 qos_enable, 1098 qos_enable,
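The rsi_91x_mgmt.c change stops passing nl80211's enum straight into a firmware frame. NL80211_IFTYPE_STATION is 2 in the nl80211 ABI, while the new constant indicates the RSI firmware encodes a station interface as 0; the two are unrelated ABIs that must be translated at the boundary. An illustrative translation helper (names assumed, only the constant comes from the diff):

#define RSI_IFTYPE_STATION 0    /* firmware encoding; nl80211 uses 2 */

static u8 to_rsi_iftype(enum nl80211_iftype type)
{
        /* map host enums to wire values explicitly; never assume the
         * numeric values happen to line up */
        return (type == NL80211_IFTYPE_STATION) ? RSI_IFTYPE_STATION : 0xff;
}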
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index f2f70784d4ad..d3fbe33d2324 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -63,7 +63,7 @@ static inline int rsi_create_kthread(struct rsi_common *common,
63 u8 *name) 63 u8 *name)
64{ 64{
65 init_completion(&thread->completion); 65 init_completion(&thread->completion);
66 thread->task = kthread_run(func_ptr, common, name); 66 thread->task = kthread_run(func_ptr, common, "%s", name);
67 if (IS_ERR(thread->task)) 67 if (IS_ERR(thread->task))
68 return (int)PTR_ERR(thread->task); 68 return (int)PTR_ERR(thread->task);
69 69
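The rsi_common.h one-liner is the classic format-string fix: `name` is caller-supplied, and passing it as kthread_run()'s format argument means any '%' inside it would be parsed as a conversion specification; "%s" prints it literally. The same bug in userspace miniature:

#include <stdio.h>

int main(void)
{
        const char *name = "wlan%d-thread";     /* caller-chosen string */

        /* printf(name);       BAD: "%d" consumes a nonexistent argument */
        printf("%s\n", name);  /* GOOD: name is data, not a format */
        return 0;
}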
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ac67c4ad63c2..225215a3b8bb 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -73,6 +73,7 @@
73#define RX_BA_INDICATION 1 73#define RX_BA_INDICATION 1
74#define RSI_TBL_SZ 40 74#define RSI_TBL_SZ 40
75#define MAX_RETRIES 8 75#define MAX_RETRIES 8
76#define RSI_IFTYPE_STATION 0
76 77
77#define STD_RATE_MCS7 0x07 78#define STD_RATE_MCS7 0x07
78#define STD_RATE_MCS6 0x06 79#define STD_RATE_MCS6 0x06
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 41d4a8167dc3..c17fcf272728 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
1005 entry->skb->len + padding_len); 1005 entry->skb->len + padding_len);
1006 1006
1007 /* 1007 /*
1008 * Enable beaconing again. 1008 * Restore beaconing state.
1009 */ 1009 */
1010 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 1010 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
1011 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1012 1011
1013 /* 1012 /*
1014 * Clean up beacon skb. 1013 * Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
1039void rt2800_clear_beacon(struct queue_entry *entry) 1038void rt2800_clear_beacon(struct queue_entry *entry)
1040{ 1039{
1041 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1040 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1042 u32 reg; 1041 u32 orig_reg, reg;
1043 1042
1044 /* 1043 /*
1045 * Disable beaconing while we are reloading the beacon data, 1044 * Disable beaconing while we are reloading the beacon data,
1046 * otherwise we might be sending out invalid data. 1045 * otherwise we might be sending out invalid data.
1047 */ 1046 */
1048 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1047 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
1048 reg = orig_reg;
1049 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 1049 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
1050 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1050 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1051 1051
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
1055 rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx); 1055 rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
1056 1056
1057 /* 1057 /*
1058 * Enabled beaconing again. 1058 * Restore beaconing state.
1059 */ 1059 */
1060 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 1060 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
1061 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1062} 1061}
1063EXPORT_SYMBOL_GPL(rt2800_clear_beacon); 1062EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
1064 1063
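This rt2800lib hunk pair, and the matching rt61pci and rt73usb hunks further down, all apply the same save/modify/restore pattern: read the beacon-timing register into orig_reg, clear the BEACON_GEN bit in a working copy while the beacon slot is rewritten, then write orig_reg back. The old code unconditionally set BEACON_GEN to 1 afterwards, which switched beaconing on even when it had been off before the update. The pattern in the abstract (register accessors are illustrative):

        u32 orig_reg, reg;

        reg_read(dev, BCN_TIME_CFG, &orig_reg);   /* remember prior state */
        reg = orig_reg;
        set_field32(&reg, BEACON_GEN, 0);         /* quiesce during update */
        reg_write(dev, BCN_TIME_CFG, reg);

        /* ... rewrite or clear the beacon data here ... */

        reg_write(dev, BCN_TIME_CFG, orig_reg);   /* restore, never assume */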
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e3b885d8f7db..010b76505243 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
1448 struct ieee80211_vif *vif, u16 queue, 1448 struct ieee80211_vif *vif, u16 queue,
1449 const struct ieee80211_tx_queue_params *params); 1449 const struct ieee80211_tx_queue_params *params);
1450void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); 1450void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
1451void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop); 1451void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1452 u32 queues, bool drop);
1452int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); 1453int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
1453int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); 1454int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
1454void rt2x00mac_get_ringparam(struct ieee80211_hw *hw, 1455void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a87ee9b6585a..212ac4842c16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -749,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
749} 749}
750EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); 750EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
751 751
752void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 752void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
753 u32 queues, bool drop)
753{ 754{
754 struct rt2x00_dev *rt2x00dev = hw->priv; 755 struct rt2x00_dev *rt2x00dev = hw->priv;
755 struct data_queue *queue; 756 struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 10572452cc21..86c43d112a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -68,6 +68,12 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
68 } 68 }
69 } 69 }
70 70
71 /* If the port is powered down, we get a -EPROTO error, and this
72 * leads to an endless loop. So just say that the device is gone.
73 */
74 if (status == -EPROTO)
75 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
76
71 rt2x00_err(rt2x00dev, 77 rt2x00_err(rt2x00dev,
72 "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n", 78 "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
73 request, offset, status); 79 request, offset, status);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 24402984ee57..9048a9cbe52c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2031static void rt61pci_clear_beacon(struct queue_entry *entry) 2031static void rt61pci_clear_beacon(struct queue_entry *entry)
2032{ 2032{
2033 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 2033 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
2034 u32 reg; 2034 u32 orig_reg, reg;
2035 2035
2036 /* 2036 /*
2037 * Disable beaconing while we are reloading the beacon data, 2037 * Disable beaconing while we are reloading the beacon data,
2038 * otherwise we might be sending out invalid data. 2038 * otherwise we might be sending out invalid data.
2039 */ 2039 */
2040 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg); 2040 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
2041 reg = orig_reg;
2041 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 2042 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2042 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); 2043 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
2043 2044
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
2048 HW_BEACON_OFFSET(entry->entry_idx), 0); 2049 HW_BEACON_OFFSET(entry->entry_idx), 0);
2049 2050
2050 /* 2051 /*
2051 * Enable beaconing again. 2052 * Restore global beaconing state.
2052 */ 2053 */
2053 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 2054 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
2054 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
2055} 2055}
2056 2056
2057/* 2057/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a140170b1eb3..95724ff9c726 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
1597{ 1597{
1598 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1598 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1599 unsigned int beacon_base; 1599 unsigned int beacon_base;
1600 u32 reg; 1600 u32 orig_reg, reg;
1601 1601
1602 /* 1602 /*
1603 * Disable beaconing while we are reloading the beacon data, 1603 * Disable beaconing while we are reloading the beacon data,
1604 * otherwise we might be sending out invalid data. 1604 * otherwise we might be sending out invalid data.
1605 */ 1605 */
1606 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1606 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
1607 reg = orig_reg;
1607 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1608 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1608 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1609 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1609 1610
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
1614 rt2x00usb_register_write(rt2x00dev, beacon_base, 0); 1615 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
1615 1616
1616 /* 1617 /*
1617 * Enable beaconing again. 1618 * Restore beaconing state.
1618 */ 1619 */
1619 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1620 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
1620 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1621} 1621}
1622 1622
1623static int rt73usb_get_tx_data_len(struct queue_entry *entry) 1623static int rt73usb_get_tx_data_len(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index 08b056db4a3b..21005bd8b43c 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,5 +1,5 @@
1rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o 1rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
2 2
3obj-$(CONFIG_RTL8180) += rtl8180.o 3obj-$(CONFIG_RTL8180) += rtl818x_pci.o
4 4
5ccflags-y += -Idrivers/net/wireless/rtl818x 5ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 98d8256f0377..2c1c02bafa10 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -284,6 +284,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
284 rx_status.band = dev->conf.chandef.chan->band; 284 rx_status.band = dev->conf.chandef.chan->band;
285 rx_status.mactime = tsft; 285 rx_status.mactime = tsft;
286 rx_status.flag |= RX_FLAG_MACTIME_START; 286 rx_status.flag |= RX_FLAG_MACTIME_START;
287 if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
288 rx_status.flag |= RX_FLAG_SHORTPRE;
287 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 289 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
288 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 290 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
289 291
@@ -461,18 +463,23 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
461 RTL818X_TX_DESC_FLAG_NO_ENC; 463 RTL818X_TX_DESC_FLAG_NO_ENC;
462 464
463 rc_flags = info->control.rates[0].flags; 465 rc_flags = info->control.rates[0].flags;
466
467 /* HW will perform RTS-CTS when only the RTS flag is set.
468 * HW will perform CTS-to-self when both RTS and CTS flags are set.
469 * RTS rate and RTS duration will be used also for CTS-to-self.
470 */
464 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 471 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
465 tx_flags |= RTL818X_TX_DESC_FLAG_RTS; 472 tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
466 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 473 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
474 rts_duration = ieee80211_rts_duration(dev, priv->vif,
475 skb->len, info);
467 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 476 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
468 tx_flags |= RTL818X_TX_DESC_FLAG_CTS; 477 tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
469 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 478 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
479 rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
480 skb->len, info);
470 } 481 }
471 482
472 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
473 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
474 info);
475
476 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) { 483 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
477 unsigned int remainder; 484 unsigned int remainder;
478 485
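This TX-path hunk, and the rtl8187 one below, encode the rule stated in the new comment: the RTS descriptor flag alone arms a real RTS/CTS exchange, RTS and CTS together select CTS-to-self, and each mode needs the matching mac80211 duration helper computed where the flags are chosen (the old code set only the CTS flag and computed no duration at all for CTS-to-self). Condensed, with abbreviated flag names:

        if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                tx_flags |= FLAG_RTS;             /* RTS alone: RTS/CTS */
                rts_duration = ieee80211_rts_duration(dev, vif,
                                                      skb->len, info);
        } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                tx_flags |= FLAG_RTS | FLAG_CTS;  /* both: CTS-to-self */
                rts_duration = ieee80211_ctstoself_duration(dev, vif,
                                                            skb->len, info);
        }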
@@ -683,9 +690,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
683 struct rtl8180_priv *priv = dev->priv; 690 struct rtl8180_priv *priv = dev->priv;
684 691
685 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) { 692 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
686 rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK | 693 rtl818x_iowrite32(priv, &priv->map->IMR,
687 IMR_TBDER | IMR_THPDER | 694 IMR_TBDER | IMR_TBDOK |
688 IMR_THPDER | IMR_THPDOK |
689 IMR_TVODER | IMR_TVODOK | 695 IMR_TVODER | IMR_TVODOK |
690 IMR_TVIDER | IMR_TVIDOK | 696 IMR_TVIDER | IMR_TVIDOK |
691 IMR_TBEDER | IMR_TBEDOK | 697 IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +917,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
911 reg32 &= 0x00ffff00; 917 reg32 &= 0x00ffff00;
912 reg32 |= 0xb8000054; 918 reg32 |= 0xb8000054;
913 rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32); 919 rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
914 } 920 } else
921 /* stop unused queues (no DMA alloc) */
922 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
923 (1<<1) | (1<<2));
915 924
916 priv->rf->init(dev); 925 priv->rf->init(dev);
917 926
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 0ca17cda48fa..629ad8cfa17b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -253,14 +253,21 @@ static void rtl8187_tx(struct ieee80211_hw *dev,
253 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; 253 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
254 if (ieee80211_has_morefrags(tx_hdr->frame_control)) 254 if (ieee80211_has_morefrags(tx_hdr->frame_control))
255 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG; 255 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
256
257 /* HW will perform RTS-CTS when only the RTS flag is set.
258 * HW will perform CTS-to-self when both RTS and CTS flags are set.
259 * RTS rate and RTS duration will be used also for CTS-to-self.
260 */
256 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 261 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
257 flags |= RTL818X_TX_DESC_FLAG_RTS; 262 flags |= RTL818X_TX_DESC_FLAG_RTS;
258 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 263 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
259 rts_dur = ieee80211_rts_duration(dev, priv->vif, 264 rts_dur = ieee80211_rts_duration(dev, priv->vif,
260 skb->len, info); 265 skb->len, info);
261 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 266 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
262 flags |= RTL818X_TX_DESC_FLAG_CTS; 267 flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
263 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 268 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
269 rts_dur = ieee80211_ctstoself_duration(dev, priv->vif,
270 skb->len, info);
264 } 271 }
265 272
266 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 273 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -381,6 +388,8 @@ static void rtl8187_rx_cb(struct urb *urb)
381 rx_status.freq = dev->conf.chandef.chan->center_freq; 388 rx_status.freq = dev->conf.chandef.chan->center_freq;
382 rx_status.band = dev->conf.chandef.chan->band; 389 rx_status.band = dev->conf.chandef.chan->band;
383 rx_status.flag |= RX_FLAG_MACTIME_START; 390 rx_status.flag |= RX_FLAG_MACTIME_START;
391 if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
392 rx_status.flag |= RX_FLAG_SHORTPRE;
384 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 393 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
385 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 394 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
386 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 395 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 45ea4e1c4abe..7abef95d278b 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -334,9 +334,9 @@ struct rtl818x_csr {
334 * I don't like to introduce a ton of "reserved".. 334 * I don't like to introduce a ton of "reserved"..
335 * They are for RTL8187SE 335 * They are for RTL8187SE
336 */ 336 */
337#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr) 337#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
338#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1)) 338#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
339#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2)) 339#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
340 340
341#define FEMR_SE REG_ADDR2(0x1D4) 341#define FEMR_SE REG_ADDR2(0x1D4)
342#define ARFR REG_ADDR2(0x1E0) 342#define ARFR REG_ADDR2(0x1E0)
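The rtl818x.h REG_ADDR* fix is standard macro hygiene: without parentheses around the parameter, an argument containing an operator of lower precedence than >> parses wrongly inside the expansion. Shift binds looser than +, so plain sums survive, but it binds tighter than |, & and ?:, which is exactly where the unparenthesized form breaks. A compilable demonstration:

#include <stdio.h>

#define HALF_BAD(addr)  (addr >> 1)
#define HALF_GOOD(addr) ((addr) >> 1)

int main(void)
{
        int flag = 1;

        /* BAD expands to (flag ? 0x1D4 : 0x1E0 >> 1): only the else arm
         * is shifted, because ?: binds looser than >> */
        printf("bad:  0x%x\n", HALF_BAD(flag ? 0x1D4 : 0x1E0));  /* 0x1d4 */
        printf("good: 0x%x\n", HALF_GOOD(flag ? 0x1D4 : 0x1E0)); /* 0xea  */
        return 0;
}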
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 4ec424f26672..b1ed6d0796f6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
1387 * before switch channel or power save, or tx buffer packet 1387 * before switch channel or power save, or tx buffer packet
1388 * maybe send after offchannel or rf sleep, this may cause 1388 * maybe send after offchannel or rf sleep, this may cause
1389 * dis-association by AP */ 1389 * dis-association by AP */
1390static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1390static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1391 u32 queues, bool drop)
1391{ 1392{
1392 struct rtl_priv *rtlpriv = rtl_priv(hw); 1393 struct rtl_priv *rtlpriv = rtl_priv(hw);
1393 1394
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 94cd9df98381..b14cf5a10f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -2515,23 +2515,3 @@ void rtl88ee_suspend(struct ieee80211_hw *hw)
2515void rtl88ee_resume(struct ieee80211_hw *hw) 2515void rtl88ee_resume(struct ieee80211_hw *hw)
2516{ 2516{
2517} 2517}
2518
2519/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2520void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
2521 bool allow_all_da, bool write_into_reg)
2522{
2523 struct rtl_priv *rtlpriv = rtl_priv(hw);
2524 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2525
2526 if (allow_all_da) /* Set BIT0 */
2527 rtlpci->receive_config |= RCR_AAP;
2528 else /* Clear BIT0 */
2529 rtlpci->receive_config &= ~RCR_AAP;
2530
2531 if (write_into_reg)
2532 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2533
2534 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2535 "receive_config = 0x%08X, write_into_reg =%d\n",
2536 rtlpci->receive_config, write_into_reg);
2537}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
index b4460a41bd01..1850fde881b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
@@ -61,8 +61,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
61void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw); 61void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
62void rtl88ee_suspend(struct ieee80211_hw *hw); 62void rtl88ee_suspend(struct ieee80211_hw *hw);
63void rtl88ee_resume(struct ieee80211_hw *hw); 63void rtl88ee_resume(struct ieee80211_hw *hw);
64void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
65 bool allow_all_da, bool write_into_reg);
66void rtl88ee_fw_clk_off_timer_callback(unsigned long data); 64void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
67 65
68#endif 66#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 1b4101bf9974..842d69349a37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,7 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
93 u8 tid; 93 u8 tid;
94 94
95 rtl8188ee_bt_reg_init(hw); 95 rtl8188ee_bt_reg_init(hw);
96 rtlpci->msi_support = true; 96 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
97 97
98 rtlpriv->dm.dm_initialgain_enable = 1; 98 rtlpriv->dm.dm_initialgain_enable = 1;
99 rtlpriv->dm.dm_flag = 0; 99 rtlpriv->dm.dm_flag = 0;
@@ -255,7 +255,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
255 .enable_hw_sec = rtl88ee_enable_hw_security_config, 255 .enable_hw_sec = rtl88ee_enable_hw_security_config,
256 .set_key = rtl88ee_set_key, 256 .set_key = rtl88ee_set_key,
257 .init_sw_leds = rtl88ee_init_sw_leds, 257 .init_sw_leds = rtl88ee_init_sw_leds,
258 .allow_all_destaddr = rtl88ee_allow_all_destaddr,
259 .get_bbreg = rtl88e_phy_query_bb_reg, 258 .get_bbreg = rtl88e_phy_query_bb_reg,
260 .set_bbreg = rtl88e_phy_set_bb_reg, 259 .set_bbreg = rtl88e_phy_set_bb_reg,
261 .get_rfreg = rtl88e_phy_query_rf_reg, 260 .get_rfreg = rtl88e_phy_query_rf_reg,
@@ -267,6 +266,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
267 .inactiveps = true, 266 .inactiveps = true,
268 .swctrl_lps = false, 267 .swctrl_lps = false,
269 .fwctrl_lps = true, 268 .fwctrl_lps = true,
269 .msi_support = false,
270 .debug = DBG_EMERG, 270 .debug = DBG_EMERG,
271}; 271};
272 272
@@ -383,10 +383,12 @@ module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
383module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); 383module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
384module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); 384module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
385module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); 385module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
386module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
386MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 387MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
387MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); 388MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
388MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); 389MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
389MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); 390MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
391MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
390MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 392MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
391 393
392static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 394static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
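rtl8188ee here and rtl8723be below get identical treatment: the hardcoded rtlpci->msi_support = true becomes a module parameter defaulting to false, turning MSI into an opt-in (modprobe rtl8188ee msi=1) rather than a regression on systems whose interrupt routing cannot deliver MSIs. The added plumbing, reduced to its skeleton:

static struct rtl_mod_params sketch_mod_params = {
        .msi_support = false,             /* conservative default */
        /* ... */
};

module_param_named(msi, sketch_mod_params.msi_support, bool, 0444);
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");

/* at probe time the per-device flag then comes from the parameter:
 *   rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;      */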
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 55adf043aef7..cdecb0fd4d8e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -2423,24 +2423,3 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
2423void rtl92ce_resume(struct ieee80211_hw *hw) 2423void rtl92ce_resume(struct ieee80211_hw *hw)
2424{ 2424{
2425} 2425}
2426
2427/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2428void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
2429 bool allow_all_da, bool write_into_reg)
2430{
2431 struct rtl_priv *rtlpriv = rtl_priv(hw);
2432 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2433
2434 if (allow_all_da) {/* Set BIT0 */
2435 rtlpci->receive_config |= RCR_AAP;
2436 } else {/* Clear BIT0 */
2437 rtlpci->receive_config &= ~RCR_AAP;
2438 }
2439
2440 if (write_into_reg)
2441 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2442
2443 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2444 "receive_config=0x%08X, write_into_reg=%d\n",
2445 rtlpci->receive_config, write_into_reg);
2446}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 2d063b0c7760..5533070f266c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -76,7 +76,5 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
76void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw); 76void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
77void rtl92ce_suspend(struct ieee80211_hw *hw); 77void rtl92ce_suspend(struct ieee80211_hw *hw);
78void rtl92ce_resume(struct ieee80211_hw *hw); 78void rtl92ce_resume(struct ieee80211_hw *hw);
79void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
80 bool allow_all_da, bool write_into_reg);
81 79
82#endif 80#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b790320d2030..12f21f4073e8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -229,7 +229,6 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
229 .enable_hw_sec = rtl92ce_enable_hw_security_config, 229 .enable_hw_sec = rtl92ce_enable_hw_security_config,
230 .set_key = rtl92ce_set_key, 230 .set_key = rtl92ce_set_key,
231 .init_sw_leds = rtl92ce_init_sw_leds, 231 .init_sw_leds = rtl92ce_init_sw_leds,
232 .allow_all_destaddr = rtl92ce_allow_all_destaddr,
233 .get_bbreg = rtl92c_phy_query_bb_reg, 232 .get_bbreg = rtl92c_phy_query_bb_reg,
234 .set_bbreg = rtl92c_phy_set_bb_reg, 233 .set_bbreg = rtl92c_phy_set_bb_reg,
235 .set_rfreg = rtl92ce_phy_set_rf_reg, 234 .set_rfreg = rtl92ce_phy_set_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 07cb06da6729..a903c2671b4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -511,7 +511,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
511 pr_info("MAC auto ON okay!\n"); 511 pr_info("MAC auto ON okay!\n");
512 break; 512 break;
513 } 513 }
514 if (pollingCount++ > 100) { 514 if (pollingCount++ > 1000) {
515 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, 515 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
516 "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); 516 "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
517 return -ENODEV; 517 return -ENODEV;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c61311084d7e..361435f8608a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -395,9 +395,6 @@ static struct usb_driver rtl8192cu_driver = {
395 /* .resume = rtl_usb_resume, */ 395 /* .resume = rtl_usb_resume, */
396 /* .reset_resume = rtl8192c_resume, */ 396 /* .reset_resume = rtl8192c_resume, */
397#endif /* CONFIG_PM */ 397#endif /* CONFIG_PM */
398#ifdef CONFIG_AUTOSUSPEND
399 .supports_autosuspend = 1,
400#endif
401 .disable_hub_initiated_lpm = 1, 398 .disable_hub_initiated_lpm = 1,
402}; 399};
403 400
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 9098558d916d..1c7101bcd790 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2544,23 +2544,3 @@ void rtl92se_resume(struct ieee80211_hw *hw)
2544 pci_write_config_dword(rtlpci->pdev, 0x40, 2544 pci_write_config_dword(rtlpci->pdev, 0x40,
2545 val & 0xffff00ff); 2545 val & 0xffff00ff);
2546} 2546}
2547
2548/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2549void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
2550 bool allow_all_da, bool write_into_reg)
2551{
2552 struct rtl_priv *rtlpriv = rtl_priv(hw);
2553 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2554
2555 if (allow_all_da) /* Set BIT0 */
2556 rtlpci->receive_config |= RCR_AAP;
2557 else /* Clear BIT0 */
2558 rtlpci->receive_config &= ~RCR_AAP;
2559
2560 if (write_into_reg)
2561 rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
2562
2563 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2564 "receive_config=0x%08X, write_into_reg=%d\n",
2565 rtlpci->receive_config, write_into_reg);
2566}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index da48aa8cbe6f..4cacee10f31e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -74,7 +74,5 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
74 u8 enc_algo, bool is_wepkey, bool clear_all); 74 u8 enc_algo, bool is_wepkey, bool clear_all);
75void rtl92se_suspend(struct ieee80211_hw *hw); 75void rtl92se_suspend(struct ieee80211_hw *hw);
76void rtl92se_resume(struct ieee80211_hw *hw); 76void rtl92se_resume(struct ieee80211_hw *hw);
77void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
78 bool allow_all_da, bool write_into_reg);
79 77
80#endif 78#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 2e8e6f8d2d51..1bff2a0f7600 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -290,7 +290,6 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
290 .enable_hw_sec = rtl92se_enable_hw_security_config, 290 .enable_hw_sec = rtl92se_enable_hw_security_config,
291 .set_key = rtl92se_set_key, 291 .set_key = rtl92se_set_key,
292 .init_sw_leds = rtl92se_init_sw_leds, 292 .init_sw_leds = rtl92se_init_sw_leds,
293 .allow_all_destaddr = rtl92se_allow_all_destaddr,
294 .get_bbreg = rtl92s_phy_query_bb_reg, 293 .get_bbreg = rtl92s_phy_query_bb_reg,
295 .set_bbreg = rtl92s_phy_set_bb_reg, 294 .set_bbreg = rtl92s_phy_set_bb_reg,
296 .get_rfreg = rtl92s_phy_query_rf_reg, 295 .get_rfreg = rtl92s_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 48fee1be78c2..5b4a714f3c8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -32,7 +32,6 @@
32#include "dm.h" 32#include "dm.h"
33#include "fw.h" 33#include "fw.h"
34#include "../rtl8723com/fw_common.h" 34#include "../rtl8723com/fw_common.h"
35#include "../rtl8723com/fw_common.h"
36#include "phy.h" 35#include "phy.h"
37#include "reg.h" 36#include "reg.h"
38#include "hal_btc.h" 37#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 65c9e80e1f78..87f69166a7ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -2383,24 +2383,3 @@ void rtl8723ae_suspend(struct ieee80211_hw *hw)
2383void rtl8723ae_resume(struct ieee80211_hw *hw) 2383void rtl8723ae_resume(struct ieee80211_hw *hw)
2384{ 2384{
2385} 2385}
2386
2387/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2388void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
2389 bool allow_all_da, bool write_into_reg)
2390{
2391 struct rtl_priv *rtlpriv = rtl_priv(hw);
2392 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2393
2394 if (allow_all_da) /* Set BIT0 */
2395 rtlpci->receive_config |= RCR_AAP;
2396 else /* Clear BIT0 */
2397 rtlpci->receive_config &= ~RCR_AAP;
2398
2399 if (write_into_reg)
2400 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2401
2402
2403 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2404 "receive_config=0x%08X, write_into_reg=%d\n",
2405 rtlpci->receive_config, write_into_reg);
2406}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
index 6fa24f79b1d7..d3bc39fb27a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -67,7 +67,5 @@ void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
67void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw); 67void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
68void rtl8723ae_suspend(struct ieee80211_hw *hw); 68void rtl8723ae_suspend(struct ieee80211_hw *hw);
69void rtl8723ae_resume(struct ieee80211_hw *hw); 69void rtl8723ae_resume(struct ieee80211_hw *hw);
70void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
71 bool allow_all_da, bool write_into_reg);
72 70
73#endif 71#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 1087a3bd07fa..73cba1eec8cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -238,7 +238,6 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
238 .enable_hw_sec = rtl8723ae_enable_hw_security_config, 238 .enable_hw_sec = rtl8723ae_enable_hw_security_config,
239 .set_key = rtl8723ae_set_key, 239 .set_key = rtl8723ae_set_key,
240 .init_sw_leds = rtl8723ae_init_sw_leds, 240 .init_sw_leds = rtl8723ae_init_sw_leds,
241 .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
242 .get_bbreg = rtl8723_phy_query_bb_reg, 241 .get_bbreg = rtl8723_phy_query_bb_reg,
243 .set_bbreg = rtl8723_phy_set_bb_reg, 242 .set_bbreg = rtl8723_phy_set_bb_reg,
244 .get_rfreg = rtl8723ae_phy_query_rf_reg, 243 .get_rfreg = rtl8723ae_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index 0fdf0909321f..3d555495b453 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -2501,23 +2501,3 @@ void rtl8723be_suspend(struct ieee80211_hw *hw)
 void rtl8723be_resume(struct ieee80211_hw *hw)
 {
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
-				  bool write_into_reg)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-	if (allow_all_da) /* Set BIT0 */
-		rtlpci->receive_config |= RCR_AAP;
-	else /* Clear BIT0 */
-		rtlpci->receive_config &= ~RCR_AAP;
-
-	if (write_into_reg)
-		rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-	RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-		 "receive_config = 0x%08X, write_into_reg =%d\n",
-		 rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
index b7449a9b57e4..64c7551af6b7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -59,6 +59,4 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
 void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
 void rtl8723be_suspend(struct ieee80211_hw *hw);
 void rtl8723be_resume(struct ieee80211_hw *hw);
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
-				  bool write_into_reg);
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index b4577ebc4bb0..ff12bf41644b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
 	rtl8723be_bt_reg_init(hw);
-	rtlpci->msi_support = true;
+	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
 	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
 	rtlpriv->dm.dm_initialgain_enable = 1;
@@ -253,6 +253,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
 	.inactiveps = true,
 	.swctrl_lps = false,
 	.fwctrl_lps = true,
+	.msi_support = false,
 	.debug = DBG_EMERG,
 };
 
@@ -365,9 +366,11 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
 MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
 MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
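
rtl8723be previously forced MSI on unconditionally; routing the choice through a module parameter lets users fall back to legacy interrupts on systems where MSI misbehaves. The parameter only records a preference -- presumably the shared PCI code consults rtlpci->msi_support when requesting the IRQ. A hedged sketch of that decision point (the helper name and error handling here are illustrative, not the driver's actual code):

/* Sketch: prefer MSI when the module parameter allows it, falling back
 * to a legacy shared interrupt otherwise. */
static int rtl_pci_request_irq(struct pci_dev *pdev, struct rtl_pci *rtlpci,
			       irq_handler_t handler, void *ctx)
{
	int ret;

	if (rtlpci->msi_support) {
		ret = pci_enable_msi(pdev);	/* may fail on some chipsets */
		if (!ret) {
			ret = request_irq(pdev->irq, handler, 0,
					  "rtlwifi", ctx);
			if (!ret)
				return 0;
			pci_disable_msi(pdev);
		}
	}

	/* Legacy INTx, which may be shared with other devices */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "rtlwifi", ctx);
}
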
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index e0a0d8c8fed5..969eaea5eddd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -33,7 +33,6 @@
 #include "trx.h"
 #include "led.h"
 #include "dm.h"
-#include "phy.h"
 
 static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
 {
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6965afdf572a..407a7936d364 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1960,8 +1960,6 @@ struct rtl_hal_ops {
 			    u32 regaddr, u32 bitmask);
 	void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
 			   u32 regaddr, u32 bitmask, u32 data);
-	void (*allow_all_destaddr)(struct ieee80211_hw *hw,
-				   bool allow_all_da, bool write_into_reg);
 	void (*linked_set_reg) (struct ieee80211_hw *hw);
 	void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
 	void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
@@ -2030,6 +2028,10 @@ struct rtl_mod_params {
 
 	/* default: 1 = using linked fw power save */
 	bool fwctrl_lps;
+
+	/* default: 0 = not using MSI interrupts mode */
+	/* submodules should set their own default value */
+	bool msi_support;
 };
 
 struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index 5a4ec56c83d0..5695628757ee 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -2,7 +2,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/crc7.h>
 
 #include "wl1251.h"
 #include "reg.h"
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index bf1fa18b9786..ede31f048ef9 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -2,7 +2,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/crc7.h>
 #include <linux/etherdevice.h>
 
 #include "wl1251.h"
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index db0105313745..c98630394a1a 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -124,11 +124,12 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
 		return ret;
 	}
 
-	if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
 		wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
 		/* indicate to the stack, that beacons have been lost */
-		ieee80211_beacon_loss(wl->vif);
+		if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
+			ieee80211_beacon_loss(wl->vif);
 	}
 
 	if (vector & REGAINED_BSS_EVENT_ID) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 757e25784a8a..4e782f18ae34 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -550,6 +550,34 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
 	mutex_unlock(&wl->mutex);
 }
 
+static int wl1251_build_null_data(struct wl1251 *wl)
+{
+	struct sk_buff *skb = NULL;
+	int size;
+	void *ptr;
+	int ret = -ENOMEM;
+
+	if (wl->bss_type == BSS_TYPE_IBSS) {
+		size = sizeof(struct wl12xx_null_data_template);
+		ptr = NULL;
+	} else {
+		skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+		if (!skb)
+			goto out;
+		size = skb->len;
+		ptr = skb->data;
+	}
+
+	ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
+
+out:
+	dev_kfree_skb(skb);
+	if (ret)
+		wl1251_warning("cmd build null data failed: %d", ret);
+
+	return ret;
+}
+
 static int wl1251_build_qos_null_data(struct wl1251 *wl)
 {
 	struct ieee80211_qos_hdr template;
@@ -687,16 +715,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 		wl->power_level = conf->power_level;
 	}
 
-	/*
-	 * Tell stack that connection is lost because hw encryption isn't
-	 * supported in monitor mode.
-	 * This requires temporary enabling of the hw connection monitor flag
-	 */
-	if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
-		wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
-		ieee80211_connection_loss(wl->vif);
-	}
-
 out_sleep:
 	wl1251_ps_elp_sleep(wl);
 
@@ -1103,24 +1121,19 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 		wl->rssi_thold = bss_conf->cqm_rssi_thold;
 	}
 
-	if (changed & BSS_CHANGED_BSSID) {
+	if ((changed & BSS_CHANGED_BSSID) &&
+	    memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
 		memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
-		skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
-		if (!skb)
-			goto out_sleep;
-
-		ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
-					      skb->data, skb->len);
-		dev_kfree_skb(skb);
-		if (ret < 0)
-			goto out_sleep;
+		if (!is_zero_ether_addr(wl->bssid)) {
+			ret = wl1251_build_null_data(wl);
+			if (ret < 0)
+				goto out_sleep;
 
 			ret = wl1251_build_qos_null_data(wl);
 			if (ret < 0)
-			goto out;
+				goto out_sleep;
 
-		if (wl->bss_type != BSS_TYPE_IBSS) {
 			ret = wl1251_join(wl, wl->bss_type, wl->channel,
 					  wl->beacon_int, wl->dtim_period);
 			if (ret < 0)
@@ -1129,9 +1142,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_ASSOC) {
-		/* Disable temporary enabled hw connection monitor flag */
-		wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
-
 		if (bss_conf->assoc) {
 			wl->beacon_int = bss_conf->beacon_int;
 
@@ -1216,8 +1226,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 		if (ret < 0)
 			goto out_sleep;
 
-		ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
-				  wl->channel, wl->dtim_period);
+		ret = wl1251_join(wl, wl->bss_type, wl->channel,
+				  wl->beacon_int, wl->dtim_period);
 
 		if (ret < 0)
 			goto out_sleep;
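
The last hunk above is a pure argument-order fix: the call passed wl->beacon_int where the channel belongs. A compile-only sketch (the signature is presumed from the corrected call sites, not quoted from the driver) shows why the swap went unnoticed:

/* Presumed prototype; all parameters are small integers, so swapping
 * two of them converts implicitly and draws no compiler diagnostic. */
struct wl1251;	/* opaque here */

static int wl1251_join(struct wl1251 *wl, unsigned char bss_type,
		       unsigned char channel, unsigned short beacon_interval,
		       unsigned char dtim_period);

/* before: wl1251_join(wl, bss_type, beacon_int, channel, dtim_period);
 * after:  wl1251_join(wl, bss_type, channel, beacon_int, dtim_period);
 */
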
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index b06d36d99362..a0aa8fa72392 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -23,6 +23,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/swab.h>
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
@@ -83,47 +84,44 @@ static void wl1251_spi_reset(struct wl1251 *wl)
 
 static void wl1251_spi_wake(struct wl1251 *wl)
 {
-	u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
 	struct spi_transfer t;
 	struct spi_message m;
+	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 
-	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 	if (!cmd) {
 		wl1251_error("could not allocate cmd for spi init");
 		return;
 	}
 
-	memset(crc, 0, sizeof(crc));
 	memset(&t, 0, sizeof(t));
 	spi_message_init(&m);
 
 	/* Set WSPI_INIT_COMMAND
 	 * the data is being send from the MSB to LSB
 	 */
-	cmd[2] = 0xff;
-	cmd[3] = 0xff;
-	cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
-	cmd[0] = 0;
-	cmd[7] = 0;
-	cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
-	cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+	cmd[0] = 0xff;
+	cmd[1] = 0xff;
+	cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+	cmd[3] = 0;
+	cmd[4] = 0;
+	cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+	cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+	cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
 
 	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
-		cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+		cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
 	else
-		cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
-	cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
-		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
-	crc[0] = cmd[1];
-	crc[1] = cmd[0];
-	crc[2] = cmd[7];
-	crc[3] = cmd[6];
-	crc[4] = cmd[5];
+		cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
 
-	cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
-	cmd[4] |= WSPI_INIT_CMD_END;
+	cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+	/*
+	 * The above is the logical order; it must actually be stored
+	 * in the buffer byte-swapped.
+	 */
+	__swab32s((u32 *)cmd);
+	__swab32s((u32 *)cmd+1);
 
 	t.tx_buf = cmd;
 	t.len = WSPI_INIT_CMD_LEN;
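
The rewritten init path fills cmd[] in logical (documented) order, runs crc7_be() directly over the five command bytes, and only then byte-swaps each 32-bit word for the controller; the same rewrite appears again in wlcore/spi.c further down. A standalone sketch of what the two __swab32s() calls do to the buffer, with a plain-C stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's __swab32s(): reverse the four bytes of one
 * 32-bit word in place (memcpy avoids alignment/aliasing trouble). */
static void swab32s(uint8_t *p)
{
	uint32_t x;

	memcpy(&x, p, 4);
	x = (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	    ((x << 8) & 0x00ff0000u) | (x << 24);
	memcpy(p, &x, 4);
}

int main(void)
{
	/* Stand-in for the 8-byte WSPI init command, filled in logical
	 * order: index 0 is the first documented byte, index 7 the CRC. */
	uint8_t cmd[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	swab32s(cmd);		/* first 32-bit word */
	swab32s(cmd + 4);	/* second 32-bit word */

	/* Each word is now byte-reversed: prints 3 2 1 0 7 6 5 4 */
	for (int i = 0; i < 8; i++)
		printf("%d ", cmd[i]);
	printf("\n");
	return 0;
}
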
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index f7381dd69009..0f2cfb0d2a9e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -57,7 +57,7 @@ static const struct file_operations name## _ops = { \
 				    wl, &name## _ops);			\
 	if (!entry || IS_ERR(entry))					\
 		goto err;						\
-} while (0);
+} while (0)
 
 
 #define DEBUGFS_ADD_PREFIX(prefix, name, parent)			\
@@ -66,7 +66,7 @@ static const struct file_operations name## _ops = { \
 				    wl, &prefix## _## name## _ops);	\
 	if (!entry || IS_ERR(entry))					\
 		goto err;						\
-} while (0);
+} while (0)
 
 #define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type)		\
 static ssize_t sub## _ ##name## _read(struct file *file,		\
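
Dropping the semicolon from the do { } while (0) wrapper matters because the caller supplies its own; with the semicolon baked into the macro, the expansion ends in an empty extra statement that detaches a following else. A small self-contained illustration (macro names are hypothetical):

#include <stdio.h>

#define BAD_INIT(x)  do { (x) = 0; } while (0);	/* trailing ';' */
#define GOOD_INIT(x) do { (x) = 0; } while (0)

int main(void)
{
	int a = 5;

	/* GOOD_INIT behaves like one ordinary statement: */
	if (a)
		GOOD_INIT(a);
	else
		a = 1;

	/* With BAD_INIT the same code expands to "while (0);;" -- the
	 * extra empty statement terminates the if, so the else has no
	 * if to attach to and compilation fails:
	 *
	 *   if (a)
	 *           BAD_INIT(a);   // error: 'else' without a previous 'if'
	 *   else
	 *           a = 1;
	 */
	printf("%d\n", a);
	return 0;
}
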
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e71eae353368..3d6028e62750 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1416,7 +1416,7 @@ void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
 
 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
 				 u16 offset, u8 flags,
-				 u8 *pattern, u8 len)
+				 const u8 *pattern, u8 len)
 {
 	struct wl12xx_rx_filter_field *field;
 
@@ -5184,7 +5184,8 @@ out:
 	mutex_unlock(&wl->mutex);
 }
 
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    u32 queues, bool drop)
 {
 	struct wl1271 *wl = hw->priv;
 
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 29ef2492951f..d3dd7bfdf3f1 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
 static int wl1271_probe(struct sdio_func *func,
 			const struct sdio_device_id *id)
 {
-	struct wlcore_platdev_data *pdev_data;
+	struct wlcore_platdev_data pdev_data;
 	struct wl12xx_sdio_glue *glue;
 	struct resource res[1];
 	mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
 	if (func->num != 0x02)
 		return -ENODEV;
 
-	pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-	if (!pdev_data)
-		goto out;
-
-	pdev_data->if_ops = &sdio_ops;
+	memset(&pdev_data, 0x00, sizeof(pdev_data));
+	pdev_data.if_ops = &sdio_ops;
 
 	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
 	if (!glue) {
 		dev_err(&func->dev, "can't allocate glue\n");
-		goto out_free_pdev_data;
+		goto out;
 	}
 
 	glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
 	/* Use block mode for transferring over one block size of data */
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-	pdev_data->pdata = wl12xx_get_platform_data();
-	if (IS_ERR(pdev_data->pdata)) {
-		ret = PTR_ERR(pdev_data->pdata);
+	pdev_data.pdata = wl12xx_get_platform_data();
+	if (IS_ERR(pdev_data.pdata)) {
+		ret = PTR_ERR(pdev_data.pdata);
 		dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
 		goto out_free_glue;
 	}
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
 	dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
 	if (mmcflags & MMC_PM_KEEP_POWER)
-		pdev_data->pdata->pwr_in_suspend = true;
+		pdev_data.pdata->pwr_in_suspend = true;
 
 	sdio_set_drvdata(func, glue);
 
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
 
 	memset(res, 0x00, sizeof(res));
 
-	res[0].start = pdev_data->pdata->irq;
+	res[0].start = pdev_data.pdata->irq;
 	res[0].flags = IORESOURCE_IRQ;
 	res[0].name = "irq";
 
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
 		goto out_dev_put;
 	}
 
-	ret = platform_device_add_data(glue->core, pdev_data,
-				       sizeof(*pdev_data));
+	ret = platform_device_add_data(glue->core, &pdev_data,
+				       sizeof(pdev_data));
 	if (ret) {
 		dev_err(glue->dev, "can't add platform data\n");
 		goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
 out_free_glue:
 	kfree(glue);
 
-out_free_pdev_data:
-	kfree(pdev_data);
-
 out:
 	return ret;
 }
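
The probe path can keep pdev_data on the stack because platform_device_add_data() copies the buffer it is handed into the device's platform_data rather than storing the caller's pointer, which is also what lets the error unwinding lose the kfree() label above. A minimal sketch of the pattern with a hypothetical payload type:

#include <linux/platform_device.h>

struct example_pdata {		/* hypothetical payload */
	int irq;
};

static int example_add_child_data(struct platform_device *core)
{
	struct example_pdata pdata = { .irq = 42 };	/* stack lifetime */

	/* The core duplicates &pdata into core->dev.platform_data, so the
	 * local going out of scope afterwards leaves no dangling pointer. */
	return platform_device_add_data(core, &pdata, sizeof(pdata));
}
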
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index dbe826dd7c23..392c882b28f0 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -24,11 +24,12 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
 #include <linux/platform_device.h>
-#include <linux/slab.h>
 
 #include "wlcore.h"
 #include "wl12xx_80211.h"
@@ -110,18 +111,16 @@ static void wl12xx_spi_reset(struct device *child)
 static void wl12xx_spi_init(struct device *child)
 {
 	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
-	u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
 	struct spi_transfer t;
 	struct spi_message m;
+	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 
-	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 	if (!cmd) {
 		dev_err(child->parent,
 			"could not allocate cmd for spi init\n");
 		return;
 	}
 
-	memset(crc, 0, sizeof(crc));
 	memset(&t, 0, sizeof(t));
 	spi_message_init(&m);
 
@@ -129,30 +128,29 @@ static void wl12xx_spi_init(struct device *child)
 	 * Set WSPI_INIT_COMMAND
 	 * the data is being send from the MSB to LSB
 	 */
-	cmd[2] = 0xff;
-	cmd[3] = 0xff;
-	cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
-	cmd[0] = 0;
-	cmd[7] = 0;
-	cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
-	cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+	cmd[0] = 0xff;
+	cmd[1] = 0xff;
+	cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+	cmd[3] = 0;
+	cmd[4] = 0;
+	cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+	cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+	cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+		 | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
 
 	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
-		cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+		cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
 	else
-		cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
-	cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
-		  | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
-	crc[0] = cmd[1];
-	crc[1] = cmd[0];
-	crc[2] = cmd[7];
-	crc[3] = cmd[6];
-	crc[4] = cmd[5];
+		cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
 
-	cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
-	cmd[4] |= WSPI_INIT_CMD_END;
+	cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+	/*
+	 * The above is the logical order; it must actually be stored
+	 * in the buffer byte-swapped.
+	 */
+	__swab32s((u32 *)cmd);
+	__swab32s((u32 *)cmd+1);
 
 	t.tx_buf = cmd;
 	t.len = WSPI_INIT_CMD_LEN;
@@ -327,27 +325,25 @@ static struct wl1271_if_operations spi_ops = {
 static int wl1271_probe(struct spi_device *spi)
 {
 	struct wl12xx_spi_glue *glue;
-	struct wlcore_platdev_data *pdev_data;
+	struct wlcore_platdev_data pdev_data;
 	struct resource res[1];
 	int ret = -ENOMEM;
 
-	pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-	if (!pdev_data)
-		goto out;
+	memset(&pdev_data, 0x00, sizeof(pdev_data));
 
-	pdev_data->pdata = dev_get_platdata(&spi->dev);
-	if (!pdev_data->pdata) {
+	pdev_data.pdata = dev_get_platdata(&spi->dev);
+	if (!pdev_data.pdata) {
 		dev_err(&spi->dev, "no platform data\n");
 		ret = -ENODEV;
-		goto out_free_pdev_data;
+		goto out;
 	}
 
-	pdev_data->if_ops = &spi_ops;
+	pdev_data.if_ops = &spi_ops;
 
 	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
 	if (!glue) {
 		dev_err(&spi->dev, "can't allocate glue\n");
-		goto out_free_pdev_data;
+		goto out;
 	}
 
 	glue->dev = &spi->dev;
@@ -385,8 +381,8 @@ static int wl1271_probe(struct spi_device *spi)
 		goto out_dev_put;
 	}
 
-	ret = platform_device_add_data(glue->core, pdev_data,
-				       sizeof(*pdev_data));
+	ret = platform_device_add_data(glue->core, &pdev_data,
+				       sizeof(pdev_data));
 	if (ret) {
 		dev_err(glue->dev, "can't add platform data\n");
 		goto out_dev_put;
@@ -406,9 +402,6 @@ out_dev_put:
 out_free_glue:
 	kfree(glue);
 
-out_free_pdev_data:
-	kfree(pdev_data);
-
 out:
 	return ret;
 }
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 756e890bc5ee..c2c34a84ff3d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -512,8 +512,8 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_queue_recovery_work(struct wl1271 *wl);
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
 				 u16 offset, u8 flags,
-				 u8 *pattern, u8 len);
+				 const u8 *pattern, u8 len);
 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0d4a285cbd7e..4dd7c4a1923b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
  */
 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
-struct xenvif {
-	/* Unique identifier for this interface. */
-	domid_t domid;
-	unsigned int handle;
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
 
-	/* Is this interface disabled? True when backend discovers
-	 * frontend is rogue.
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
+struct xenvif;
+
+struct xenvif_stats {
+	/* Stats fields to be updated per-queue.
+	 * A subset of struct net_device_stats that contains only the
+	 * fields that are updated in netback.c for each queue.
 	 */
-	bool disabled;
+	unsigned int rx_bytes;
+	unsigned int rx_packets;
+	unsigned int tx_bytes;
+	unsigned int tx_packets;
+
+	/* Additional stats used by xenvif */
+	unsigned long rx_gso_checksum_fixup;
+	unsigned long tx_zerocopy_sent;
+	unsigned long tx_zerocopy_success;
+	unsigned long tx_zerocopy_fail;
+	unsigned long tx_frag_overflow;
+};
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+	unsigned int id; /* Queue ID, 0-based */
+	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+	struct xenvif *vif; /* Parent VIF */
 
 	/* Use NAPI for guest TX */
 	struct napi_struct napi;
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int tx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 	struct xen_netif_tx_back_ring tx;
 	struct sk_buff_head tx_queue;
 	struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int rx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
@@ -158,14 +179,29 @@ struct xenvif {
 
 	struct timer_list wake_queue;
 
-	/* This array is allocated seperately as it is large */
-	struct gnttab_copy *grant_copy_op;
+	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
 
 	/* We create one meta structure per ring request we consume, so
 	 * the maximum number is the same as the ring size.
 	 */
 	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+	unsigned long credit_bytes;
+	unsigned long credit_usec;
+	unsigned long remaining_credit;
+	struct timer_list credit_timeout;
+	u64 credit_window_start;
+
+	/* Statistics */
+	struct xenvif_stats stats;
+};
+
+struct xenvif {
+	/* Unique identifier for this interface. */
+	domid_t domid;
+	unsigned int handle;
+
 	u8 fe_dev_addr[6];
 
 	/* Frontend feature information. */
@@ -179,19 +215,13 @@ struct xenvif {
 	/* Internal feature information. */
 	u8 can_queue:1; /* can queue packets for receiver? */
 
-	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-	unsigned long credit_bytes;
-	unsigned long credit_usec;
-	unsigned long remaining_credit;
-	struct timer_list credit_timeout;
-	u64 credit_window_start;
+	/* Is this interface disabled? True when backend discovers
+	 * frontend is rogue.
+	 */
+	bool disabled;
 
-	/* Statistics */
-	unsigned long rx_gso_checksum_fixup;
-	unsigned long tx_zerocopy_sent;
-	unsigned long tx_zerocopy_success;
-	unsigned long tx_zerocopy_fail;
-	unsigned long tx_frag_overflow;
+	/* Queues */
+	struct xenvif_queue *queues;
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
@@ -206,7 +236,10 @@ struct xenvif *xenvif_alloc(struct device *parent,
 			    domid_t domid,
 			    unsigned int handle);
 
+int xenvif_init_queue(struct xenvif_queue *queue);
+void xenvif_deinit_queue(struct xenvif_queue *queue);
+
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
@@ -217,44 +250,47 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);
+
+int xenvif_queue_stopped(struct xenvif_queue *queue);
+void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 			      grant_ref_t tx_ring_ref,
 			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-int xenvif_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
 
 int xenvif_kthread_guest_rx(void *data);
-void xenvif_kick_thread(struct xenvif *vif);
+void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
 
-void xenvif_stop_queue(struct xenvif *vif);
+void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
 /* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 {
 	return MAX_PENDING_REQS -
-		vif->pending_prod + vif->pending_cons;
+		queue->pending_prod + queue->pending_cons;
 }
 
 /* Callback from stack when TX packet can be released */
@@ -264,5 +300,6 @@ extern bool separate_tx_rx_irq;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int xenvif_max_queues;
 
 #endif /* __XEN_NETBACK__COMMON_H__ */
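
The two name-size macros budget for the worst case: a 15-character interface name (IFNAMSIZ counts the trailing NUL), "-q" plus up to three digits of queue id, then "-tx" or "-rx". A quick userspace check of the arithmetic, with the sizes mirroring the #defines above:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ        16                      /* includes the NUL */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)          /* room for "-qNNN" */
#define IRQ_NAME_SIZE   (QUEUE_NAME_SIZE + 3)   /* room for "-tx"/"-rx" */

int main(void)
{
	char queue_name[QUEUE_NAME_SIZE];
	char irq_name[IRQ_NAME_SIZE];

	/* Worst case: 14-15 char interface name plus a 3-digit queue id;
	 * snprintf truncates safely if a name ever exceeds the budget. */
	snprintf(queue_name, sizeof(queue_name), "%s-q%u",
		 "vif65535.65535", 999u);
	snprintf(irq_name, sizeof(irq_name), "%s-tx", queue_name);

	printf("%s (%zu)\n%s (%zu)\n", queue_name, strlen(queue_name),
	       irq_name, strlen(irq_name));
	return 0;
}
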
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..852da34b8961 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+
+	if (!queue->vif->can_queue)
+		return;
+
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
-		napi_schedule(&vif->napi);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
 
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
 {
-	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	struct xenvif_queue *queue =
+		container_of(napi, struct xenvif_queue, napi);
 	int work_done;
 
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
 	 */
-	if (unlikely(vif->disabled)) {
+	if (unlikely(queue->vif->disabled)) {
 		napi_complete(napi);
 		return 0;
 	}
 
-	work_done = xenvif_tx_action(vif, budget);
+	work_done = xenvif_tx_action(queue, budget);
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		xenvif_napi_schedule_or_enable_events(vif);
+		xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
@@ -84,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	xenvif_kick_thread(vif);
+	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
@@ -99,28 +110,80 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
-	struct xenvif *vif = (struct xenvif *)data;
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
 
-	if (netif_queue_stopped(vif->dev)) {
-		netdev_err(vif->dev, "draining TX queue\n");
-		vif->rx_queue_purge = true;
-		xenvif_kick_thread(vif);
-		netif_wake_queue(vif->dev);
+void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
+
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
+{
+	struct xenvif_queue *queue = (struct xenvif_queue *)data;
+
+	if (xenvif_queue_stopped(queue)) {
+		netdev_err(queue->vif->dev, "draining TX queue\n");
+		queue->rx_queue_purge = true;
+		xenvif_kick_thread(queue);
+		xenvif_wake_queue(queue);
 	}
 }
 
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv, select_queue_fallback_t fallback)
+{
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u32 hash;
+	u16 queue_index;
+
+	/* First, check if there is only one queue to optimise the
+	 * single-queue or old frontend scenario.
+	 */
+	if (num_queues == 1) {
+		queue_index = 0;
+	} else {
+		/* Use skb_get_hash to obtain an L4 hash if available */
+		hash = skb_get_hash(skb);
+		queue_index = hash % num_queues;
+	}
+
+	return queue_index;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u16 index;
 	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL ||
-	    vif->dealloc_task == NULL ||
+	/* Drop the packet if queues are not set up */
+	if (num_queues < 1)
+		goto drop;
+
+	/* Obtain the queue to be used to transmit this packet */
+	index = skb_get_queue_mapping(skb);
+	if (index >= num_queues) {
+		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
+				    index, vif->dev->name);
+		index %= num_queues;
+	}
+	queue = &vif->queues[index];
+
+	/* Drop the packet if queue is not ready */
+	if (queue->task == NULL ||
+	    queue->dealloc_task == NULL ||
 	    !xenvif_schedulable(vif))
 		goto drop;
 
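
xenvif_select_queue() keeps each flow on one queue by hashing it: identical skb_get_hash() values always map to the same index, while distinct flows spread across the queues. A standalone illustration of the modulo mapping (the hash values below are invented, not from the kernel):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend flow hashes, as skb_get_hash() might return them */
	uint32_t flow_hash[] = { 0x1a2b3c4du, 0x1a2b3c4du, 0xdeadbeefu, 0x42u };
	unsigned int num_queues = 4;

	for (size_t i = 0; i < sizeof(flow_hash) / sizeof(flow_hash[0]); i++)
		printf("hash %08x -> queue %u\n",
		       (unsigned)flow_hash[i],
		       (unsigned)(flow_hash[i] % num_queues));
	/* The two identical hashes (same flow) land on the same queue. */
	return 0;
}
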
@@ -139,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
-		vif->wake_queue.function = xenvif_wake_queue;
-		vif->wake_queue.data = (unsigned long)vif;
-		xenvif_stop_queue(vif);
-		mod_timer(&vif->wake_queue,
+	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+		queue->wake_queue.function = xenvif_wake_queue_callback;
+		queue->wake_queue.data = (unsigned long)queue;
+		xenvif_stop_queue(queue);
+		mod_timer(&queue->wake_queue,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
-	skb_queue_tail(&vif->rx_queue, skb);
-	xenvif_kick_thread(vif);
+	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
@@ -161,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned long rx_bytes = 0;
+	unsigned long rx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_packets = 0;
+	unsigned int index;
+
+	if (vif->queues == NULL)
+		goto out;
+
+	/* Aggregate tx and rx stats from each queue */
+	for (index = 0; index < num_queues; ++index) {
+		queue = &vif->queues[index];
+		rx_bytes += queue->stats.rx_bytes;
+		rx_packets += queue->stats.rx_packets;
+		tx_bytes += queue->stats.tx_bytes;
+		tx_packets += queue->stats.tx_packets;
+	}
+
+out:
+	vif->dev->stats.rx_bytes = rx_bytes;
+	vif->dev->stats.rx_packets = rx_packets;
+	vif->dev->stats.tx_bytes = tx_bytes;
+	vif->dev->stats.tx_packets = tx_packets;
+
 	return &vif->dev->stats;
 }
 
 static void xenvif_up(struct xenvif *vif)
 {
-	napi_enable(&vif->napi);
-	enable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		enable_irq(vif->rx_irq);
-	xenvif_napi_schedule_or_enable_events(vif);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_enable(&queue->napi);
+		enable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			enable_irq(queue->rx_irq);
+		xenvif_napi_schedule_or_enable_events(queue);
+	}
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-	napi_disable(&vif->napi);
-	disable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		disable_irq(vif->rx_irq);
-	del_timer_sync(&vif->credit_timeout);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_disable(&queue->napi);
+		disable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			disable_irq(queue->rx_irq);
+		del_timer_sync(&queue->credit_timeout);
+	}
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -187,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_up(vif);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -196,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_down(vif);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
@@ -236,29 +339,29 @@ static const struct xenvif_stat {
 } xenvif_stats[] = {
 	{
 		"rx_gso_checksum_fixup",
-		offsetof(struct xenvif, rx_gso_checksum_fixup)
+		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
 	},
 	/* If (sent != success + fail), there are probably packets never
 	 * freed up properly!
 	 */
 	{
 		"tx_zerocopy_sent",
-		offsetof(struct xenvif, tx_zerocopy_sent),
+		offsetof(struct xenvif_stats, tx_zerocopy_sent),
 	},
 	{
 		"tx_zerocopy_success",
-		offsetof(struct xenvif, tx_zerocopy_success),
+		offsetof(struct xenvif_stats, tx_zerocopy_success),
 	},
 	{
 		"tx_zerocopy_fail",
-		offsetof(struct xenvif, tx_zerocopy_fail)
+		offsetof(struct xenvif_stats, tx_zerocopy_fail)
 	},
 	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
 	 * a guest with the same MAX_SKB_FRAG
 	 */
 	{
 		"tx_frag_overflow",
-		offsetof(struct xenvif, tx_frag_overflow)
+		offsetof(struct xenvif_stats, tx_frag_overflow)
 	},
 };
 
@@ -275,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
 static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
-	void *vif = netdev_priv(dev);
+	struct xenvif *vif = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
 	int i;
+	unsigned int queue_index;
+	struct xenvif_stats *vif_stats;
 
-	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
-		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+		unsigned long accum = 0;
+		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+			vif_stats = &vif->queues[queue_index].stats;
+			accum += *(unsigned long *)((void *)vif_stats +
+						    xenvif_stats[i].offset);
+		}
+		data[i] = accum;
+	}
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
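
Accumulating through an offsetof() table only works with byte-granular pointer arithmetic: the offset must be added to a void */char * view of the per-queue struct, since adding it to a struct xenvif_stats * would scale it by the struct size. A standalone sketch of the same table-driven pattern:

#include <stddef.h>
#include <stdio.h>

struct stats {
	unsigned long rx_ok;
	unsigned long tx_ok;
};

static const struct { const char *name; size_t offset; } table[] = {
	{ "rx_ok", offsetof(struct stats, rx_ok) },
	{ "tx_ok", offsetof(struct stats, tx_ok) },
};

int main(void)
{
	struct stats per_queue[2] = { { 1, 10 }, { 2, 20 } };

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		unsigned long accum = 0;
		for (size_t q = 0; q < 2; q++)
			/* char * arithmetic: advance by bytes, not structs */
			accum += *(unsigned long *)
				 ((char *)&per_queue[q] + table[i].offset);
		printf("%s = %lu\n", table[i].name, accum);
	}
	return 0;
}
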
@@ -312,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -321,10 +434,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
-	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+	/* Allocate a netdev with the max. supported number of queues.
+	 * When the guest selects the desired number, it will be updated
+	 * via netif_set_real_num_tx_queues().
+	 */
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+			      xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
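
Real queue allocation is deferred to the xenbus connect path (not part of this diff), once the frontend has negotiated how many queues it wants. A hedged sketch, using the helpers declared in common.h, of what that setup loop could look like -- the function name and error unwinding are illustrative only:

/* Sketch only: the actual allocation lives in the xenbus code.
 * Needs <linux/vmalloc.h> for vzalloc(). */
static int example_setup_queues(struct xenvif *vif, unsigned int requested)
{
	unsigned int i;
	int err;

	vif->queues = vzalloc(requested * sizeof(struct xenvif_queue));
	if (!vif->queues)
		return -ENOMEM;

	for (i = 0; i < requested; ++i) {
		struct xenvif_queue *queue = &vif->queues[i];

		queue->vif = vif;
		queue->id = i;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
			 vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err)
			return err;	/* real code would unwind here */
	}

	/* May fail; real code would check the return value. */
	netif_set_real_num_tx_queues(vif->dev, requested);
	return 0;
}
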
@@ -334,66 +451,28 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	vif = netdev_priv(dev);
 
-	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
-				     MAX_GRANT_COPY_OPS);
-	if (vif->grant_copy_op == NULL) {
-		pr_warn("Could not allocate grant copy space for %s\n", name);
-		free_netdev(dev);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	vif->domid = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
 	vif->ip_csum = 1;
 	vif->dev = dev;
-
 	vif->disabled = false;
 
-	vif->credit_bytes = vif->remaining_credit = ~0UL;
-	vif->credit_usec = 0UL;
-	init_timer(&vif->credit_timeout);
-	vif->credit_window_start = get_jiffies_64();
-
-	init_timer(&vif->wake_queue);
+	/* Start out with no queues. The call below does not require
+	 * rtnl_lock() as it happens before register_netdev().
+	 */
+	vif->queues = NULL;
+	netif_set_real_num_tx_queues(dev, 0);
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO6;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
-	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+	dev->ethtool_ops = &xenvif_ethtool_ops;
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-	skb_queue_head_init(&vif->rx_queue);
-	skb_queue_head_init(&vif->tx_queue);
-
-	vif->pending_cons = 0;
-	vif->pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		vif->pending_ring[i] = i;
-	spin_lock_init(&vif->callback_lock);
-	spin_lock_init(&vif->response_lock);
-	/* If ballooning is disabled, this will consume real memory, so you
-	 * better enable it. The long term solution would be to use just a
-	 * bunch of valid page descriptors, without dependency on ballooning
-	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       vif->mmap_pages,
-				       false);
-	if (err) {
-		netdev_err(dev, "Could not reserve mmap_pages\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
-			{ .callback = xenvif_zerocopy_callback,
-			  .ctx = NULL,
-			  .desc = i };
-		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
-	}
-
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
@@ -403,8 +482,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
-	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -421,98 +498,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
421 return vif; 498 return vif;
422} 499}
423 500
424int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 501int xenvif_init_queue(struct xenvif_queue *queue)
502{
503 int err, i;
504
505 queue->credit_bytes = queue->remaining_credit = ~0UL;
506 queue->credit_usec = 0UL;
507 init_timer(&queue->credit_timeout);
508 queue->credit_window_start = get_jiffies_64();
509
510 skb_queue_head_init(&queue->rx_queue);
511 skb_queue_head_init(&queue->tx_queue);
512
513 queue->pending_cons = 0;
514 queue->pending_prod = MAX_PENDING_REQS;
515 for (i = 0; i < MAX_PENDING_REQS; ++i)
516 queue->pending_ring[i] = i;
517
518 spin_lock_init(&queue->callback_lock);
519 spin_lock_init(&queue->response_lock);
520
521 /* If ballooning is disabled, this will consume real memory, so you
522 * better enable it. The long term solution would be to use just a
523 * bunch of valid page descriptors, without dependency on ballooning
524 */
525 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
526 queue->mmap_pages,
527 false);
528 if (err) {
529 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
530 return -ENOMEM;
531 }
532
533 for (i = 0; i < MAX_PENDING_REQS; i++) {
534 queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
535 { .callback = xenvif_zerocopy_callback,
536 .ctx = NULL,
537 .desc = i };
538 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
539 }
540
541 init_timer(&queue->wake_queue);
542
543 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
544 XENVIF_NAPI_WEIGHT);
545
546 return 0;
547}
548
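The per-queue split moves the pending-slot bookkeeping into xenvif_init_queue(): pending_ring[] is seeded with the identity permutation so every index starts out free, and pending_prod/pending_cons form a free-index ring whose difference is the number of available slots. A minimal userspace sketch of that ring, assuming a power-of-two capacity; the names are illustrative, not the driver's:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PENDING 256                     /* power of two, like MAX_PENDING_REQS */

    static uint16_t ring[MAX_PENDING];
    static unsigned cons, prod = MAX_PENDING;   /* prod - cons == free slots */

    static unsigned idx(unsigned i) { return i & (MAX_PENDING - 1); }

    static int alloc_slot(uint16_t *out)
    {
        if (cons == prod)                   /* no free indices left */
            return -1;
        *out = ring[idx(cons++)];
        return 0;
    }

    static void free_slot(uint16_t slot)
    {
        assert(prod - cons < MAX_PENDING);  /* guard against double free */
        ring[idx(prod++)] = slot;
    }

    int main(void)
    {
        uint16_t s;

        for (unsigned i = 0; i < MAX_PENDING; i++)
            ring[i] = i;            /* identity fill, as in xenvif_init_queue() */
        if (!alloc_slot(&s))
            printf("got slot %u\n", s);
        free_slot(s);
        return 0;
    }

Wrapping indices with a power-of-two mask is why MAX_PENDING_REQS must stay a power of two: the masked producer/consumer counters then wrap for free, as pending_index() in netback.c relies on.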
549void xenvif_carrier_on(struct xenvif *vif)
550{
551 rtnl_lock();
552 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
553 dev_set_mtu(vif->dev, ETH_DATA_LEN);
554 netdev_update_features(vif->dev);
555 netif_carrier_on(vif->dev);
556 if (netif_running(vif->dev))
557 xenvif_up(vif);
558 rtnl_unlock();
559}
560
561int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
425 unsigned long rx_ring_ref, unsigned int tx_evtchn, 562 unsigned long rx_ring_ref, unsigned int tx_evtchn,
426 unsigned int rx_evtchn) 563 unsigned int rx_evtchn)
427{ 564{
428 struct task_struct *task; 565 struct task_struct *task;
429 int err = -ENOMEM; 566 int err = -ENOMEM;
430 567
431 BUG_ON(vif->tx_irq); 568 BUG_ON(queue->tx_irq);
432 BUG_ON(vif->task); 569 BUG_ON(queue->task);
433 BUG_ON(vif->dealloc_task); 570 BUG_ON(queue->dealloc_task);
434 571
435 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 572 err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
436 if (err < 0) 573 if (err < 0)
437 goto err; 574 goto err;
438 575
439 init_waitqueue_head(&vif->wq); 576 init_waitqueue_head(&queue->wq);
440 init_waitqueue_head(&vif->dealloc_wq); 577 init_waitqueue_head(&queue->dealloc_wq);
441 578
442 if (tx_evtchn == rx_evtchn) { 579 if (tx_evtchn == rx_evtchn) {
443 /* feature-split-event-channels == 0 */ 580 /* feature-split-event-channels == 0 */
444 err = bind_interdomain_evtchn_to_irqhandler( 581 err = bind_interdomain_evtchn_to_irqhandler(
445 vif->domid, tx_evtchn, xenvif_interrupt, 0, 582 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
446 vif->dev->name, vif); 583 queue->name, queue);
447 if (err < 0) 584 if (err < 0)
448 goto err_unmap; 585 goto err_unmap;
449 vif->tx_irq = vif->rx_irq = err; 586 queue->tx_irq = queue->rx_irq = err;
450 disable_irq(vif->tx_irq); 587 disable_irq(queue->tx_irq);
451 } else { 588 } else {
452 /* feature-split-event-channels == 1 */ 589 /* feature-split-event-channels == 1 */
453 snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name), 590 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
454 "%s-tx", vif->dev->name); 591 "%s-tx", queue->name);
455 err = bind_interdomain_evtchn_to_irqhandler( 592 err = bind_interdomain_evtchn_to_irqhandler(
456 vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, 593 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
457 vif->tx_irq_name, vif); 594 queue->tx_irq_name, queue);
458 if (err < 0) 595 if (err < 0)
459 goto err_unmap; 596 goto err_unmap;
460 vif->tx_irq = err; 597 queue->tx_irq = err;
461 disable_irq(vif->tx_irq); 598 disable_irq(queue->tx_irq);
462 599
463 snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name), 600 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
464 "%s-rx", vif->dev->name); 601 "%s-rx", queue->name);
465 err = bind_interdomain_evtchn_to_irqhandler( 602 err = bind_interdomain_evtchn_to_irqhandler(
466 vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, 603 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
467 vif->rx_irq_name, vif); 604 queue->rx_irq_name, queue);
468 if (err < 0) 605 if (err < 0)
469 goto err_tx_unbind; 606 goto err_tx_unbind;
470 vif->rx_irq = err; 607 queue->rx_irq = err;
471 disable_irq(vif->rx_irq); 608 disable_irq(queue->rx_irq);
472 } 609 }
473 610
474 task = kthread_create(xenvif_kthread_guest_rx, 611 task = kthread_create(xenvif_kthread_guest_rx,
475 (void *)vif, "%s-guest-rx", vif->dev->name); 612 (void *)queue, "%s-guest-rx", queue->name);
476 if (IS_ERR(task)) { 613 if (IS_ERR(task)) {
477 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 614 pr_warn("Could not allocate kthread for %s\n", queue->name);
478 err = PTR_ERR(task); 615 err = PTR_ERR(task);
479 goto err_rx_unbind; 616 goto err_rx_unbind;
480 } 617 }
481 618 queue->task = task;
482 vif->task = task;
483 619
484 task = kthread_create(xenvif_dealloc_kthread, 620 task = kthread_create(xenvif_dealloc_kthread,
485 (void *)vif, "%s-dealloc", vif->dev->name); 621 (void *)queue, "%s-dealloc", queue->name);
486 if (IS_ERR(task)) { 622 if (IS_ERR(task)) {
487 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 623 pr_warn("Could not allocate kthread for %s\n", queue->name);
488 err = PTR_ERR(task); 624 err = PTR_ERR(task);
489 goto err_rx_unbind; 625 goto err_rx_unbind;
490 } 626 }
627 queue->dealloc_task = task;
491 628
492 vif->dealloc_task = task; 629 wake_up_process(queue->task);
493 630 wake_up_process(queue->dealloc_task);
494 rtnl_lock();
495 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
496 dev_set_mtu(vif->dev, ETH_DATA_LEN);
497 netdev_update_features(vif->dev);
498 netif_carrier_on(vif->dev);
499 if (netif_running(vif->dev))
500 xenvif_up(vif);
501 rtnl_unlock();
502
503 wake_up_process(vif->task);
504 wake_up_process(vif->dealloc_task);
505 631
506 return 0; 632 return 0;
507 633
508err_rx_unbind: 634err_rx_unbind:
509 unbind_from_irqhandler(vif->rx_irq, vif); 635 unbind_from_irqhandler(queue->rx_irq, queue);
510 vif->rx_irq = 0; 636 queue->rx_irq = 0;
511err_tx_unbind: 637err_tx_unbind:
512 unbind_from_irqhandler(vif->tx_irq, vif); 638 unbind_from_irqhandler(queue->tx_irq, queue);
513 vif->tx_irq = 0; 639 queue->tx_irq = 0;
514err_unmap: 640err_unmap:
515 xenvif_unmap_frontend_rings(vif); 641 xenvif_unmap_frontend_rings(queue);
516err: 642err:
517 module_put(THIS_MODULE); 643 module_put(THIS_MODULE);
518 return err; 644 return err;
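xenvif_connect() keeps the kernel's canonical goto-unwind shape: each acquisition that can fail jumps to a label that releases, in reverse order, only what was already set up. A compact sketch of the pattern with hypothetical stand-ins for the rings, IRQs and kthreads:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in resources; each setup step can fail independently. */
    static int map_rings(void)      { return 0; }
    static void unmap_rings(void)   { puts("unmap rings"); }
    static int bind_irq(void)       { return 0; }
    static void unbind_irq(void)    { puts("unbind irq"); }
    static int start_thread(void)   { return -1; }  /* force the unwind */

    static int connect(void)
    {
        int err;

        err = map_rings();
        if (err)
            goto err;
        err = bind_irq();
        if (err)
            goto err_unmap;
        err = start_thread();
        if (err)
            goto err_unbind;
        return 0;

    err_unbind:
        unbind_irq();       /* undo step 2 */
    err_unmap:
        unmap_rings();      /* undo step 1 */
    err:
        return err;
    }

    int main(void) { return connect() ? EXIT_FAILURE : EXIT_SUCCESS; }

Because the labels fall through top to bottom, a failure at step N releases steps N-1..1 and nothing else, which is exactly how the err_rx_unbind/err_tx_unbind/err_unmap ladder above behaves.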
@@ -529,38 +655,77 @@ void xenvif_carrier_off(struct xenvif *vif)
529 rtnl_unlock(); 655 rtnl_unlock();
530} 656}
531 657
658static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
659 unsigned int worst_case_skb_lifetime)
660{
661 int i, unmap_timeout = 0;
662
663 for (i = 0; i < MAX_PENDING_REQS; ++i) {
664 if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
665 unmap_timeout++;
666 schedule_timeout(msecs_to_jiffies(1000));
667 if (unmap_timeout > worst_case_skb_lifetime &&
668 net_ratelimit())
669 netdev_err(queue->vif->dev,
670 "Page still granted! Index: %x\n",
671 i);
672 i = -1;
673 }
674 }
675}
676
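The `i = -1` in xenvif_wait_unmap_timeout() is the notable trick: whenever any slot is still granted, the loop restarts from index 0 (the for-increment bumps -1 back to 0), so the function returns only after one full pass finds every handle invalid. A stripped-down sketch of that restart-scan wait; the sleep is replaced by an action that eventually clears the slot, purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define NSLOTS 8

    static bool busy[NSLOTS] = { false, true, false, true };

    static void wait_all_idle(void)
    {
        int passes = 0;

        for (int i = 0; i < NSLOTS; i++) {
            if (busy[i]) {
                if (++passes > 3)   /* stand-in for the rate-limited warning */
                    fprintf(stderr, "slot %d still busy\n", i);
                busy[i] = false;    /* stand-in for "sleep, then re-check" */
                i = -1;             /* restart the scan from slot 0 */
            }
        }
    }

    int main(void) { wait_all_idle(); puts("all idle"); return 0; }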
532void xenvif_disconnect(struct xenvif *vif) 677void xenvif_disconnect(struct xenvif *vif)
533{ 678{
679 struct xenvif_queue *queue = NULL;
680 unsigned int num_queues = vif->dev->real_num_tx_queues;
681 unsigned int queue_index;
682
534 if (netif_carrier_ok(vif->dev)) 683 if (netif_carrier_ok(vif->dev))
535 xenvif_carrier_off(vif); 684 xenvif_carrier_off(vif);
536 685
537 if (vif->task) { 686 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
538 del_timer_sync(&vif->wake_queue); 687 queue = &vif->queues[queue_index];
539 kthread_stop(vif->task);
540 vif->task = NULL;
541 }
542 688
543 if (vif->dealloc_task) { 689 if (queue->task) {
544 kthread_stop(vif->dealloc_task); 690 del_timer_sync(&queue->wake_queue);
545 vif->dealloc_task = NULL; 691 kthread_stop(queue->task);
546 } 692 queue->task = NULL;
693 }
547 694
548 if (vif->tx_irq) { 695 if (queue->dealloc_task) {
549 if (vif->tx_irq == vif->rx_irq) 696 kthread_stop(queue->dealloc_task);
550 unbind_from_irqhandler(vif->tx_irq, vif); 697 queue->dealloc_task = NULL;
551 else {
552 unbind_from_irqhandler(vif->tx_irq, vif);
553 unbind_from_irqhandler(vif->rx_irq, vif);
554 } 698 }
555 vif->tx_irq = 0; 699
700 if (queue->tx_irq) {
701 if (queue->tx_irq == queue->rx_irq)
702 unbind_from_irqhandler(queue->tx_irq, queue);
703 else {
704 unbind_from_irqhandler(queue->tx_irq, queue);
705 unbind_from_irqhandler(queue->rx_irq, queue);
706 }
707 queue->tx_irq = 0;
708 }
709
710 xenvif_unmap_frontend_rings(queue);
556 } 711 }
712}
557 713
558 xenvif_unmap_frontend_rings(vif); 714/* Reverse the relevant parts of xenvif_init_queue().
715 * Used for queue teardown from xenvif_free(), and on the
716 * error handling paths in xenbus.c:connect().
717 */
718void xenvif_deinit_queue(struct xenvif_queue *queue)
719{
720 free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
721 netif_napi_del(&queue->napi);
559} 722}
560 723
561void xenvif_free(struct xenvif *vif) 724void xenvif_free(struct xenvif *vif)
562{ 725{
563 int i, unmap_timeout = 0; 726 struct xenvif_queue *queue = NULL;
727 unsigned int num_queues = vif->dev->real_num_tx_queues;
728 unsigned int queue_index;
564 /* Here we want to avoid timeout messages if an skb can be legitimately 729 /* Here we want to avoid timeout messages if an skb can be legitimately
565 * stuck somewhere else. Realistically this could be another vif's 730 * stuck somewhere else. Realistically this could be another vif's
566 * internal or QDisc queue. That other vif also has this 731 * internal or QDisc queue. That other vif also has this
@@ -575,33 +740,21 @@ void xenvif_free(struct xenvif *vif)
575 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * 740 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
576 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); 741 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
577 742
578 for (i = 0; i < MAX_PENDING_REQS; ++i) { 743 unregister_netdev(vif->dev);
579 if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
580 unmap_timeout++;
581 schedule_timeout(msecs_to_jiffies(1000));
582 if (unmap_timeout > worst_case_skb_lifetime &&
583 net_ratelimit())
584 netdev_err(vif->dev,
585 "Page still granted! Index: %x\n",
586 i);
587 /* If there are still unmapped pages, reset the loop to
588 * start checking again. We shouldn't exit here until
589 * dealloc thread and NAPI instance release all the
590 * pages. If a kernel bug causes the skbs to stall
591 * somewhere, the interface cannot be brought down
592 * properly.
593 */
594 i = -1;
595 }
596 }
597
598 free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
599 744
600 netif_napi_del(&vif->napi); 745 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
746 queue = &vif->queues[queue_index];
747 xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
748 xenvif_deinit_queue(queue);
749 }
601 750
602 unregister_netdev(vif->dev); 751 /* Free the array of queues. The call below does not require
752 * rtnl_lock() because it happens after unregister_netdev().
753 */
754 netif_set_real_num_tx_queues(vif->dev, 0);
755 vfree(vif->queues);
756 vif->queues = NULL;
603 757
604 vfree(vif->grant_copy_op);
605 free_netdev(vif->dev); 758 free_netdev(vif->dev);
606 759
607 module_put(THIS_MODULE); 760 module_put(THIS_MODULE);
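The timeout budget used above is straightforward arithmetic: one drain pass lasts rx_drain_timeout_msecs, and the tx queue can refill the rx ring DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS) times. With the defaults assumed here (10000 ms drain, queue length 32, ring size 256, 17 frags per skb; not taken from this hunk) that is 10 * ceil(32 / 15) = 30 seconds:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assumed defaults, for illustration only. */
        unsigned drain_msecs = 10000;   /* rx_drain_timeout_msecs  */
        unsigned queue_len   = 32;      /* XENVIF_QUEUE_LENGTH     */
        unsigned ring_size   = 256;     /* XEN_NETIF_RX_RING_SIZE  */
        unsigned max_frags   = 17;      /* MAX_SKB_FRAGS, 4K pages */

        unsigned lifetime = (drain_msecs / 1000) *
                DIV_ROUND_UP(queue_len, ring_size / max_frags);

        printf("worst case skb lifetime: %u s\n", lifetime);   /* 30 */
        return 0;
    }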
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,6 +62,11 @@ unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444); 62module_param(rx_drain_timeout_msecs, uint, 0444);
63unsigned int rx_drain_timeout_jiffies; 63unsigned int rx_drain_timeout_jiffies;
64 64
65unsigned int xenvif_max_queues;
66module_param_named(max_queues, xenvif_max_queues, uint, 0644);
67MODULE_PARM_DESC(max_queues,
68 "Maximum number of queues per virtual interface");
69
65/* 70/*
66 * This is the maximum slots a skb can have. If a guest sends a skb 71 * This is the maximum slots a skb can have. If a guest sends a skb
67 * which exceeds this limit it is considered malicious. 72 * which exceeds this limit it is considered malicious.
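The new xenvif_max_queues knob uses the standard module-parameter machinery; module_param_named() lets the sysfs name (max_queues) differ from the C identifier, and mode 0644 makes it writable at runtime under /sys/module/<mod>/parameters/. A minimal sketch of the same pattern in a throwaway module (this is not the driver's code; "demo" is a made-up module):

    /* demo.c - minimal module-parameter sketch, assuming a 3.x-era kernel. */
    #include <linux/init.h>
    #include <linux/module.h>

    static unsigned int demo_max_queues = 1;
    module_param_named(max_queues, demo_max_queues, uint, 0644);
    MODULE_PARM_DESC(max_queues, "Maximum number of queues per interface");

    static int __init demo_init(void)
    {
        pr_info("demo: max_queues=%u\n", demo_max_queues);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Loaded with "modprobe demo max_queues=4" the parameter overrides the default, matching how an administrator would size netback's per-vif queues.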
@@ -70,33 +75,33 @@ unsigned int rx_drain_timeout_jiffies;
70static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; 75static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
71module_param(fatal_skb_slots, uint, 0444); 76module_param(fatal_skb_slots, uint, 0444);
72 77
73static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 78static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
74 u8 status); 79 u8 status);
75 80
76static void make_tx_response(struct xenvif *vif, 81static void make_tx_response(struct xenvif_queue *queue,
77 struct xen_netif_tx_request *txp, 82 struct xen_netif_tx_request *txp,
78 s8 st); 83 s8 st);
79 84
80static inline int tx_work_todo(struct xenvif *vif); 85static inline int tx_work_todo(struct xenvif_queue *queue);
81static inline int rx_work_todo(struct xenvif *vif); 86static inline int rx_work_todo(struct xenvif_queue *queue);
82 87
83static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 88static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
84 u16 id, 89 u16 id,
85 s8 st, 90 s8 st,
86 u16 offset, 91 u16 offset,
87 u16 size, 92 u16 size,
88 u16 flags); 93 u16 flags);
89 94
90static inline unsigned long idx_to_pfn(struct xenvif *vif, 95static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
91 u16 idx) 96 u16 idx)
92{ 97{
93 return page_to_pfn(vif->mmap_pages[idx]); 98 return page_to_pfn(queue->mmap_pages[idx]);
94} 99}
95 100
96static inline unsigned long idx_to_kaddr(struct xenvif *vif, 101static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
97 u16 idx) 102 u16 idx)
98{ 103{
99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); 104 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
100} 105}
101 106
102#define callback_param(vif, pending_idx) \ 107#define callback_param(vif, pending_idx) \
@@ -104,13 +109,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 109
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 110/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 111 */
107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) 112static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
108{ 113{
109 u16 pending_idx = ubuf->desc; 114 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 115 struct pending_tx_info *temp =
111 container_of(ubuf, struct pending_tx_info, callback_struct); 116 container_of(ubuf, struct pending_tx_info, callback_struct);
112 return container_of(temp - pending_idx, 117 return container_of(temp - pending_idx,
113 struct xenvif, 118 struct xenvif_queue,
114 pending_tx_info[0]); 119 pending_tx_info[0]);
115} 120}
116 121
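ubuf_to_queue() is a two-hop back-pointer recovery: container_of() maps the embedded ubuf_info to its pending_tx_info, the subtraction temp - pending_idx walks back to element 0 of the array, and a second container_of() reaches the enclosing queue. A userspace sketch of the same trick with simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ubuf { unsigned short desc; };          /* desc == array index */
    struct pending { struct ubuf cb; };
    struct queue {
        int id;
        struct pending pending_tx_info[4];
    };

    static struct queue *ubuf_to_queue(struct ubuf *u)
    {
        struct pending *p = container_of(u, struct pending, cb);

        /* p - desc points at pending_tx_info[0]; hop to the queue. */
        return container_of(p - u->desc, struct queue, pending_tx_info[0]);
    }

    int main(void)
    {
        struct queue q = { .id = 7 };

        for (unsigned short i = 0; i < 4; i++)
            q.pending_tx_info[i].cb.desc = i;

        printf("queue id: %d\n", ubuf_to_queue(&q.pending_tx_info[2].cb)->id);
        return 0;
    }

The whole scheme only works because each callback_struct records its own index in desc, which is exactly why xenvif_init_queue() stamps .desc = i into every slot.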
@@ -136,24 +141,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
136 return i & (MAX_PENDING_REQS-1); 141 return i & (MAX_PENDING_REQS-1);
137} 142}
138 143
139bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) 144bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
140{ 145{
141 RING_IDX prod, cons; 146 RING_IDX prod, cons;
142 147
143 do { 148 do {
144 prod = vif->rx.sring->req_prod; 149 prod = queue->rx.sring->req_prod;
145 cons = vif->rx.req_cons; 150 cons = queue->rx.req_cons;
146 151
147 if (prod - cons >= needed) 152 if (prod - cons >= needed)
148 return true; 153 return true;
149 154
150 vif->rx.sring->req_event = prod + 1; 155 queue->rx.sring->req_event = prod + 1;
151 156
152 /* Make sure event is visible before we check prod 157 /* Make sure event is visible before we check prod
153 * again. 158 * again.
154 */ 159 */
155 mb(); 160 mb();
156 } while (vif->rx.sring->req_prod != prod); 161 } while (queue->rx.sring->req_prod != prod);
157 162
158 return false; 163 return false;
159} 164}
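xenvif_rx_ring_slots_available() is the standard shared-ring re-check: when there is not enough room, the consumer writes req_event = prod + 1 to request a notification, then must re-read prod behind a barrier, because requests that arrived between the first read and the event write would otherwise be missed forever. A sketch of that race-closing loop with C11 atomics standing in for the shared ring and mb() (illustrative, not the Xen ring macros):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ring {
        _Atomic unsigned req_prod;   /* advanced by the producer            */
        _Atomic unsigned req_event;  /* producer notifies when prod hits it */
        unsigned req_cons;           /* private to the consumer             */
    };

    static bool slots_available(struct ring *r, unsigned needed)
    {
        unsigned prod, cons;

        do {
            prod = atomic_load(&r->req_prod);
            cons = r->req_cons;

            if (prod - cons >= needed)
                return true;

            /* Ask for a wakeup once more requests are posted... */
            atomic_store(&r->req_event, prod + 1);

            /* ...then re-check; seq_cst ordering plays the role of mb(). */
        } while (atomic_load(&r->req_prod) != prod);

        return false;
    }

    int main(void)
    {
        struct ring r = { 0 };

        atomic_store(&r.req_prod, 4);
        return slots_available(&r, 2) ? 0 : 1;
    }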
@@ -163,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
163 * adding 'size' bytes to a buffer which currently contains 'offset' 168 * adding 'size' bytes to a buffer which currently contains 'offset'
164 * bytes. 169 * bytes.
165 */ 170 */
166static bool start_new_rx_buffer(int offset, unsigned long size, int head) 171static bool start_new_rx_buffer(int offset, unsigned long size, int head,
172 bool full_coalesce)
167{ 173{
168 /* simple case: we have completely filled the current buffer. */ 174 /* simple case: we have completely filled the current buffer. */
169 if (offset == MAX_BUFFER_OFFSET) 175 if (offset == MAX_BUFFER_OFFSET)
@@ -175,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
175 * (i) this frag would fit completely in the next buffer 181 * (i) this frag would fit completely in the next buffer
176 * and (ii) there is already some data in the current buffer 182 * and (ii) there is already some data in the current buffer
177 * and (iii) this is not the head buffer. 183 * and (iii) this is not the head buffer.
184 * and (iv) there is no need to fully utilize the buffers
178 * 185 *
179 * Where: 186 * Where:
180 * - (i) stops us splitting a frag into two copies 187 * - (i) stops us splitting a frag into two copies
@@ -185,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
185 * by (ii) but is explicitly checked because 192 * by (ii) but is explicitly checked because
186 * netfront relies on the first buffer being 193 * netfront relies on the first buffer being
187 * non-empty and can crash otherwise. 194 * non-empty and can crash otherwise.
195 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
196 * slots
188 * 197 *
189 * This means we will effectively linearise small 198 * This means we will effectively linearise small
190 * frags but do not needlessly split large buffers 199 * frags but do not needlessly split large buffers
@@ -192,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
192 * own buffers as before. 201 * own buffers as before.
193 */ 202 */
194 BUG_ON(size > MAX_BUFFER_OFFSET); 203 BUG_ON(size > MAX_BUFFER_OFFSET);
195 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) 204 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
205 !full_coalesce)
196 return true; 206 return true;
197 207
198 return false; 208 return false;
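With full_coalesce in place the decision reads as four independent conditions, and it is easiest to see as a bare predicate. A sketch of the same logic outside the driver (MAX_BUF stands in for MAX_BUFFER_OFFSET; the size <= MAX_BUF invariant is asserted by the caller, which is what makes condition (i) equivalent to "spills past this buffer"):

    #include <stdbool.h>

    #define MAX_BUF 4096    /* stand-in for MAX_BUFFER_OFFSET */

    /* Should a new receive buffer be started before copying 'size' bytes? */
    static bool start_new_buffer(int offset, unsigned long size, bool head,
                                 bool full_coalesce)
    {
        if (offset == MAX_BUF)              /* current buffer already full */
            return true;

        return offset &&                    /* (ii)  something in it       */
               !head &&                     /* (iii) not the head buffer   */
               !full_coalesce &&            /* (iv)  allowed to waste room */
               offset + size > MAX_BUF;     /* (i)   would spill past end  */
    }

    int main(void)
    {
        /* Mid-buffer frag that would spill: start fresh. */
        return start_new_buffer(4000, 200, false, false) ? 0 : 1;
    }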
@@ -207,13 +217,13 @@ struct netrx_pending_operations {
207 grant_ref_t copy_gref; 217 grant_ref_t copy_gref;
208}; 218};
209 219
210static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, 220static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
211 struct netrx_pending_operations *npo) 221 struct netrx_pending_operations *npo)
212{ 222{
213 struct xenvif_rx_meta *meta; 223 struct xenvif_rx_meta *meta;
214 struct xen_netif_rx_request *req; 224 struct xen_netif_rx_request *req;
215 225
216 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 226 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
217 227
218 meta = npo->meta + npo->meta_prod++; 228 meta = npo->meta + npo->meta_prod++;
219 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 229 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -227,15 +237,22 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
227 return meta; 237 return meta;
228} 238}
229 239
240struct xenvif_rx_cb {
241 int meta_slots_used;
242 bool full_coalesce;
243};
244
245#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
246
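Moving xenvif_rx_cb up is cosmetic, but the construct itself is the usual skb control-block pattern: per-packet scratch state is overlaid on the fixed 48-byte skb->cb array through a cast macro, avoiding any allocation at the cost of a hard size ceiling. A sketch of the pattern with a compile-time size check and a stand-in skb type:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_skb { char cb[48]; };   /* scratch owned by the current layer */

    struct rx_cb {
        int meta_slots_used;
        bool full_coalesce;
    };

    #define RX_CB(skb) ((struct rx_cb *)(skb)->cb)

    static_assert(sizeof(struct rx_cb) <= sizeof(((struct fake_skb *)0)->cb),
                  "control block does not fit in skb->cb");

    int main(void)
    {
        struct fake_skb skb = { { 0 } };

        RX_CB(&skb)->full_coalesce = true;
        RX_CB(&skb)->meta_slots_used = 3;
        printf("%d\n", RX_CB(&skb)->meta_slots_used);
        return 0;
    }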
230/* 247/*
231 * Set up the grant operations for this fragment. If it's a flipping 248 * Set up the grant operations for this fragment. If it's a flipping
232 * interface, we also set up the unmap request from here. 249 * interface, we also set up the unmap request from here.
233 */ 250 */
234static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, 251static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
235 struct netrx_pending_operations *npo, 252 struct netrx_pending_operations *npo,
236 struct page *page, unsigned long size, 253 struct page *page, unsigned long size,
237 unsigned long offset, int *head, 254 unsigned long offset, int *head,
238 struct xenvif *foreign_vif, 255 struct xenvif_queue *foreign_queue,
239 grant_ref_t foreign_gref) 256 grant_ref_t foreign_gref)
240{ 257{
241 struct gnttab_copy *copy_gop; 258 struct gnttab_copy *copy_gop;
@@ -261,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
261 if (bytes > size) 278 if (bytes > size)
262 bytes = size; 279 bytes = size;
263 280
264 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { 281 if (start_new_rx_buffer(npo->copy_off,
282 bytes,
283 *head,
284 XENVIF_RX_CB(skb)->full_coalesce)) {
265 /* 285 /*
266 * Netfront requires there to be some data in the head 286 * Netfront requires there to be some data in the head
267 * buffer. 287 * buffer.
268 */ 288 */
269 BUG_ON(*head); 289 BUG_ON(*head);
270 290
271 meta = get_next_rx_buffer(vif, npo); 291 meta = get_next_rx_buffer(queue, npo);
272 } 292 }
273 293
274 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 294 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +298,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
278 copy_gop->flags = GNTCOPY_dest_gref; 298 copy_gop->flags = GNTCOPY_dest_gref;
279 copy_gop->len = bytes; 299 copy_gop->len = bytes;
280 300
281 if (foreign_vif) { 301 if (foreign_queue) {
282 copy_gop->source.domid = foreign_vif->domid; 302 copy_gop->source.domid = foreign_queue->vif->domid;
283 copy_gop->source.u.ref = foreign_gref; 303 copy_gop->source.u.ref = foreign_gref;
284 copy_gop->flags |= GNTCOPY_source_gref; 304 copy_gop->flags |= GNTCOPY_source_gref;
285 } else { 305 } else {
@@ -289,7 +309,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
289 } 309 }
290 copy_gop->source.offset = offset; 310 copy_gop->source.offset = offset;
291 311
292 copy_gop->dest.domid = vif->domid; 312 copy_gop->dest.domid = queue->vif->domid;
293 copy_gop->dest.offset = npo->copy_off; 313 copy_gop->dest.offset = npo->copy_off;
294 copy_gop->dest.u.ref = npo->copy_gref; 314 copy_gop->dest.u.ref = npo->copy_gref;
295 315
@@ -314,8 +334,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
314 gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 334 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
315 } 335 }
316 336
317 if (*head && ((1 << gso_type) & vif->gso_mask)) 337 if (*head && ((1 << gso_type) & queue->vif->gso_mask))
318 vif->rx.req_cons++; 338 queue->rx.req_cons++;
319 339
320 *head = 0; /* There must be something in this buffer now. */ 340 *head = 0; /* There must be something in this buffer now. */
321 341
@@ -337,13 +357,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i, 357 const int i,
338 const struct ubuf_info *ubuf) 358 const struct ubuf_info *ubuf)
339{ 359{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf); 360 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
341 361
342 do { 362 do {
343 u16 pending_idx = ubuf->desc; 363 u16 pending_idx = ubuf->desc;
344 364
345 if (skb_shinfo(skb)->frags[i].page.p == 365 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx]) 366 foreign_queue->mmap_pages[pending_idx])
347 break; 367 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx; 368 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf); 369 } while (ubuf);
@@ -364,7 +384,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
364 * frontend-side LRO). 384 * frontend-side LRO).
365 */ 385 */
366static int xenvif_gop_skb(struct sk_buff *skb, 386static int xenvif_gop_skb(struct sk_buff *skb,
367 struct netrx_pending_operations *npo) 387 struct netrx_pending_operations *npo,
388 struct xenvif_queue *queue)
368{ 389{
369 struct xenvif *vif = netdev_priv(skb->dev); 390 struct xenvif *vif = netdev_priv(skb->dev);
370 int nr_frags = skb_shinfo(skb)->nr_frags; 391 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +411,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
390 411
391 /* Set up a GSO prefix descriptor, if necessary */ 412 /* Set up a GSO prefix descriptor, if necessary */
392 if ((1 << gso_type) & vif->gso_prefix_mask) { 413 if ((1 << gso_type) & vif->gso_prefix_mask) {
393 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 414 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
394 meta = npo->meta + npo->meta_prod++; 415 meta = npo->meta + npo->meta_prod++;
395 meta->gso_type = gso_type; 416 meta->gso_type = gso_type;
396 meta->gso_size = skb_shinfo(skb)->gso_size; 417 meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +419,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
398 meta->id = req->id; 419 meta->id = req->id;
399 } 420 }
400 421
401 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 422 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
402 meta = npo->meta + npo->meta_prod++; 423 meta = npo->meta + npo->meta_prod++;
403 424
404 if ((1 << gso_type) & vif->gso_mask) { 425 if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +443,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
422 if (data + len > skb_tail_pointer(skb)) 443 if (data + len > skb_tail_pointer(skb))
423 len = skb_tail_pointer(skb) - data; 444 len = skb_tail_pointer(skb) - data;
424 445
425 xenvif_gop_frag_copy(vif, skb, npo, 446 xenvif_gop_frag_copy(queue, skb, npo,
426 virt_to_page(data), len, offset, &head, 447 virt_to_page(data), len, offset, &head,
427 NULL, 448 NULL,
428 0); 449 0);
@@ -433,7 +454,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
433 /* This variable also signals whether foreign_gref has a real 454 /* This variable also signals whether foreign_gref has a real
434 * value or not. 455 * value or not.
435 */ 456 */
436 struct xenvif *foreign_vif = NULL; 457 struct xenvif_queue *foreign_queue = NULL;
437 grant_ref_t foreign_gref; 458 grant_ref_t foreign_gref;
438 459
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && 460 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +479,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
458 if (likely(ubuf)) { 479 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc; 480 u16 pending_idx = ubuf->desc;
460 481
461 foreign_vif = ubuf_to_vif(ubuf); 482 foreign_queue = ubuf_to_queue(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; 483 foreign_gref =
484 foreign_queue->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last 485 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will 486 * element on the list, the for loop will
465 * iterate again if a local page were added to 487 * iterate again if a local page were added to
@@ -477,13 +499,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
477 */ 499 */
478 ubuf = head_ubuf; 500 ubuf = head_ubuf;
479 } 501 }
480 xenvif_gop_frag_copy(vif, skb, npo, 502 xenvif_gop_frag_copy(queue, skb, npo,
481 skb_frag_page(&skb_shinfo(skb)->frags[i]), 503 skb_frag_page(&skb_shinfo(skb)->frags[i]),
482 skb_frag_size(&skb_shinfo(skb)->frags[i]), 504 skb_frag_size(&skb_shinfo(skb)->frags[i]),
483 skb_shinfo(skb)->frags[i].page_offset, 505 skb_shinfo(skb)->frags[i].page_offset,
484 &head, 506 &head,
485 foreign_vif, 507 foreign_queue,
486 foreign_vif ? foreign_gref : UINT_MAX); 508 foreign_queue ? foreign_gref : UINT_MAX);
487 } 509 }
488 510
489 return npo->meta_prod - old_meta_prod; 511 return npo->meta_prod - old_meta_prod;
@@ -515,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
515 return status; 537 return status;
516} 538}
517 539
518static void xenvif_add_frag_responses(struct xenvif *vif, int status, 540static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
519 struct xenvif_rx_meta *meta, 541 struct xenvif_rx_meta *meta,
520 int nr_meta_slots) 542 int nr_meta_slots)
521{ 543{
@@ -536,23 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
536 flags = XEN_NETRXF_more_data; 558 flags = XEN_NETRXF_more_data;
537 559
538 offset = 0; 560 offset = 0;
539 make_rx_response(vif, meta[i].id, status, offset, 561 make_rx_response(queue, meta[i].id, status, offset,
540 meta[i].size, flags); 562 meta[i].size, flags);
541 } 563 }
542} 564}
543 565
544struct xenvif_rx_cb { 566void xenvif_kick_thread(struct xenvif_queue *queue)
545 int meta_slots_used;
546};
547
548#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
549
550void xenvif_kick_thread(struct xenvif *vif)
551{ 567{
552 wake_up(&vif->wq); 568 wake_up(&queue->wq);
553} 569}
554 570
555static void xenvif_rx_action(struct xenvif *vif) 571static void xenvif_rx_action(struct xenvif_queue *queue)
556{ 572{
557 s8 status; 573 s8 status;
558 u16 flags; 574 u16 flags;
@@ -565,13 +581,13 @@ static void xenvif_rx_action(struct xenvif *vif)
565 bool need_to_notify = false; 581 bool need_to_notify = false;
566 582
567 struct netrx_pending_operations npo = { 583 struct netrx_pending_operations npo = {
568 .copy = vif->grant_copy_op, 584 .copy = queue->grant_copy_op,
569 .meta = vif->meta, 585 .meta = queue->meta,
570 }; 586 };
571 587
572 skb_queue_head_init(&rxq); 588 skb_queue_head_init(&rxq);
573 589
574 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { 590 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
575 RING_IDX max_slots_needed; 591 RING_IDX max_slots_needed;
576 RING_IDX old_req_cons; 592 RING_IDX old_req_cons;
577 RING_IDX ring_slots_used; 593 RING_IDX ring_slots_used;
@@ -602,10 +618,15 @@ static void xenvif_rx_action(struct xenvif *vif)
602 618
603 /* To avoid the estimate becoming too pessimal for some 619 /* To avoid the estimate becoming too pessimal for some
604 * frontends that limit posted rx requests, cap the estimate 620 * frontends that limit posted rx requests, cap the estimate
605 * at MAX_SKB_FRAGS. 621 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
622 * the skb into the provided slots.
606 */ 623 */
607 if (max_slots_needed > MAX_SKB_FRAGS) 624 if (max_slots_needed > MAX_SKB_FRAGS) {
608 max_slots_needed = MAX_SKB_FRAGS; 625 max_slots_needed = MAX_SKB_FRAGS;
626 XENVIF_RX_CB(skb)->full_coalesce = true;
627 } else {
628 XENVIF_RX_CB(skb)->full_coalesce = false;
629 }
609 630
610 /* We may need one more slot for GSO metadata */ 631 /* We may need one more slot for GSO metadata */
611 if (skb_is_gso(skb) && 632 if (skb_is_gso(skb) &&
@@ -614,42 +635,42 @@ static void xenvif_rx_action(struct xenvif *vif)
614 max_slots_needed++; 635 max_slots_needed++;
615 636
616 /* If the skb may not fit then bail out now */ 637 /* If the skb may not fit then bail out now */
617 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { 638 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
618 skb_queue_head(&vif->rx_queue, skb); 639 skb_queue_head(&queue->rx_queue, skb);
619 need_to_notify = true; 640 need_to_notify = true;
620 vif->rx_last_skb_slots = max_slots_needed; 641 queue->rx_last_skb_slots = max_slots_needed;
621 break; 642 break;
622 } else 643 } else
623 vif->rx_last_skb_slots = 0; 644 queue->rx_last_skb_slots = 0;
624 645
625 old_req_cons = vif->rx.req_cons; 646 old_req_cons = queue->rx.req_cons;
626 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo); 647 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
627 ring_slots_used = vif->rx.req_cons - old_req_cons; 648 ring_slots_used = queue->rx.req_cons - old_req_cons;
628 649
629 BUG_ON(ring_slots_used > max_slots_needed); 650 BUG_ON(ring_slots_used > max_slots_needed);
630 651
631 __skb_queue_tail(&rxq, skb); 652 __skb_queue_tail(&rxq, skb);
632 } 653 }
633 654
634 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); 655 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
635 656
636 if (!npo.copy_prod) 657 if (!npo.copy_prod)
637 goto done; 658 goto done;
638 659
639 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); 660 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
640 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 661 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
641 662
642 while ((skb = __skb_dequeue(&rxq)) != NULL) { 663 while ((skb = __skb_dequeue(&rxq)) != NULL) {
643 664
644 if ((1 << vif->meta[npo.meta_cons].gso_type) & 665 if ((1 << queue->meta[npo.meta_cons].gso_type) &
645 vif->gso_prefix_mask) { 666 queue->vif->gso_prefix_mask) {
646 resp = RING_GET_RESPONSE(&vif->rx, 667 resp = RING_GET_RESPONSE(&queue->rx,
647 vif->rx.rsp_prod_pvt++); 668 queue->rx.rsp_prod_pvt++);
648 669
649 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; 670 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
650 671
651 resp->offset = vif->meta[npo.meta_cons].gso_size; 672 resp->offset = queue->meta[npo.meta_cons].gso_size;
652 resp->id = vif->meta[npo.meta_cons].id; 673 resp->id = queue->meta[npo.meta_cons].id;
653 resp->status = XENVIF_RX_CB(skb)->meta_slots_used; 674 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
654 675
655 npo.meta_cons++; 676 npo.meta_cons++;
@@ -657,10 +678,10 @@ static void xenvif_rx_action(struct xenvif *vif)
657 } 678 }
658 679
659 680
660 vif->dev->stats.tx_bytes += skb->len; 681 queue->stats.tx_bytes += skb->len;
661 vif->dev->stats.tx_packets++; 682 queue->stats.tx_packets++;
662 683
663 status = xenvif_check_gop(vif, 684 status = xenvif_check_gop(queue->vif,
664 XENVIF_RX_CB(skb)->meta_slots_used, 685 XENVIF_RX_CB(skb)->meta_slots_used,
665 &npo); 686 &npo);
666 687
@@ -676,22 +697,22 @@ static void xenvif_rx_action(struct xenvif *vif)
676 flags |= XEN_NETRXF_data_validated; 697 flags |= XEN_NETRXF_data_validated;
677 698
678 offset = 0; 699 offset = 0;
679 resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, 700 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
680 status, offset, 701 status, offset,
681 vif->meta[npo.meta_cons].size, 702 queue->meta[npo.meta_cons].size,
682 flags); 703 flags);
683 704
684 if ((1 << vif->meta[npo.meta_cons].gso_type) & 705 if ((1 << queue->meta[npo.meta_cons].gso_type) &
685 vif->gso_mask) { 706 queue->vif->gso_mask) {
686 struct xen_netif_extra_info *gso = 707 struct xen_netif_extra_info *gso =
687 (struct xen_netif_extra_info *) 708 (struct xen_netif_extra_info *)
688 RING_GET_RESPONSE(&vif->rx, 709 RING_GET_RESPONSE(&queue->rx,
689 vif->rx.rsp_prod_pvt++); 710 queue->rx.rsp_prod_pvt++);
690 711
691 resp->flags |= XEN_NETRXF_extra_info; 712 resp->flags |= XEN_NETRXF_extra_info;
692 713
693 gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; 714 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
694 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; 715 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
695 gso->u.gso.pad = 0; 716 gso->u.gso.pad = 0;
696 gso->u.gso.features = 0; 717 gso->u.gso.features = 0;
697 718
@@ -699,11 +720,11 @@ static void xenvif_rx_action(struct xenvif *vif)
699 gso->flags = 0; 720 gso->flags = 0;
700 } 721 }
701 722
702 xenvif_add_frag_responses(vif, status, 723 xenvif_add_frag_responses(queue, status,
703 vif->meta + npo.meta_cons + 1, 724 queue->meta + npo.meta_cons + 1,
704 XENVIF_RX_CB(skb)->meta_slots_used); 725 XENVIF_RX_CB(skb)->meta_slots_used);
705 726
706 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 727 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
707 728
708 need_to_notify |= !!ret; 729 need_to_notify |= !!ret;
709 730
@@ -713,20 +734,20 @@ static void xenvif_rx_action(struct xenvif *vif)
713 734
714done: 735done:
715 if (need_to_notify) 736 if (need_to_notify)
716 notify_remote_via_irq(vif->rx_irq); 737 notify_remote_via_irq(queue->rx_irq);
717} 738}
718 739
719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) 740void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
720{ 741{
721 int more_to_do; 742 int more_to_do;
722 743
723 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); 744 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
724 745
725 if (more_to_do) 746 if (more_to_do)
726 napi_schedule(&vif->napi); 747 napi_schedule(&queue->napi);
727} 748}
728 749
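The reschedule-or-enable helper relies on RING_FINAL_CHECK_FOR_REQUESTS, which re-enables notifications and then looks one final time for requests that slipped in while they were off; if any did, the NAPI instance is rescheduled rather than waiting for an event that will never fire. A loose userspace analogue of that "arm, then check once more" ordering (this simplifies the real ring macro considerably):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic int pending;             /* work posted by the peer     */
    static _Atomic bool events_enabled;     /* analogue of arming the ring */

    /* True if work arrived while notifications were still disabled. */
    static bool final_check_for_requests(void)
    {
        atomic_store(&events_enabled, true);    /* arm first...            */
        return atomic_load(&pending) > 0;       /* ...then look once more  */
    }

    int main(void)
    {
        atomic_store(&pending, 1);              /* a request raced in       */
        if (final_check_for_requests())
            puts("reschedule poll");            /* napi_schedule() analogue */
        return 0;
    }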
729static void tx_add_credit(struct xenvif *vif) 750static void tx_add_credit(struct xenvif_queue *queue)
730{ 751{
731 unsigned long max_burst, max_credit; 752 unsigned long max_burst, max_credit;
732 753
@@ -734,55 +755,57 @@ static void tx_add_credit(struct xenvif *vif)
734 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 755 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
735 * Otherwise the interface can seize up due to insufficient credit. 756 * Otherwise the interface can seize up due to insufficient credit.
736 */ 757 */
737 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; 758 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
738 max_burst = min(max_burst, 131072UL); 759 max_burst = min(max_burst, 131072UL);
739 max_burst = max(max_burst, vif->credit_bytes); 760 max_burst = max(max_burst, queue->credit_bytes);
740 761
741 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 762 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
742 max_credit = vif->remaining_credit + vif->credit_bytes; 763 max_credit = queue->remaining_credit + queue->credit_bytes;
743 if (max_credit < vif->remaining_credit) 764 if (max_credit < queue->remaining_credit)
744 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ 765 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
745 766
746 vif->remaining_credit = min(max_credit, max_burst); 767 queue->remaining_credit = min(max_credit, max_burst);
747} 768}
748 769
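The wrap check in tx_add_credit() is a saturating unsigned add: if remaining_credit + credit_bytes wrapped past zero the sum is smaller than an addend, and the total is clamped to ULONG_MAX instead. The same idiom in isolation:

    #include <limits.h>
    #include <stdio.h>

    /* Saturating unsigned add: a wrapped sum is always < either addend. */
    static unsigned long sat_add(unsigned long a, unsigned long b)
    {
        unsigned long sum = a + b;

        return sum < a ? ULONG_MAX : sum;
    }

    int main(void)
    {
        printf("%lu\n", sat_add(5, 7));                 /* 12            */
        printf("%lu\n", sat_add(ULONG_MAX - 1, 5));     /* clamps to max */
        return 0;
    }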
749static void tx_credit_callback(unsigned long data) 770static void tx_credit_callback(unsigned long data)
750{ 771{
751 struct xenvif *vif = (struct xenvif *)data; 772 struct xenvif_queue *queue = (struct xenvif_queue *)data;
752 tx_add_credit(vif); 773 tx_add_credit(queue);
753 xenvif_napi_schedule_or_enable_events(vif); 774 xenvif_napi_schedule_or_enable_events(queue);
754} 775}
755 776
756static void xenvif_tx_err(struct xenvif *vif, 777static void xenvif_tx_err(struct xenvif_queue *queue,
757 struct xen_netif_tx_request *txp, RING_IDX end) 778 struct xen_netif_tx_request *txp, RING_IDX end)
758{ 779{
759 RING_IDX cons = vif->tx.req_cons; 780 RING_IDX cons = queue->tx.req_cons;
760 unsigned long flags; 781 unsigned long flags;
761 782
762 do { 783 do {
763 spin_lock_irqsave(&vif->response_lock, flags); 784 spin_lock_irqsave(&queue->response_lock, flags);
764 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 785 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
765 spin_unlock_irqrestore(&vif->response_lock, flags); 786 spin_unlock_irqrestore(&queue->response_lock, flags);
766 if (cons == end) 787 if (cons == end)
767 break; 788 break;
768 txp = RING_GET_REQUEST(&vif->tx, cons++); 789 txp = RING_GET_REQUEST(&queue->tx, cons++);
769 } while (1); 790 } while (1);
770 vif->tx.req_cons = cons; 791 queue->tx.req_cons = cons;
771} 792}
772 793
773static void xenvif_fatal_tx_err(struct xenvif *vif) 794static void xenvif_fatal_tx_err(struct xenvif *vif)
774{ 795{
775 netdev_err(vif->dev, "fatal error; disabling device\n"); 796 netdev_err(vif->dev, "fatal error; disabling device\n");
776 vif->disabled = true; 797 vif->disabled = true;
777 xenvif_kick_thread(vif); 798 /* Disable the vif from queue 0's kthread */
799 if (vif->queues)
800 xenvif_kick_thread(&vif->queues[0]);
778} 801}
779 802
780static int xenvif_count_requests(struct xenvif *vif, 803static int xenvif_count_requests(struct xenvif_queue *queue,
781 struct xen_netif_tx_request *first, 804 struct xen_netif_tx_request *first,
782 struct xen_netif_tx_request *txp, 805 struct xen_netif_tx_request *txp,
783 int work_to_do) 806 int work_to_do)
784{ 807{
785 RING_IDX cons = vif->tx.req_cons; 808 RING_IDX cons = queue->tx.req_cons;
786 int slots = 0; 809 int slots = 0;
787 int drop_err = 0; 810 int drop_err = 0;
788 int more_data; 811 int more_data;
@@ -794,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif,
794 struct xen_netif_tx_request dropped_tx = { 0 }; 817 struct xen_netif_tx_request dropped_tx = { 0 };
795 818
796 if (slots >= work_to_do) { 819 if (slots >= work_to_do) {
797 netdev_err(vif->dev, 820 netdev_err(queue->vif->dev,
798 "Asked for %d slots but exceeds this limit\n", 821 "Asked for %d slots but exceeds this limit\n",
799 work_to_do); 822 work_to_do);
800 xenvif_fatal_tx_err(vif); 823 xenvif_fatal_tx_err(queue->vif);
801 return -ENODATA; 824 return -ENODATA;
802 } 825 }
803 826
@@ -805,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif,
805 * considered malicious. 828 * considered malicious.
806 */ 829 */
807 if (unlikely(slots >= fatal_skb_slots)) { 830 if (unlikely(slots >= fatal_skb_slots)) {
808 netdev_err(vif->dev, 831 netdev_err(queue->vif->dev,
809 "Malicious frontend using %d slots, threshold %u\n", 832 "Malicious frontend using %d slots, threshold %u\n",
810 slots, fatal_skb_slots); 833 slots, fatal_skb_slots);
811 xenvif_fatal_tx_err(vif); 834 xenvif_fatal_tx_err(queue->vif);
812 return -E2BIG; 835 return -E2BIG;
813 } 836 }
814 837
@@ -821,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif,
821 */ 844 */
822 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { 845 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
823 if (net_ratelimit()) 846 if (net_ratelimit())
824 netdev_dbg(vif->dev, 847 netdev_dbg(queue->vif->dev,
825 "Too many slots (%d) exceeding limit (%d), dropping packet\n", 848 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
826 slots, XEN_NETBK_LEGACY_SLOTS_MAX); 849 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
827 drop_err = -E2BIG; 850 drop_err = -E2BIG;
@@ -830,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif,
830 if (drop_err) 853 if (drop_err)
831 txp = &dropped_tx; 854 txp = &dropped_tx;
832 855
833 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), 856 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
834 sizeof(*txp)); 857 sizeof(*txp));
835 858
836 /* If the guest submitted a frame >= 64 KiB then 859 /* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif,
844 */ 867 */
845 if (!drop_err && txp->size > first->size) { 868 if (!drop_err && txp->size > first->size) {
846 if (net_ratelimit()) 869 if (net_ratelimit())
847 netdev_dbg(vif->dev, 870 netdev_dbg(queue->vif->dev,
848 "Invalid tx request, slot size %u > remaining size %u\n", 871 "Invalid tx request, slot size %u > remaining size %u\n",
849 txp->size, first->size); 872 txp->size, first->size);
850 drop_err = -EIO; 873 drop_err = -EIO;
@@ -854,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif,
854 slots++; 877 slots++;
855 878
856 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 879 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
857 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", 880 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
858 txp->offset, txp->size); 881 txp->offset, txp->size);
859 xenvif_fatal_tx_err(vif); 882 xenvif_fatal_tx_err(queue->vif);
860 return -EINVAL; 883 return -EINVAL;
861 } 884 }
862 885
@@ -868,7 +891,7 @@ static int xenvif_count_requests(struct xenvif *vif,
868 } while (more_data); 891 } while (more_data);
869 892
870 if (drop_err) { 893 if (drop_err) {
871 xenvif_tx_err(vif, first, cons + slots); 894 xenvif_tx_err(queue, first, cons + slots);
872 return drop_err; 895 return drop_err;
873 } 896 }
874 897
@@ -882,17 +905,17 @@ struct xenvif_tx_cb {
882 905
883#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) 906#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
884 907
885static inline void xenvif_tx_create_map_op(struct xenvif *vif, 908static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
886 u16 pending_idx, 909 u16 pending_idx,
887 struct xen_netif_tx_request *txp, 910 struct xen_netif_tx_request *txp,
888 struct gnttab_map_grant_ref *mop) 911 struct gnttab_map_grant_ref *mop)
889{ 912{
890 vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx]; 913 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
891 gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx), 914 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
892 GNTMAP_host_map | GNTMAP_readonly, 915 GNTMAP_host_map | GNTMAP_readonly,
893 txp->gref, vif->domid); 916 txp->gref, queue->vif->domid);
894 917
895 memcpy(&vif->pending_tx_info[pending_idx].req, txp, 918 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
896 sizeof(*txp)); 919 sizeof(*txp));
897} 920}
898 921
@@ -913,7 +936,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
913 return skb; 936 return skb;
914} 937}
915 938
916static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, 939static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
917 struct sk_buff *skb, 940 struct sk_buff *skb,
918 struct xen_netif_tx_request *txp, 941 struct xen_netif_tx_request *txp,
919 struct gnttab_map_grant_ref *gop) 942 struct gnttab_map_grant_ref *gop)
@@ -940,9 +963,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
940 963
941 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; 964 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
942 shinfo->nr_frags++, txp++, gop++) { 965 shinfo->nr_frags++, txp++, gop++) {
943 index = pending_index(vif->pending_cons++); 966 index = pending_index(queue->pending_cons++);
944 pending_idx = vif->pending_ring[index]; 967 pending_idx = queue->pending_ring[index];
945 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 968 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
946 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); 969 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
947 } 970 }
948 971
@@ -950,7 +973,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
950 struct sk_buff *nskb = xenvif_alloc_skb(0); 973 struct sk_buff *nskb = xenvif_alloc_skb(0);
951 if (unlikely(nskb == NULL)) { 974 if (unlikely(nskb == NULL)) {
952 if (net_ratelimit()) 975 if (net_ratelimit())
953 netdev_err(vif->dev, 976 netdev_err(queue->vif->dev,
954 "Can't allocate the frag_list skb.\n"); 977 "Can't allocate the frag_list skb.\n");
955 return NULL; 978 return NULL;
956 } 979 }
@@ -960,9 +983,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
960 983
961 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; 984 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
962 shinfo->nr_frags++, txp++, gop++) { 985 shinfo->nr_frags++, txp++, gop++) {
963 index = pending_index(vif->pending_cons++); 986 index = pending_index(queue->pending_cons++);
964 pending_idx = vif->pending_ring[index]; 987 pending_idx = queue->pending_ring[index];
965 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 988 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
966 frag_set_pending_idx(&frags[shinfo->nr_frags], 989 frag_set_pending_idx(&frags[shinfo->nr_frags],
967 pending_idx); 990 pending_idx);
968 } 991 }
@@ -973,34 +996,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
973 return gop; 996 return gop;
974} 997}
975 998
976static inline void xenvif_grant_handle_set(struct xenvif *vif, 999static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
977 u16 pending_idx, 1000 u16 pending_idx,
978 grant_handle_t handle) 1001 grant_handle_t handle)
979{ 1002{
980 if (unlikely(vif->grant_tx_handle[pending_idx] != 1003 if (unlikely(queue->grant_tx_handle[pending_idx] !=
981 NETBACK_INVALID_HANDLE)) { 1004 NETBACK_INVALID_HANDLE)) {
982 netdev_err(vif->dev, 1005 netdev_err(queue->vif->dev,
983 "Trying to overwrite active handle! pending_idx: %x\n", 1006 "Trying to overwrite active handle! pending_idx: %x\n",
984 pending_idx); 1007 pending_idx);
985 BUG(); 1008 BUG();
986 } 1009 }
987 vif->grant_tx_handle[pending_idx] = handle; 1010 queue->grant_tx_handle[pending_idx] = handle;
988} 1011}
989 1012
990static inline void xenvif_grant_handle_reset(struct xenvif *vif, 1013static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
991 u16 pending_idx) 1014 u16 pending_idx)
992{ 1015{
993 if (unlikely(vif->grant_tx_handle[pending_idx] == 1016 if (unlikely(queue->grant_tx_handle[pending_idx] ==
994 NETBACK_INVALID_HANDLE)) { 1017 NETBACK_INVALID_HANDLE)) {
995 netdev_err(vif->dev, 1018 netdev_err(queue->vif->dev,
996 "Trying to unmap invalid handle! pending_idx: %x\n", 1019 "Trying to unmap invalid handle! pending_idx: %x\n",
997 pending_idx); 1020 pending_idx);
998 BUG(); 1021 BUG();
999 } 1022 }
1000 vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; 1023 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
1001} 1024}
1002 1025
1003static int xenvif_tx_check_gop(struct xenvif *vif, 1026static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1004 struct sk_buff *skb, 1027 struct sk_buff *skb,
1005 struct gnttab_map_grant_ref **gopp_map, 1028 struct gnttab_map_grant_ref **gopp_map,
1006 struct gnttab_copy **gopp_copy) 1029 struct gnttab_copy **gopp_copy)
@@ -1017,12 +1040,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
1017 (*gopp_copy)++; 1040 (*gopp_copy)++;
1018 if (unlikely(err)) { 1041 if (unlikely(err)) {
1019 if (net_ratelimit()) 1042 if (net_ratelimit())
1020 netdev_dbg(vif->dev, 1043 netdev_dbg(queue->vif->dev,
1021 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", 1044 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1022 (*gopp_copy)->status, 1045 (*gopp_copy)->status,
1023 pending_idx, 1046 pending_idx,
1024 (*gopp_copy)->source.u.ref); 1047 (*gopp_copy)->source.u.ref);
1025 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1026 } 1049 }
1027 1050
1028check_frags: 1051check_frags:
@@ -1035,24 +1058,24 @@ check_frags:
1035 newerr = gop_map->status; 1058 newerr = gop_map->status;
1036 1059
1037 if (likely(!newerr)) { 1060 if (likely(!newerr)) {
1038 xenvif_grant_handle_set(vif, 1061 xenvif_grant_handle_set(queue,
1039 pending_idx, 1062 pending_idx,
1040 gop_map->handle); 1063 gop_map->handle);
1041 /* Had a previous error? Invalidate this fragment. */ 1064 /* Had a previous error? Invalidate this fragment. */
1042 if (unlikely(err)) 1065 if (unlikely(err))
1043 xenvif_idx_unmap(vif, pending_idx); 1066 xenvif_idx_unmap(queue, pending_idx);
1044 continue; 1067 continue;
1045 } 1068 }
1046 1069
1047 /* Error on this fragment: respond to client with an error. */ 1070 /* Error on this fragment: respond to client with an error. */
1048 if (net_ratelimit()) 1071 if (net_ratelimit())
1049 netdev_dbg(vif->dev, 1072 netdev_dbg(queue->vif->dev,
1050 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", 1073 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1051 i, 1074 i,
1052 gop_map->status, 1075 gop_map->status,
1053 pending_idx, 1076 pending_idx,
1054 gop_map->ref); 1077 gop_map->ref);
1055 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1056 1079
1057 /* Not the first error? Preceding frags already invalidated. */ 1080 /* Not the first error? Preceding frags already invalidated. */
1058 if (err) 1081 if (err)
@@ -1060,7 +1083,7 @@ check_frags:
1060 /* First error: invalidate preceding fragments. */ 1083 /* First error: invalidate preceding fragments. */
1061 for (j = 0; j < i; j++) { 1084 for (j = 0; j < i; j++) {
1062 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1063 xenvif_idx_unmap(vif, pending_idx); 1086 xenvif_idx_unmap(queue, pending_idx);
1064 } 1087 }
1065 1088
1066 /* Remember the error: invalidate all subsequent fragments. */ 1089 /* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1107,7 @@ check_frags:
1084 shinfo = skb_shinfo(first_skb); 1107 shinfo = skb_shinfo(first_skb);
1085 for (j = 0; j < shinfo->nr_frags; j++) { 1108 for (j = 0; j < shinfo->nr_frags; j++) {
1086 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1087 xenvif_idx_unmap(vif, pending_idx); 1110 xenvif_idx_unmap(queue, pending_idx);
1088 } 1111 }
1089 } 1112 }
1090 1113
@@ -1092,7 +1115,7 @@ check_frags:
1092 return err; 1115 return err;
1093} 1116}
1094 1117
1095static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) 1118static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1096{ 1119{
1097 struct skb_shared_info *shinfo = skb_shinfo(skb); 1120 struct skb_shared_info *shinfo = skb_shinfo(skb);
1098 int nr_frags = shinfo->nr_frags; 1121 int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1133,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1110 /* If this is not the first frag, chain it to the previous*/ 1133 /* If this is not the first frag, chain it to the previous*/
1111 if (prev_pending_idx == INVALID_PENDING_IDX) 1134 if (prev_pending_idx == INVALID_PENDING_IDX)
1112 skb_shinfo(skb)->destructor_arg = 1135 skb_shinfo(skb)->destructor_arg =
1113 &callback_param(vif, pending_idx); 1136 &callback_param(queue, pending_idx);
1114 else 1137 else
1115 callback_param(vif, prev_pending_idx).ctx = 1138 callback_param(queue, prev_pending_idx).ctx =
1116 &callback_param(vif, pending_idx); 1139 &callback_param(queue, pending_idx);
1117 1140
1118 callback_param(vif, pending_idx).ctx = NULL; 1141 callback_param(queue, pending_idx).ctx = NULL;
1119 prev_pending_idx = pending_idx; 1142 prev_pending_idx = pending_idx;
1120 1143
1121 txp = &vif->pending_tx_info[pending_idx].req; 1144 txp = &queue->pending_tx_info[pending_idx].req;
1122 page = virt_to_page(idx_to_kaddr(vif, pending_idx)); 1145 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1123 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 1146 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1124 skb->len += txp->size; 1147 skb->len += txp->size;
1125 skb->data_len += txp->size; 1148 skb->data_len += txp->size;
1126 skb->truesize += txp->size; 1149 skb->truesize += txp->size;
1127 1150
1128 /* Take an extra reference to offset network stack's put_page */ 1151 /* Take an extra reference to offset network stack's put_page */
1129 get_page(vif->mmap_pages[pending_idx]); 1152 get_page(queue->mmap_pages[pending_idx]);
1130 } 1153 }
1131 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc 1154 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1132 * overlaps with "index", and "mapping" is not set. I think mapping 1155 * overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1159,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1136 skb->pfmemalloc = false; 1159 skb->pfmemalloc = false;
1137} 1160}
1138 1161
1139static int xenvif_get_extras(struct xenvif *vif, 1162static int xenvif_get_extras(struct xenvif_queue *queue,
1140 struct xen_netif_extra_info *extras, 1163 struct xen_netif_extra_info *extras,
1141 int work_to_do) 1164 int work_to_do)
1142{ 1165{
1143 struct xen_netif_extra_info extra; 1166 struct xen_netif_extra_info extra;
1144 RING_IDX cons = vif->tx.req_cons; 1167 RING_IDX cons = queue->tx.req_cons;
1145 1168
1146 do { 1169 do {
1147 if (unlikely(work_to_do-- <= 0)) { 1170 if (unlikely(work_to_do-- <= 0)) {
1148 netdev_err(vif->dev, "Missing extra info\n"); 1171 netdev_err(queue->vif->dev, "Missing extra info\n");
1149 xenvif_fatal_tx_err(vif); 1172 xenvif_fatal_tx_err(queue->vif);
1150 return -EBADR; 1173 return -EBADR;
1151 } 1174 }
1152 1175
1153 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), 1176 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1154 sizeof(extra)); 1177 sizeof(extra));
1155 if (unlikely(!extra.type || 1178 if (unlikely(!extra.type ||
1156 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1179 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1157 vif->tx.req_cons = ++cons; 1180 queue->tx.req_cons = ++cons;
1158 netdev_err(vif->dev, 1181 netdev_err(queue->vif->dev,
1159 "Invalid extra type: %d\n", extra.type); 1182 "Invalid extra type: %d\n", extra.type);
1160 xenvif_fatal_tx_err(vif); 1183 xenvif_fatal_tx_err(queue->vif);
1161 return -EINVAL; 1184 return -EINVAL;
1162 } 1185 }
1163 1186
1164 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); 1187 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1165 vif->tx.req_cons = ++cons; 1188 queue->tx.req_cons = ++cons;
1166 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); 1189 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1167 1190
1168 return work_to_do; 1191 return work_to_do;
@@ -1197,7 +1220,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1197 return 0; 1220 return 0;
1198} 1221}
1199 1222
1200static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) 1223static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1201{ 1224{
1202 bool recalculate_partial_csum = false; 1225 bool recalculate_partial_csum = false;
1203 1226
@@ -1207,7 +1230,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1207 * recalculate the partial checksum. 1230 * recalculate the partial checksum.
1208 */ 1231 */
1209 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 1232 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1210 vif->rx_gso_checksum_fixup++; 1233 queue->stats.rx_gso_checksum_fixup++;
1211 skb->ip_summed = CHECKSUM_PARTIAL; 1234 skb->ip_summed = CHECKSUM_PARTIAL;
1212 recalculate_partial_csum = true; 1235 recalculate_partial_csum = true;
1213 } 1236 }
@@ -1219,31 +1242,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1219 return skb_checksum_setup(skb, recalculate_partial_csum); 1242 return skb_checksum_setup(skb, recalculate_partial_csum);
1220} 1243}
1221 1244
1222static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1245static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1223{ 1246{
1224 u64 now = get_jiffies_64(); 1247 u64 now = get_jiffies_64();
1225 u64 next_credit = vif->credit_window_start + 1248 u64 next_credit = queue->credit_window_start +
1226 msecs_to_jiffies(vif->credit_usec / 1000); 1249 msecs_to_jiffies(queue->credit_usec / 1000);
1227 1250
1228 /* Timer could already be pending in rare cases. */ 1251 /* Timer could already be pending in rare cases. */
1229 if (timer_pending(&vif->credit_timeout)) 1252 if (timer_pending(&queue->credit_timeout))
1230 return true; 1253 return true;
1231 1254
1232 /* Passed the point where we can replenish credit? */ 1255 /* Passed the point where we can replenish credit? */
1233 if (time_after_eq64(now, next_credit)) { 1256 if (time_after_eq64(now, next_credit)) {
1234 vif->credit_window_start = now; 1257 queue->credit_window_start = now;
1235 tx_add_credit(vif); 1258 tx_add_credit(queue);
1236 } 1259 }
1237 1260
1238 /* Still too big to send right now? Set a callback. */ 1261 /* Still too big to send right now? Set a callback. */
1239 if (size > vif->remaining_credit) { 1262 if (size > queue->remaining_credit) {
1240 vif->credit_timeout.data = 1263 queue->credit_timeout.data =
1241 (unsigned long)vif; 1264 (unsigned long)queue;
1242 vif->credit_timeout.function = 1265 queue->credit_timeout.function =
1243 tx_credit_callback; 1266 tx_credit_callback;
1244 mod_timer(&vif->credit_timeout, 1267 mod_timer(&queue->credit_timeout,
1245 next_credit); 1268 next_credit);
1246 vif->credit_window_start = next_credit; 1269 queue->credit_window_start = next_credit;
1247 1270
1248 return true; 1271 return true;
1249 } 1272 }
@@ -1251,16 +1274,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1251 return false; 1274 return false;
1252} 1275}
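
The hunk above moves the TX credit limiter from per-vif to per-queue state without changing its logic: replenish when the window has elapsed, otherwise arm a timer and defer. A minimal userspace sketch of that windowed rate limit, with integer ticks standing in for jiffies and the caller's credit decrement folded into the test; all names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct credit {
	unsigned long window_start;	/* tick at which the current window opened */
	unsigned long window_len;	/* replenish interval, in ticks */
	unsigned long per_window;	/* bytes granted per window */
	unsigned long remaining;	/* bytes still allowed this window */
};

/* Return true when a packet of 'size' bytes must wait for more credit. */
static bool credit_exceeded(struct credit *c, unsigned long now,
			    unsigned long size)
{
	/* Passed the point where we can replenish credit? */
	if (now >= c->window_start + c->window_len) {
		c->window_start = now;
		c->remaining = c->per_window;
	}
	/* Still too big to send right now? (the driver arms a timer here) */
	if (size > c->remaining)
		return true;
	c->remaining -= size;
	return false;
}

int main(void)
{
	struct credit c = { 0, 10, 1000, 1000 };
	unsigned long t;

	for (t = 0; t < 25; t += 5)
		printf("t=%2lu 600B -> %s\n", t,
		       credit_exceeded(&c, t, 600) ? "defer" : "send");
	return 0;
}
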
1253 1276
1254static void xenvif_tx_build_gops(struct xenvif *vif, 1277static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1255 int budget, 1278 int budget,
1256 unsigned *copy_ops, 1279 unsigned *copy_ops,
1257 unsigned *map_ops) 1280 unsigned *map_ops)
1258{ 1281{
1259 struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; 1282 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
1260 struct sk_buff *skb; 1283 struct sk_buff *skb;
1261 int ret; 1284 int ret;
1262 1285
1263 while (skb_queue_len(&vif->tx_queue) < budget) { 1286 while (skb_queue_len(&queue->tx_queue) < budget) {
1264 struct xen_netif_tx_request txreq; 1287 struct xen_netif_tx_request txreq;
1265 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1288 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1266 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1289 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1293,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1270 unsigned int data_len; 1293 unsigned int data_len;
1271 pending_ring_idx_t index; 1294 pending_ring_idx_t index;
1272 1295
1273 if (vif->tx.sring->req_prod - vif->tx.req_cons > 1296 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1274 XEN_NETIF_TX_RING_SIZE) { 1297 XEN_NETIF_TX_RING_SIZE) {
1275 netdev_err(vif->dev, 1298 netdev_err(queue->vif->dev,
1276 "Impossible number of requests. " 1299 "Impossible number of requests. "
1277 "req_prod %d, req_cons %d, size %ld\n", 1300 "req_prod %d, req_cons %d, size %ld\n",
1278 vif->tx.sring->req_prod, vif->tx.req_cons, 1301 queue->tx.sring->req_prod, queue->tx.req_cons,
1279 XEN_NETIF_TX_RING_SIZE); 1302 XEN_NETIF_TX_RING_SIZE);
1280 xenvif_fatal_tx_err(vif); 1303 xenvif_fatal_tx_err(queue->vif);
1281 break; 1304 break;
1282 } 1305 }
1283 1306
1284 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); 1307 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1285 if (!work_to_do) 1308 if (!work_to_do)
1286 break; 1309 break;
1287 1310
1288 idx = vif->tx.req_cons; 1311 idx = queue->tx.req_cons;
1289 rmb(); /* Ensure that we see the request before we copy it. */ 1312 rmb(); /* Ensure that we see the request before we copy it. */
1290 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); 1313 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1291 1314
1292 /* Credit-based scheduling. */ 1315 /* Credit-based scheduling. */
1293 if (txreq.size > vif->remaining_credit && 1316 if (txreq.size > queue->remaining_credit &&
1294 tx_credit_exceeded(vif, txreq.size)) 1317 tx_credit_exceeded(queue, txreq.size))
1295 break; 1318 break;
1296 1319
1297 vif->remaining_credit -= txreq.size; 1320 queue->remaining_credit -= txreq.size;
1298 1321
1299 work_to_do--; 1322 work_to_do--;
1300 vif->tx.req_cons = ++idx; 1323 queue->tx.req_cons = ++idx;
1301 1324
1302 memset(extras, 0, sizeof(extras)); 1325 memset(extras, 0, sizeof(extras));
1303 if (txreq.flags & XEN_NETTXF_extra_info) { 1326 if (txreq.flags & XEN_NETTXF_extra_info) {
1304 work_to_do = xenvif_get_extras(vif, extras, 1327 work_to_do = xenvif_get_extras(queue, extras,
1305 work_to_do); 1328 work_to_do);
1306 idx = vif->tx.req_cons; 1329 idx = queue->tx.req_cons;
1307 if (unlikely(work_to_do < 0)) 1330 if (unlikely(work_to_do < 0))
1308 break; 1331 break;
1309 } 1332 }
1310 1333
1311 ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); 1334 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1312 if (unlikely(ret < 0)) 1335 if (unlikely(ret < 0))
1313 break; 1336 break;
1314 1337
1315 idx += ret; 1338 idx += ret;
1316 1339
1317 if (unlikely(txreq.size < ETH_HLEN)) { 1340 if (unlikely(txreq.size < ETH_HLEN)) {
1318 netdev_dbg(vif->dev, 1341 netdev_dbg(queue->vif->dev,
1319 "Bad packet size: %d\n", txreq.size); 1342 "Bad packet size: %d\n", txreq.size);
1320 xenvif_tx_err(vif, &txreq, idx); 1343 xenvif_tx_err(queue, &txreq, idx);
1321 break; 1344 break;
1322 } 1345 }
1323 1346
1324 /* No crossing a page as the payload mustn't fragment. */ 1347 /* No crossing a page as the payload mustn't fragment. */
1325 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1348 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1326 netdev_err(vif->dev, 1349 netdev_err(queue->vif->dev,
1327 "txreq.offset: %x, size: %u, end: %lu\n", 1350 "txreq.offset: %x, size: %u, end: %lu\n",
1328 txreq.offset, txreq.size, 1351 txreq.offset, txreq.size,
1329 (txreq.offset&~PAGE_MASK) + txreq.size); 1352 (txreq.offset&~PAGE_MASK) + txreq.size);
1330 xenvif_fatal_tx_err(vif); 1353 xenvif_fatal_tx_err(queue->vif);
1331 break; 1354 break;
1332 } 1355 }
1333 1356
1334 index = pending_index(vif->pending_cons); 1357 index = pending_index(queue->pending_cons);
1335 pending_idx = vif->pending_ring[index]; 1358 pending_idx = queue->pending_ring[index];
1336 1359
1337 data_len = (txreq.size > PKT_PROT_LEN && 1360 data_len = (txreq.size > PKT_PROT_LEN &&
1338 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 1361 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1363,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1340 1363
1341 skb = xenvif_alloc_skb(data_len); 1364 skb = xenvif_alloc_skb(data_len);
1342 if (unlikely(skb == NULL)) { 1365 if (unlikely(skb == NULL)) {
1343 netdev_dbg(vif->dev, 1366 netdev_dbg(queue->vif->dev,
1344 "Can't allocate a skb in start_xmit.\n"); 1367 "Can't allocate a skb in start_xmit.\n");
1345 xenvif_tx_err(vif, &txreq, idx); 1368 xenvif_tx_err(queue, &txreq, idx);
1346 break; 1369 break;
1347 } 1370 }
1348 1371
@@ -1350,7 +1373,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1350 struct xen_netif_extra_info *gso; 1373 struct xen_netif_extra_info *gso;
1351 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1374 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1352 1375
1353 if (xenvif_set_skb_gso(vif, skb, gso)) { 1376 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1354 /* Failure in xenvif_set_skb_gso is fatal. */ 1377 /* Failure in xenvif_set_skb_gso is fatal. */
1355 kfree_skb(skb); 1378 kfree_skb(skb);
1356 break; 1379 break;
@@ -1360,18 +1383,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1360 XENVIF_TX_CB(skb)->pending_idx = pending_idx; 1383 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1361 1384
1362 __skb_put(skb, data_len); 1385 __skb_put(skb, data_len);
1363 vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; 1386 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1364 vif->tx_copy_ops[*copy_ops].source.domid = vif->domid; 1387 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1365 vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset; 1388 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1366 1389
1367 vif->tx_copy_ops[*copy_ops].dest.u.gmfn = 1390 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1368 virt_to_mfn(skb->data); 1391 virt_to_mfn(skb->data);
1369 vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; 1392 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1370 vif->tx_copy_ops[*copy_ops].dest.offset = 1393 queue->tx_copy_ops[*copy_ops].dest.offset =
1371 offset_in_page(skb->data); 1394 offset_in_page(skb->data);
1372 1395
1373 vif->tx_copy_ops[*copy_ops].len = data_len; 1396 queue->tx_copy_ops[*copy_ops].len = data_len;
1374 vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; 1397 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1375 1398
1376 (*copy_ops)++; 1399 (*copy_ops)++;
1377 1400
@@ -1380,42 +1403,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1380 skb_shinfo(skb)->nr_frags++; 1403 skb_shinfo(skb)->nr_frags++;
1381 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1404 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1382 pending_idx); 1405 pending_idx);
1383 xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); 1406 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1384 gop++; 1407 gop++;
1385 } else { 1408 } else {
1386 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1409 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1387 INVALID_PENDING_IDX); 1410 INVALID_PENDING_IDX);
1388 memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, 1411 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1389 sizeof(txreq)); 1412 sizeof(txreq));
1390 } 1413 }
1391 1414
1392 vif->pending_cons++; 1415 queue->pending_cons++;
1393 1416
1394 request_gop = xenvif_get_requests(vif, skb, txfrags, gop); 1417 request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
1395 if (request_gop == NULL) { 1418 if (request_gop == NULL) {
1396 kfree_skb(skb); 1419 kfree_skb(skb);
1397 xenvif_tx_err(vif, &txreq, idx); 1420 xenvif_tx_err(queue, &txreq, idx);
1398 break; 1421 break;
1399 } 1422 }
1400 gop = request_gop; 1423 gop = request_gop;
1401 1424
1402 __skb_queue_tail(&vif->tx_queue, skb); 1425 __skb_queue_tail(&queue->tx_queue, skb);
1403 1426
1404 vif->tx.req_cons = idx; 1427 queue->tx.req_cons = idx;
1405 1428
1406 if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) || 1429 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1407 (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops))) 1430 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1408 break; 1431 break;
1409 } 1432 }
1410 1433
1411 (*map_ops) = gop - vif->tx_map_ops; 1434 (*map_ops) = gop - queue->tx_map_ops;
1412 return; 1435 return;
1413} 1436}
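
xenvif_tx_build_gops() above splits each packet two ways: the first data_len bytes are grant-copied into the skb's linear area through the tx_copy_ops filled in just before, and whatever remains is grant-mapped as frags by xenvif_get_requests(). A sketch of just the length decision, with stand-in constants for PKT_PROT_LEN and XEN_NETBK_LEGACY_SLOTS_MAX (the real values live in the driver's headers):

#include <stdio.h>

#define PROT_LEN	128	/* stand-in for PKT_PROT_LEN */
#define SLOTS_MAX	18	/* stand-in for XEN_NETBK_LEGACY_SLOTS_MAX */

/* Bytes that go into the linear area; the rest stays in mapped frags. */
static unsigned int head_copy_len(unsigned int pkt_size, int slots)
{
	if (pkt_size > PROT_LEN && slots < SLOTS_MAX)
		return PROT_LEN;	/* big packet: copy headers only */
	return pkt_size;		/* small packet: copy it whole */
}

int main(void)
{
	printf("%u\n", head_copy_len(64, 1));	/* 64  */
	printf("%u\n", head_copy_len(1500, 1));	/* 128 */
	return 0;
}
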
1414 1437
1415/* Consolidate skb with a frag_list into a brand new one with local pages on 1438/* Consolidate skb with a frag_list into a brand new one with local pages on
1416 * frags. Returns 0 or -ENOMEM if can't allocate new pages. 1439 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1417 */ 1440 */
1418static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) 1441static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1419{ 1442{
1420 unsigned int offset = skb_headlen(skb); 1443 unsigned int offset = skb_headlen(skb);
1421 skb_frag_t frags[MAX_SKB_FRAGS]; 1444 skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1446,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1423 struct ubuf_info *uarg; 1446 struct ubuf_info *uarg;
1424 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; 1447 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1425 1448
1426 vif->tx_zerocopy_sent += 2; 1449 queue->stats.tx_zerocopy_sent += 2;
1427 vif->tx_frag_overflow++; 1450 queue->stats.tx_frag_overflow++;
1428 1451
1429 xenvif_fill_frags(vif, nskb); 1452 xenvif_fill_frags(queue, nskb);
1430 /* Subtract frags size, we will correct it later */ 1453 /* Subtract frags size, we will correct it later */
1431 skb->truesize -= skb->data_len; 1454 skb->truesize -= skb->data_len;
1432 skb->len += nskb->len; 1455 skb->len += nskb->len;
@@ -1478,37 +1501,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1478 return 0; 1501 return 0;
1479} 1502}
1480 1503
1481static int xenvif_tx_submit(struct xenvif *vif) 1504static int xenvif_tx_submit(struct xenvif_queue *queue)
1482{ 1505{
1483 struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; 1506 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1484 struct gnttab_copy *gop_copy = vif->tx_copy_ops; 1507 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1485 struct sk_buff *skb; 1508 struct sk_buff *skb;
1486 int work_done = 0; 1509 int work_done = 0;
1487 1510
1488 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { 1511 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1489 struct xen_netif_tx_request *txp; 1512 struct xen_netif_tx_request *txp;
1490 u16 pending_idx; 1513 u16 pending_idx;
1491 unsigned data_len; 1514 unsigned data_len;
1492 1515
1493 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1516 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1494 txp = &vif->pending_tx_info[pending_idx].req; 1517 txp = &queue->pending_tx_info[pending_idx].req;
1495 1518
1496 /* Check the remap error code. */ 1519 /* Check the remap error code. */
1497 if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) { 1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1498 skb_shinfo(skb)->nr_frags = 0; 1521 skb_shinfo(skb)->nr_frags = 0;
1499 kfree_skb(skb); 1522 kfree_skb(skb);
1500 continue; 1523 continue;
1501 } 1524 }
1502 1525
1503 data_len = skb->len; 1526 data_len = skb->len;
1504 callback_param(vif, pending_idx).ctx = NULL; 1527 callback_param(queue, pending_idx).ctx = NULL;
1505 if (data_len < txp->size) { 1528 if (data_len < txp->size) {
1506 /* Append the packet payload as a fragment. */ 1529 /* Append the packet payload as a fragment. */
1507 txp->offset += data_len; 1530 txp->offset += data_len;
1508 txp->size -= data_len; 1531 txp->size -= data_len;
1509 } else { 1532 } else {
1510 /* Schedule a response immediately. */ 1533 /* Schedule a response immediately. */
1511 xenvif_idx_release(vif, pending_idx, 1534 xenvif_idx_release(queue, pending_idx,
1512 XEN_NETIF_RSP_OKAY); 1535 XEN_NETIF_RSP_OKAY);
1513 } 1536 }
1514 1537
@@ -1517,12 +1540,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1517 else if (txp->flags & XEN_NETTXF_data_validated) 1540 else if (txp->flags & XEN_NETTXF_data_validated)
1518 skb->ip_summed = CHECKSUM_UNNECESSARY; 1541 skb->ip_summed = CHECKSUM_UNNECESSARY;
1519 1542
1520 xenvif_fill_frags(vif, skb); 1543 xenvif_fill_frags(queue, skb);
1521 1544
1522 if (unlikely(skb_has_frag_list(skb))) { 1545 if (unlikely(skb_has_frag_list(skb))) {
1523 if (xenvif_handle_frag_list(vif, skb)) { 1546 if (xenvif_handle_frag_list(queue, skb)) {
1524 if (net_ratelimit()) 1547 if (net_ratelimit())
1525 netdev_err(vif->dev, 1548 netdev_err(queue->vif->dev,
1526 "Not enough memory to consolidate frag_list!\n"); 1549 "Not enough memory to consolidate frag_list!\n");
1527 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1550 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1528 kfree_skb(skb); 1551 kfree_skb(skb);
@@ -1535,12 +1558,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1535 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1558 __pskb_pull_tail(skb, target - skb_headlen(skb));
1536 } 1559 }
1537 1560
1538 skb->dev = vif->dev; 1561 skb->dev = queue->vif->dev;
1539 skb->protocol = eth_type_trans(skb, skb->dev); 1562 skb->protocol = eth_type_trans(skb, skb->dev);
1540 skb_reset_network_header(skb); 1563 skb_reset_network_header(skb);
1541 1564
1542 if (checksum_setup(vif, skb)) { 1565 if (checksum_setup(queue, skb)) {
1543 netdev_dbg(vif->dev, 1566 netdev_dbg(queue->vif->dev,
1544 "Can't setup checksum in net_tx_action\n"); 1567 "Can't setup checksum in net_tx_action\n");
1545 /* We have to set this flag to trigger the callback */ 1568 /* We have to set this flag to trigger the callback */
1546 if (skb_shinfo(skb)->destructor_arg) 1569 if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1588,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
1565 DIV_ROUND_UP(skb->len - hdrlen, mss); 1588 DIV_ROUND_UP(skb->len - hdrlen, mss);
1566 } 1589 }
1567 1590
1568 vif->dev->stats.rx_bytes += skb->len; 1591 queue->stats.rx_bytes += skb->len;
1569 vif->dev->stats.rx_packets++; 1592 queue->stats.rx_packets++;
1570 1593
1571 work_done++; 1594 work_done++;
1572 1595
@@ -1577,7 +1600,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
1577 */ 1600 */
1578 if (skb_shinfo(skb)->destructor_arg) { 1601 if (skb_shinfo(skb)->destructor_arg) {
1579 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1602 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1580 vif->tx_zerocopy_sent++; 1603 queue->stats.tx_zerocopy_sent++;
1581 } 1604 }
1582 1605
1583 netif_receive_skb(skb); 1606 netif_receive_skb(skb);
@@ -1590,47 +1613,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1590{ 1613{
1591 unsigned long flags; 1614 unsigned long flags;
1592 pending_ring_idx_t index; 1615 pending_ring_idx_t index;
1593 struct xenvif *vif = ubuf_to_vif(ubuf); 1616 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1594 1617
1595 /* This is the only place where we grab this lock, to protect callbacks 1618 /* This is the only place where we grab this lock, to protect callbacks
1596 * from each other. 1619 * from each other.
1597 */ 1620 */
1598 spin_lock_irqsave(&vif->callback_lock, flags); 1621 spin_lock_irqsave(&queue->callback_lock, flags);
1599 do { 1622 do {
1600 u16 pending_idx = ubuf->desc; 1623 u16 pending_idx = ubuf->desc;
1601 ubuf = (struct ubuf_info *) ubuf->ctx; 1624 ubuf = (struct ubuf_info *) ubuf->ctx;
1602 BUG_ON(vif->dealloc_prod - vif->dealloc_cons >= 1625 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1603 MAX_PENDING_REQS); 1626 MAX_PENDING_REQS);
1604 index = pending_index(vif->dealloc_prod); 1627 index = pending_index(queue->dealloc_prod);
1605 vif->dealloc_ring[index] = pending_idx; 1628 queue->dealloc_ring[index] = pending_idx;
1606 /* Sync with xenvif_tx_dealloc_action: 1629 /* Sync with xenvif_tx_dealloc_action:
1607 * insert idx then incr producer. 1630 * insert idx then incr producer.
1608 */ 1631 */
1609 smp_wmb(); 1632 smp_wmb();
1610 vif->dealloc_prod++; 1633 queue->dealloc_prod++;
1611 } while (ubuf); 1634 } while (ubuf);
1612 wake_up(&vif->dealloc_wq); 1635 wake_up(&queue->dealloc_wq);
1613 spin_unlock_irqrestore(&vif->callback_lock, flags); 1636 spin_unlock_irqrestore(&queue->callback_lock, flags);
1614 1637
1615 if (likely(zerocopy_success)) 1638 if (likely(zerocopy_success))
1616 vif->tx_zerocopy_success++; 1639 queue->stats.tx_zerocopy_success++;
1617 else 1640 else
1618 vif->tx_zerocopy_fail++; 1641 queue->stats.tx_zerocopy_fail++;
1619} 1642}
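
The callback above does no unmapping itself: it only pushes the freed pending index into a ring and bumps a free-running producer, leaving the grant unmap to the dealloc kthread. A compilable sketch of that producer/consumer ring, assuming a power-of-two size as the driver's MAX_PENDING_REQS is; the constants and helpers here are illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256			/* must be a power of two */

static uint16_t ring[RING_SIZE];
static uint16_t prod, cons;		/* free-running; wrap-around is harmless */

static uint16_t pending_index(uint16_t idx)
{
	return idx & (RING_SIZE - 1);
}

static void produce(uint16_t pending_idx)
{
	assert((uint16_t)(prod - cons) < RING_SIZE);	/* never overfill */
	ring[pending_index(prod)] = pending_idx;
	/* the driver issues smp_wmb() here: publish the slot, then the index */
	prod++;
}

static int consume(uint16_t *pending_idx)
{
	if (cons == prod)
		return 0;				/* empty */
	/* the driver issues smp_rmb() after sampling prod */
	*pending_idx = ring[pending_index(cons++)];
	return 1;
}

int main(void)
{
	uint16_t v;

	for (v = 0; v < 5; v++)
		produce(v);
	while (consume(&v))
		printf("dealloc slot %u\n", v);
	return 0;
}
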
1620 1643
1621static inline void xenvif_tx_dealloc_action(struct xenvif *vif) 1644static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1622{ 1645{
1623 struct gnttab_unmap_grant_ref *gop; 1646 struct gnttab_unmap_grant_ref *gop;
1624 pending_ring_idx_t dc, dp; 1647 pending_ring_idx_t dc, dp;
1625 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; 1648 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1626 unsigned int i = 0; 1649 unsigned int i = 0;
1627 1650
1628 dc = vif->dealloc_cons; 1651 dc = queue->dealloc_cons;
1629 gop = vif->tx_unmap_ops; 1652 gop = queue->tx_unmap_ops;
1630 1653
1631 /* Free up any grants we have finished using */ 1654 /* Free up any grants we have finished using */
1632 do { 1655 do {
1633 dp = vif->dealloc_prod; 1656 dp = queue->dealloc_prod;
1634 1657
1635 /* Ensure we see all indices enqueued by all 1658 /* Ensure we see all indices enqueued by all
1636 * xenvif_zerocopy_callback(). 1659 * xenvif_zerocopy_callback().
@@ -1638,38 +1661,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1638 smp_rmb(); 1661 smp_rmb();
1639 1662
1640 while (dc != dp) { 1663 while (dc != dp) {
1641 BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS); 1664 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
1642 pending_idx = 1665 pending_idx =
1643 vif->dealloc_ring[pending_index(dc++)]; 1666 queue->dealloc_ring[pending_index(dc++)];
1644 1667
1645 pending_idx_release[gop-vif->tx_unmap_ops] = 1668 pending_idx_release[gop-queue->tx_unmap_ops] =
1646 pending_idx; 1669 pending_idx;
1647 vif->pages_to_unmap[gop-vif->tx_unmap_ops] = 1670 queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
1648 vif->mmap_pages[pending_idx]; 1671 queue->mmap_pages[pending_idx];
1649 gnttab_set_unmap_op(gop, 1672 gnttab_set_unmap_op(gop,
1650 idx_to_kaddr(vif, pending_idx), 1673 idx_to_kaddr(queue, pending_idx),
1651 GNTMAP_host_map, 1674 GNTMAP_host_map,
1652 vif->grant_tx_handle[pending_idx]); 1675 queue->grant_tx_handle[pending_idx]);
1653 xenvif_grant_handle_reset(vif, pending_idx); 1676 xenvif_grant_handle_reset(queue, pending_idx);
1654 ++gop; 1677 ++gop;
1655 } 1678 }
1656 1679
1657 } while (dp != vif->dealloc_prod); 1680 } while (dp != queue->dealloc_prod);
1658 1681
1659 vif->dealloc_cons = dc; 1682 queue->dealloc_cons = dc;
1660 1683
1661 if (gop - vif->tx_unmap_ops > 0) { 1684 if (gop - queue->tx_unmap_ops > 0) {
1662 int ret; 1685 int ret;
1663 ret = gnttab_unmap_refs(vif->tx_unmap_ops, 1686 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1664 NULL, 1687 NULL,
1665 vif->pages_to_unmap, 1688 queue->pages_to_unmap,
1666 gop - vif->tx_unmap_ops); 1689 gop - queue->tx_unmap_ops);
1667 if (ret) { 1690 if (ret) {
1668 netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n", 1691 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1669 gop - vif->tx_unmap_ops, ret); 1692 gop - queue->tx_unmap_ops, ret);
1670 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) { 1693 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1671 if (gop[i].status != GNTST_okay) 1694 if (gop[i].status != GNTST_okay)
1672 netdev_err(vif->dev, 1695 netdev_err(queue->vif->dev,
1673 " host_addr: %llx handle: %x status: %d\n", 1696 " host_addr: %llx handle: %x status: %d\n",
1674 gop[i].host_addr, 1697 gop[i].host_addr,
1675 gop[i].handle, 1698 gop[i].handle,
@@ -1679,91 +1702,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1679 } 1702 }
1680 } 1703 }
1681 1704
1682 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) 1705 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1683 xenvif_idx_release(vif, pending_idx_release[i], 1706 xenvif_idx_release(queue, pending_idx_release[i],
1684 XEN_NETIF_RSP_OKAY); 1707 XEN_NETIF_RSP_OKAY);
1685} 1708}
1686 1709
1687 1710
1688/* Called after netfront has transmitted */ 1711/* Called after netfront has transmitted */
1689int xenvif_tx_action(struct xenvif *vif, int budget) 1712int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1690{ 1713{
1691 unsigned nr_mops, nr_cops = 0; 1714 unsigned nr_mops, nr_cops = 0;
1692 int work_done, ret; 1715 int work_done, ret;
1693 1716
1694 if (unlikely(!tx_work_todo(vif))) 1717 if (unlikely(!tx_work_todo(queue)))
1695 return 0; 1718 return 0;
1696 1719
1697 xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops); 1720 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1698 1721
1699 if (nr_cops == 0) 1722 if (nr_cops == 0)
1700 return 0; 1723 return 0;
1701 1724
1702 gnttab_batch_copy(vif->tx_copy_ops, nr_cops); 1725 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1703 if (nr_mops != 0) { 1726 if (nr_mops != 0) {
1704 ret = gnttab_map_refs(vif->tx_map_ops, 1727 ret = gnttab_map_refs(queue->tx_map_ops,
1705 NULL, 1728 NULL,
1706 vif->pages_to_map, 1729 queue->pages_to_map,
1707 nr_mops); 1730 nr_mops);
1708 BUG_ON(ret); 1731 BUG_ON(ret);
1709 } 1732 }
1710 1733
1711 work_done = xenvif_tx_submit(vif); 1734 work_done = xenvif_tx_submit(queue);
1712 1735
1713 return work_done; 1736 return work_done;
1714} 1737}
1715 1738
1716static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 1739static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1717 u8 status) 1740 u8 status)
1718{ 1741{
1719 struct pending_tx_info *pending_tx_info; 1742 struct pending_tx_info *pending_tx_info;
1720 pending_ring_idx_t index; 1743 pending_ring_idx_t index;
1721 unsigned long flags; 1744 unsigned long flags;
1722 1745
1723 pending_tx_info = &vif->pending_tx_info[pending_idx]; 1746 pending_tx_info = &queue->pending_tx_info[pending_idx];
1724 spin_lock_irqsave(&vif->response_lock, flags); 1747 spin_lock_irqsave(&queue->response_lock, flags);
1725 make_tx_response(vif, &pending_tx_info->req, status); 1748 make_tx_response(queue, &pending_tx_info->req, status);
1726 index = pending_index(vif->pending_prod); 1749 index = pending_index(queue->pending_prod);
1727 vif->pending_ring[index] = pending_idx; 1750 queue->pending_ring[index] = pending_idx;
1728 /* TX shouldn't use the index before we give it back here */ 1751 /* TX shouldn't use the index before we give it back here */
1729 mb(); 1752 mb();
1730 vif->pending_prod++; 1753 queue->pending_prod++;
1731 spin_unlock_irqrestore(&vif->response_lock, flags); 1754 spin_unlock_irqrestore(&queue->response_lock, flags);
1732} 1755}
1733 1756
1734 1757
1735static void make_tx_response(struct xenvif *vif, 1758static void make_tx_response(struct xenvif_queue *queue,
1736 struct xen_netif_tx_request *txp, 1759 struct xen_netif_tx_request *txp,
1737 s8 st) 1760 s8 st)
1738{ 1761{
1739 RING_IDX i = vif->tx.rsp_prod_pvt; 1762 RING_IDX i = queue->tx.rsp_prod_pvt;
1740 struct xen_netif_tx_response *resp; 1763 struct xen_netif_tx_response *resp;
1741 int notify; 1764 int notify;
1742 1765
1743 resp = RING_GET_RESPONSE(&vif->tx, i); 1766 resp = RING_GET_RESPONSE(&queue->tx, i);
1744 resp->id = txp->id; 1767 resp->id = txp->id;
1745 resp->status = st; 1768 resp->status = st;
1746 1769
1747 if (txp->flags & XEN_NETTXF_extra_info) 1770 if (txp->flags & XEN_NETTXF_extra_info)
1748 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1771 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1749 1772
1750 vif->tx.rsp_prod_pvt = ++i; 1773 queue->tx.rsp_prod_pvt = ++i;
1751 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); 1774 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1752 if (notify) 1775 if (notify)
1753 notify_remote_via_irq(vif->tx_irq); 1776 notify_remote_via_irq(queue->tx_irq);
1754} 1777}
1755 1778
1756static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 1779static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1757 u16 id, 1780 u16 id,
1758 s8 st, 1781 s8 st,
1759 u16 offset, 1782 u16 offset,
1760 u16 size, 1783 u16 size,
1761 u16 flags) 1784 u16 flags)
1762{ 1785{
1763 RING_IDX i = vif->rx.rsp_prod_pvt; 1786 RING_IDX i = queue->rx.rsp_prod_pvt;
1764 struct xen_netif_rx_response *resp; 1787 struct xen_netif_rx_response *resp;
1765 1788
1766 resp = RING_GET_RESPONSE(&vif->rx, i); 1789 resp = RING_GET_RESPONSE(&queue->rx, i);
1767 resp->offset = offset; 1790 resp->offset = offset;
1768 resp->flags = flags; 1791 resp->flags = flags;
1769 resp->id = id; 1792 resp->id = id;
@@ -1771,26 +1794,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1771 if (st < 0) 1794 if (st < 0)
1772 resp->status = (s16)st; 1795 resp->status = (s16)st;
1773 1796
1774 vif->rx.rsp_prod_pvt = ++i; 1797 queue->rx.rsp_prod_pvt = ++i;
1775 1798
1776 return resp; 1799 return resp;
1777} 1800}
1778 1801
1779void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx) 1802void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1780{ 1803{
1781 int ret; 1804 int ret;
1782 struct gnttab_unmap_grant_ref tx_unmap_op; 1805 struct gnttab_unmap_grant_ref tx_unmap_op;
1783 1806
1784 gnttab_set_unmap_op(&tx_unmap_op, 1807 gnttab_set_unmap_op(&tx_unmap_op,
1785 idx_to_kaddr(vif, pending_idx), 1808 idx_to_kaddr(queue, pending_idx),
1786 GNTMAP_host_map, 1809 GNTMAP_host_map,
1787 vif->grant_tx_handle[pending_idx]); 1810 queue->grant_tx_handle[pending_idx]);
1788 xenvif_grant_handle_reset(vif, pending_idx); 1811 xenvif_grant_handle_reset(queue, pending_idx);
1789 1812
1790 ret = gnttab_unmap_refs(&tx_unmap_op, NULL, 1813 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1791 &vif->mmap_pages[pending_idx], 1); 1814 &queue->mmap_pages[pending_idx], 1);
1792 if (ret) { 1815 if (ret) {
1793 netdev_err(vif->dev, 1816 netdev_err(queue->vif->dev,
1794 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", 1817 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1795 ret, 1818 ret,
1796 pending_idx, 1819 pending_idx,
@@ -1800,41 +1823,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1800 BUG(); 1823 BUG();
1801 } 1824 }
1802 1825
1803 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); 1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1804} 1827}
1805 1828
1806static inline int rx_work_todo(struct xenvif *vif) 1829static inline int rx_work_todo(struct xenvif_queue *queue)
1807{ 1830{
1808 return (!skb_queue_empty(&vif->rx_queue) && 1831 return (!skb_queue_empty(&queue->rx_queue) &&
1809 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) || 1832 xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
1810 vif->rx_queue_purge; 1833 queue->rx_queue_purge;
1811} 1834}
1812 1835
1813static inline int tx_work_todo(struct xenvif *vif) 1836static inline int tx_work_todo(struct xenvif_queue *queue)
1814{ 1837{
1815
1816 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))) 1838 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1817 return 1; 1839 return 1;
1818 1840
1819 return 0; 1841 return 0;
1820} 1842}
1821 1843
1822static inline bool tx_dealloc_work_todo(struct xenvif *vif) 1844static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1823{ 1845{
1824 return vif->dealloc_cons != vif->dealloc_prod; 1846 return queue->dealloc_cons != queue->dealloc_prod;
1825} 1847}
1826 1848
1827void xenvif_unmap_frontend_rings(struct xenvif *vif) 1849void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1828{ 1850{
1829 if (vif->tx.sring) 1851 if (queue->tx.sring)
1830 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1852 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1831 vif->tx.sring); 1853 queue->tx.sring);
1832 if (vif->rx.sring) 1854 if (queue->rx.sring)
1833 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1855 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1834 vif->rx.sring); 1856 queue->rx.sring);
1835} 1857}
1836 1858
1837int xenvif_map_frontend_rings(struct xenvif *vif, 1859int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1838 grant_ref_t tx_ring_ref, 1860 grant_ref_t tx_ring_ref,
1839 grant_ref_t rx_ring_ref) 1861 grant_ref_t rx_ring_ref)
1840{ 1862{
@@ -1844,85 +1866,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
1844 1866
1845 int err = -ENOMEM; 1867 int err = -ENOMEM;
1846 1868
1847 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1869 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1848 tx_ring_ref, &addr); 1870 tx_ring_ref, &addr);
1849 if (err) 1871 if (err)
1850 goto err; 1872 goto err;
1851 1873
1852 txs = (struct xen_netif_tx_sring *)addr; 1874 txs = (struct xen_netif_tx_sring *)addr;
1853 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); 1875 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1854 1876
1855 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1877 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1856 rx_ring_ref, &addr); 1878 rx_ring_ref, &addr);
1857 if (err) 1879 if (err)
1858 goto err; 1880 goto err;
1859 1881
1860 rxs = (struct xen_netif_rx_sring *)addr; 1882 rxs = (struct xen_netif_rx_sring *)addr;
1861 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); 1883 BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1862 1884
1863 return 0; 1885 return 0;
1864 1886
1865err: 1887err:
1866 xenvif_unmap_frontend_rings(vif); 1888 xenvif_unmap_frontend_rings(queue);
1867 return err; 1889 return err;
1868} 1890}
1869 1891
1870void xenvif_stop_queue(struct xenvif *vif) 1892static void xenvif_start_queue(struct xenvif_queue *queue)
1871{ 1893{
1872 if (!vif->can_queue) 1894 if (xenvif_schedulable(queue->vif))
1873 return; 1895 xenvif_wake_queue(queue);
1874
1875 netif_stop_queue(vif->dev);
1876}
1877
1878static void xenvif_start_queue(struct xenvif *vif)
1879{
1880 if (xenvif_schedulable(vif))
1881 netif_wake_queue(vif->dev);
1882} 1896}
1883 1897
1884int xenvif_kthread_guest_rx(void *data) 1898int xenvif_kthread_guest_rx(void *data)
1885{ 1899{
1886 struct xenvif *vif = data; 1900 struct xenvif_queue *queue = data;
1887 struct sk_buff *skb; 1901 struct sk_buff *skb;
1888 1902
1889 while (!kthread_should_stop()) { 1903 while (!kthread_should_stop()) {
1890 wait_event_interruptible(vif->wq, 1904 wait_event_interruptible(queue->wq,
1891 rx_work_todo(vif) || 1905 rx_work_todo(queue) ||
1892 vif->disabled || 1906 queue->vif->disabled ||
1893 kthread_should_stop()); 1907 kthread_should_stop());
1894 1908
1895 /* This frontend is found to be rogue, disable it in 1909 /* This frontend is found to be rogue, disable it in
1896 * kthread context. Currently this is only set when 1910 * kthread context. Currently this is only set when
1897 * netback finds out frontend sends malformed packet, 1911 * netback finds out frontend sends malformed packet,
1898 * but we cannot disable the interface in softirq 1912 * but we cannot disable the interface in softirq
1899 * context so we defer it here. 1913 * context so we defer it here, if this thread is
1914 * associated with queue 0.
1900 */ 1915 */
1901 if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) 1916 if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
1902 xenvif_carrier_off(vif); 1917 xenvif_carrier_off(queue->vif);
1903 1918
1904 if (kthread_should_stop()) 1919 if (kthread_should_stop())
1905 break; 1920 break;
1906 1921
1907 if (vif->rx_queue_purge) { 1922 if (queue->rx_queue_purge) {
1908 skb_queue_purge(&vif->rx_queue); 1923 skb_queue_purge(&queue->rx_queue);
1909 vif->rx_queue_purge = false; 1924 queue->rx_queue_purge = false;
1910 } 1925 }
1911 1926
1912 if (!skb_queue_empty(&vif->rx_queue)) 1927 if (!skb_queue_empty(&queue->rx_queue))
1913 xenvif_rx_action(vif); 1928 xenvif_rx_action(queue);
1914 1929
1915 if (skb_queue_empty(&vif->rx_queue) && 1930 if (skb_queue_empty(&queue->rx_queue) &&
1916 netif_queue_stopped(vif->dev)) { 1931 xenvif_queue_stopped(queue)) {
1917 del_timer_sync(&vif->wake_queue); 1932 del_timer_sync(&queue->wake_queue);
1918 xenvif_start_queue(vif); 1933 xenvif_start_queue(queue);
1919 } 1934 }
1920 1935
1921 cond_resched(); 1936 cond_resched();
1922 } 1937 }
1923 1938
1924 /* Bin any remaining skbs */ 1939 /* Bin any remaining skbs */
1925 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) 1940 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
1926 dev_kfree_skb(skb); 1941 dev_kfree_skb(skb);
1927 1942
1928 return 0; 1943 return 0;
@@ -1930,22 +1945,22 @@ int xenvif_kthread_guest_rx(void *data)
1930 1945
1931int xenvif_dealloc_kthread(void *data) 1946int xenvif_dealloc_kthread(void *data)
1932{ 1947{
1933 struct xenvif *vif = data; 1948 struct xenvif_queue *queue = data;
1934 1949
1935 while (!kthread_should_stop()) { 1950 while (!kthread_should_stop()) {
1936 wait_event_interruptible(vif->dealloc_wq, 1951 wait_event_interruptible(queue->dealloc_wq,
1937 tx_dealloc_work_todo(vif) || 1952 tx_dealloc_work_todo(queue) ||
1938 kthread_should_stop()); 1953 kthread_should_stop());
1939 if (kthread_should_stop()) 1954 if (kthread_should_stop())
1940 break; 1955 break;
1941 1956
1942 xenvif_tx_dealloc_action(vif); 1957 xenvif_tx_dealloc_action(queue);
1943 cond_resched(); 1958 cond_resched();
1944 } 1959 }
1945 1960
1946 /* Unmap anything remaining */ 1961 /* Unmap anything remaining */
1947 if (tx_dealloc_work_todo(vif)) 1962 if (tx_dealloc_work_todo(queue))
1948 xenvif_tx_dealloc_action(vif); 1963 xenvif_tx_dealloc_action(queue);
1949 1964
1950 return 0; 1965 return 0;
1951} 1966}
@@ -1957,6 +1972,9 @@ static int __init netback_init(void)
1957 if (!xen_domain()) 1972 if (!xen_domain())
1958 return -ENODEV; 1973 return -ENODEV;
1959 1974
1975 /* Allow as many queues as there are CPUs, by default */
1976 xenvif_max_queues = num_online_cpus();
1977
1960 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { 1978 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1961 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", 1979 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1962 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); 1980 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
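
That ends the netback.c changes. The pattern throughout the file is mechanical: every ring, credit, pending-slot, grant-op and statistics field moves from struct xenvif into a new per-queue structure, and call sites swap vif-> for queue->, reaching shared state through a back-pointer. A rough sketch of the resulting shape (illustrative only, not the driver's actual definitions):

struct xenvif;				/* per-interface: netdev, domid, flags */

struct xenvif_queue {
	unsigned int id;		/* queue number, 0-based */
	struct xenvif *vif;		/* back-pointer for the shared state */
	/* formerly per-vif: tx/rx rings, credit window, pending and
	 * dealloc rings, grant-op arrays, per-queue stats, ... */
};
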
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7a206cffb062..96c63dc2509e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -19,6 +19,8 @@
19*/ 19*/
20 20
21#include "common.h" 21#include "common.h"
22#include <linux/vmalloc.h>
23#include <linux/rtnetlink.h>
22 24
23struct backend_info { 25struct backend_info {
24 struct xenbus_device *dev; 26 struct xenbus_device *dev;
@@ -34,8 +36,9 @@ struct backend_info {
34 u8 have_hotplug_status_watch:1; 36 u8 have_hotplug_status_watch:1;
35}; 37};
36 38
37static int connect_rings(struct backend_info *); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
38static void connect(struct backend_info *); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be);
39static void backend_create_xenvif(struct backend_info *be); 42static void backend_create_xenvif(struct backend_info *be);
40static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
41static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
@@ -157,6 +160,12 @@ static int netback_probe(struct xenbus_device *dev,
157 if (err) 160 if (err)
158 pr_debug("Error writing feature-split-event-channels\n"); 161 pr_debug("Error writing feature-split-event-channels\n");
159 162
163 /* Multi-queue support: This is an optional feature. */
164 err = xenbus_printf(XBT_NIL, dev->nodename,
165 "multi-queue-max-queues", "%u", xenvif_max_queues);
166 if (err)
167 pr_debug("Error writing multi-queue-max-queues\n");
168
160 err = xenbus_switch_state(dev, XenbusStateInitWait); 169 err = xenbus_switch_state(dev, XenbusStateInitWait);
161 if (err) 170 if (err)
162 goto fail; 171 goto fail;
@@ -485,10 +494,26 @@ static void connect(struct backend_info *be)
485{ 494{
486 int err; 495 int err;
487 struct xenbus_device *dev = be->dev; 496 struct xenbus_device *dev = be->dev;
497 unsigned long credit_bytes, credit_usec;
498 unsigned int queue_index;
499 unsigned int requested_num_queues;
500 struct xenvif_queue *queue;
488 501
489 err = connect_rings(be); 502 /* Check whether the frontend requested multiple queues
490 if (err) 503 * and read the number requested.
504 */
505 err = xenbus_scanf(XBT_NIL, dev->otherend,
506 "multi-queue-num-queues",
507 "%u", &requested_num_queues);
508 if (err < 0) {
509 requested_num_queues = 1; /* Fall back to single queue */
510 } else if (requested_num_queues > xenvif_max_queues) {
511 /* buggy or malicious guest */
512 xenbus_dev_fatal(dev, err,
513 "guest requested %u queues, exceeding the maximum of %u.",
514 requested_num_queues, xenvif_max_queues);
491 return; 515 return;
516 }
492 517
493 err = xen_net_read_mac(dev, be->vif->fe_dev_addr); 518 err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
494 if (err) { 519 if (err) {
@@ -496,9 +521,54 @@ static void connect(struct backend_info *be)
496 return; 521 return;
497 } 522 }
498 523
499 xen_net_read_rate(dev, &be->vif->credit_bytes, 524 xen_net_read_rate(dev, &credit_bytes, &credit_usec);
500 &be->vif->credit_usec); 525 read_xenbus_vif_flags(be);
501 be->vif->remaining_credit = be->vif->credit_bytes; 526
527 /* Use the number of queues requested by the frontend */
528 be->vif->queues = vzalloc(requested_num_queues *
529 sizeof(struct xenvif_queue));
530 rtnl_lock();
531 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
532 rtnl_unlock();
533
534 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
535 queue = &be->vif->queues[queue_index];
536 queue->vif = be->vif;
537 queue->id = queue_index;
538 snprintf(queue->name, sizeof(queue->name), "%s-q%u",
539 be->vif->dev->name, queue->id);
540
541 err = xenvif_init_queue(queue);
542 if (err) {
543 /* xenvif_init_queue() cleans up after itself on
544 * failure, but we need to clean up any previously
545 * initialised queues. Set num_queues to i so that
546 * earlier queues can be destroyed using the regular
547 * disconnect logic.
548 */
549 rtnl_lock();
550 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
551 rtnl_unlock();
552 goto err;
553 }
554
555 queue->remaining_credit = credit_bytes;
556
557 err = connect_rings(be, queue);
558 if (err) {
559 /* connect_rings() cleans up after itself on failure,
560 * but we need to clean up after xenvif_init_queue() here,
561 * and also clean up any previously initialised queues.
562 */
563 xenvif_deinit_queue(queue);
564 rtnl_lock();
565 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
566 rtnl_unlock();
567 goto err;
568 }
569 }
570
571 xenvif_carrier_on(be->vif);
502 572
503 unregister_hotplug_status_watch(be); 573 unregister_hotplug_status_watch(be);
504 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 574 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -507,45 +577,109 @@ static void connect(struct backend_info *be)
507 if (!err) 577 if (!err)
508 be->have_hotplug_status_watch = 1; 578 be->have_hotplug_status_watch = 1;
509 579
510 netif_wake_queue(be->vif->dev); 580 netif_tx_wake_all_queues(be->vif->dev);
581
582 return;
583
584err:
585 if (be->vif->dev->real_num_tx_queues > 0)
586 xenvif_disconnect(be->vif); /* Clean up existing queues */
587 vfree(be->vif->queues);
588 be->vif->queues = NULL;
589 rtnl_lock();
590 netif_set_real_num_tx_queues(be->vif->dev, 0);
591 rtnl_unlock();
592 return;
511} 593}
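
The connect() hunk above is the backend half of the queue-count negotiation: the backend advertises multi-queue-max-queues at probe time, the frontend writes multi-queue-num-queues, and the backend falls back to one queue when the key is absent but refuses requests above its limit. A sketch of just that decision, with plain values standing in for the xenbus_scanf() result:

#include <stdio.h>

static unsigned int backend_max = 8;	/* advertised multi-queue-max-queues */

/* Returns the number of queues to create, or 0 for a fatal request. */
static unsigned int queues_to_create(int scanf_err, unsigned int requested)
{
	if (scanf_err < 0)
		return 1;		/* key missing: single-queue frontend */
	if (requested > backend_max)
		return 0;		/* buggy or malicious guest */
	return requested;
}

int main(void)
{
	printf("%u\n", queues_to_create(-1, 0));	/* 1 */
	printf("%u\n", queues_to_create(1, 4));		/* 4 */
	printf("%u\n", queues_to_create(1, 99));	/* 0 */
	return 0;
}
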
512 594
513 595
514static int connect_rings(struct backend_info *be) 596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
515{ 597{
516 struct xenvif *vif = be->vif;
517 struct xenbus_device *dev = be->dev; 598 struct xenbus_device *dev = be->dev;
599 unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
518 unsigned long tx_ring_ref, rx_ring_ref; 600 unsigned long tx_ring_ref, rx_ring_ref;
519 unsigned int tx_evtchn, rx_evtchn, rx_copy; 601 unsigned int tx_evtchn, rx_evtchn;
520 int err; 602 int err;
521 int val; 603 char *xspath;
604 size_t xspathsize;
605 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
606
607 /* If the frontend requested 1 queue, or we have fallen back
608 * to single queue due to lack of frontend support for multi-
609 * queue, expect the remaining XenStore keys in the toplevel
610 * directory. Otherwise, expect them in a subdirectory called
611 * queue-N.
612 */
613 if (num_queues == 1) {
614 xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
615 if (!xspath) {
616 xenbus_dev_fatal(dev, -ENOMEM,
617 "reading ring references");
618 return -ENOMEM;
619 }
620 strcpy(xspath, dev->otherend);
621 } else {
622 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
623 xspath = kzalloc(xspathsize, GFP_KERNEL);
624 if (!xspath) {
625 xenbus_dev_fatal(dev, -ENOMEM,
626 "reading ring references");
627 return -ENOMEM;
628 }
629 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
630 queue->id);
631 }
522 632
523 err = xenbus_gather(XBT_NIL, dev->otherend, 633 err = xenbus_gather(XBT_NIL, xspath,
524 "tx-ring-ref", "%lu", &tx_ring_ref, 634 "tx-ring-ref", "%lu", &tx_ring_ref,
525 "rx-ring-ref", "%lu", &rx_ring_ref, NULL); 635 "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
526 if (err) { 636 if (err) {
527 xenbus_dev_fatal(dev, err, 637 xenbus_dev_fatal(dev, err,
528 "reading %s/ring-ref", 638 "reading %s/ring-ref",
529 dev->otherend); 639 xspath);
530 return err; 640 goto err;
531 } 641 }
532 642
533 /* Try split event channels first, then single event channel. */ 643 /* Try split event channels first, then single event channel. */
534 err = xenbus_gather(XBT_NIL, dev->otherend, 644 err = xenbus_gather(XBT_NIL, xspath,
535 "event-channel-tx", "%u", &tx_evtchn, 645 "event-channel-tx", "%u", &tx_evtchn,
536 "event-channel-rx", "%u", &rx_evtchn, NULL); 646 "event-channel-rx", "%u", &rx_evtchn, NULL);
537 if (err < 0) { 647 if (err < 0) {
538 err = xenbus_scanf(XBT_NIL, dev->otherend, 648 err = xenbus_scanf(XBT_NIL, xspath,
539 "event-channel", "%u", &tx_evtchn); 649 "event-channel", "%u", &tx_evtchn);
540 if (err < 0) { 650 if (err < 0) {
541 xenbus_dev_fatal(dev, err, 651 xenbus_dev_fatal(dev, err,
542 "reading %s/event-channel(-tx/rx)", 652 "reading %s/event-channel(-tx/rx)",
543 dev->otherend); 653 xspath);
544 return err; 654 goto err;
545 } 655 }
546 rx_evtchn = tx_evtchn; 656 rx_evtchn = tx_evtchn;
547 } 657 }
548 658
659 /* Map the shared frame, irq etc. */
660 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
661 tx_evtchn, rx_evtchn);
662 if (err) {
663 xenbus_dev_fatal(dev, err,
664 "mapping shared-frames %lu/%lu port tx %u rx %u",
665 tx_ring_ref, rx_ring_ref,
666 tx_evtchn, rx_evtchn);
667 goto err;
668 }
669
670 err = 0;
671err: /* Regular return falls through with err == 0 */
672 kfree(xspath);
673 return err;
674}
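
connect_rings() now reads each queue's ring references and event channels from a per-queue XenStore directory: a single-queue frontend keeps its keys at the device root, while a multi-queue frontend uses a queue-N subdirectory per queue. A standalone sketch of that path scheme; the frontend path below is made up, and where the driver reserves 11 bytes for the "/queue-NNN" suffix, the sizing here simply covers any 32-bit id:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *queue_path(const char *otherend, unsigned int num_queues,
			unsigned int queue_id)
{
	size_t len = strlen(otherend) + sizeof("/queue-4294967295");
	char *path = malloc(len);

	if (!path)
		return NULL;
	if (num_queues == 1)
		snprintf(path, len, "%s", otherend);
	else
		snprintf(path, len, "%s/queue-%u", otherend, queue_id);
	return path;
}

int main(void)
{
	char *p0 = queue_path("/local/domain/3/device/vif/0", 1, 0);
	char *p1 = queue_path("/local/domain/3/device/vif/0", 4, 1);

	printf("%s/tx-ring-ref\n", p0);	/* single queue: key at the root */
	printf("%s/tx-ring-ref\n", p1);	/* multi-queue: .../queue-1/... */
	free(p0);
	free(p1);
	return 0;
}
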
675
676static int read_xenbus_vif_flags(struct backend_info *be)
677{
678 struct xenvif *vif = be->vif;
679 struct xenbus_device *dev = be->dev;
680 unsigned int rx_copy;
681 int err, val;
682
549 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 683 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
550 &rx_copy); 684 &rx_copy);
551 if (err == -ENOENT) { 685 if (err == -ENOENT) {
@@ -621,16 +755,6 @@ static int connect_rings(struct backend_info *be)
621 val = 0; 755 val = 0;
622 vif->ipv6_csum = !!val; 756 vif->ipv6_csum = !!val;
623 757
624 /* Map the shared frame, irq etc. */
625 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
626 tx_evtchn, rx_evtchn);
627 if (err) {
628 xenbus_dev_fatal(dev, err,
629 "mapping shared-frames %lu/%lu port tx %u rx %u",
630 tx_ring_ref, rx_ring_ref,
631 tx_evtchn, rx_evtchn);
632 return err;
633 }
634 return 0; 758 return 0;
635} 759}
636 760
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 158b5e639fc7..5a7872ac3566 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,12 @@
57#include <xen/interface/memory.h> 57#include <xen/interface/memory.h>
58#include <xen/interface/grant_table.h> 58#include <xen/interface/grant_table.h>
59 59
60/* Module parameters */
61static unsigned int xennet_max_queues;
62module_param_named(max_queues, xennet_max_queues, uint, 0644);
63MODULE_PARM_DESC(max_queues,
64 "Maximum number of queues per virtual interface");
65
60static const struct ethtool_ops xennet_ethtool_ops; 66static const struct ethtool_ops xennet_ethtool_ops;
61 67
62struct netfront_cb { 68struct netfront_cb {
@@ -73,6 +79,12 @@ struct netfront_cb {
73#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 79#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
74#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) 80#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
75 81
82/* Queue name is interface name with "-qNNN" appended */
83#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
84
85/* IRQ name is queue name with "-tx" or "-rx" appended */
86#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
87
76struct netfront_stats { 88struct netfront_stats {
77 u64 rx_packets; 89 u64 rx_packets;
78 u64 tx_packets; 90 u64 tx_packets;
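
A quick check of the two size macros added just above, assuming IFNAMSIZ is 16 as in <linux/if.h>: a 15-character interface name plus "-q" and up to three digits fits QUEUE_NAME_SIZE, and the "-tx"/"-rx" suffix then fits IRQ_NAME_SIZE:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ	16		/* assumed, matching <linux/if.h> */
#define QUEUE_NAME_SIZE	(IFNAMSIZ + 6)
#define IRQ_NAME_SIZE	(QUEUE_NAME_SIZE + 3)

int main(void)
{
	char queue_name[QUEUE_NAME_SIZE], irq_name[IRQ_NAME_SIZE];

	snprintf(queue_name, sizeof(queue_name), "%s-q%u", "eth0", 123u);
	snprintf(irq_name, sizeof(irq_name), "%s-tx", queue_name);
	printf("%s (%zu of at most %d chars)\n",
	       irq_name, strlen(irq_name), IRQ_NAME_SIZE - 1);
	return 0;
}
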
@@ -81,9 +93,12 @@ struct netfront_stats {
81 struct u64_stats_sync syncp; 93 struct u64_stats_sync syncp;
82}; 94};
83 95
84struct netfront_info { 96struct netfront_info;
85 struct list_head list; 97
86 struct net_device *netdev; 98struct netfront_queue {
99 unsigned int id; /* Queue ID, 0-based */
100 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
101 struct netfront_info *info;
87 102
88 struct napi_struct napi; 103 struct napi_struct napi;
89 104
@@ -93,10 +108,8 @@ struct netfront_info {
93 unsigned int tx_evtchn, rx_evtchn; 108 unsigned int tx_evtchn, rx_evtchn;
94 unsigned int tx_irq, rx_irq; 109 unsigned int tx_irq, rx_irq;
95 /* Only used when split event channels support is enabled */ 110 /* Only used when split event channels support is enabled */
96 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ 111 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
97 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ 112 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
98
99 struct xenbus_device *xbdev;
100 113
101 spinlock_t tx_lock; 114 spinlock_t tx_lock;
102 struct xen_netif_tx_front_ring tx; 115 struct xen_netif_tx_front_ring tx;
@@ -140,11 +153,21 @@ struct netfront_info {
140 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 153 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
141 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 154 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
142 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 155 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
156};
157
158struct netfront_info {
159 struct list_head list;
160 struct net_device *netdev;
161
162 struct xenbus_device *xbdev;
163
164 /* Multi-queue support */
165 struct netfront_queue *queues;
143 166
144 /* Statistics */ 167 /* Statistics */
145 struct netfront_stats __percpu *stats; 168 struct netfront_stats __percpu *stats;
146 169
147 unsigned long rx_gso_checksum_fixup; 170 atomic_t rx_gso_checksum_fixup;
148}; 171};
149 172
150struct netfront_rx_info { 173struct netfront_rx_info {
@@ -187,21 +210,21 @@ static int xennet_rxidx(RING_IDX idx)
187 return idx & (NET_RX_RING_SIZE - 1); 210 return idx & (NET_RX_RING_SIZE - 1);
188} 211}
189 212
190static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, 213static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
191 RING_IDX ri) 214 RING_IDX ri)
192{ 215{
193 int i = xennet_rxidx(ri); 216 int i = xennet_rxidx(ri);
194 struct sk_buff *skb = np->rx_skbs[i]; 217 struct sk_buff *skb = queue->rx_skbs[i];
195 np->rx_skbs[i] = NULL; 218 queue->rx_skbs[i] = NULL;
196 return skb; 219 return skb;
197} 220}
198 221
199static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, 222static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
200 RING_IDX ri) 223 RING_IDX ri)
201{ 224{
202 int i = xennet_rxidx(ri); 225 int i = xennet_rxidx(ri);
203 grant_ref_t ref = np->grant_rx_ref[i]; 226 grant_ref_t ref = queue->grant_rx_ref[i];
204 np->grant_rx_ref[i] = GRANT_INVALID_REF; 227 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
205 return ref; 228 return ref;
206} 229}
207 230
@@ -221,41 +244,40 @@ static bool xennet_can_sg(struct net_device *dev)
221 244
222static void rx_refill_timeout(unsigned long data) 245static void rx_refill_timeout(unsigned long data)
223{ 246{
224 struct net_device *dev = (struct net_device *)data; 247 struct netfront_queue *queue = (struct netfront_queue *)data;
225 struct netfront_info *np = netdev_priv(dev); 248 napi_schedule(&queue->napi);
226 napi_schedule(&np->napi);
227} 249}
228 250
229static int netfront_tx_slot_available(struct netfront_info *np) 251static int netfront_tx_slot_available(struct netfront_queue *queue)
230{ 252{
231 return (np->tx.req_prod_pvt - np->tx.rsp_cons) < 253 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
232 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); 254 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
233} 255}
234 256
235static void xennet_maybe_wake_tx(struct net_device *dev) 257static void xennet_maybe_wake_tx(struct netfront_queue *queue)
236{ 258{
237 struct netfront_info *np = netdev_priv(dev); 259 struct net_device *dev = queue->info->netdev;
260 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
238 261
239 if (unlikely(netif_queue_stopped(dev)) && 262 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
240 netfront_tx_slot_available(np) && 263 netfront_tx_slot_available(queue) &&
241 likely(netif_running(dev))) 264 likely(netif_running(dev)))
242 netif_wake_queue(dev); 265 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
243} 266}
244 267
245static void xennet_alloc_rx_buffers(struct net_device *dev) 268static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
246{ 269{
247 unsigned short id; 270 unsigned short id;
248 struct netfront_info *np = netdev_priv(dev);
249 struct sk_buff *skb; 271 struct sk_buff *skb;
250 struct page *page; 272 struct page *page;
251 int i, batch_target, notify; 273 int i, batch_target, notify;
252 RING_IDX req_prod = np->rx.req_prod_pvt; 274 RING_IDX req_prod = queue->rx.req_prod_pvt;
253 grant_ref_t ref; 275 grant_ref_t ref;
254 unsigned long pfn; 276 unsigned long pfn;
255 void *vaddr; 277 void *vaddr;
256 struct xen_netif_rx_request *req; 278 struct xen_netif_rx_request *req;
257 279
258 if (unlikely(!netif_carrier_ok(dev))) 280 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
259 return; 281 return;
260 282
261 /* 283 /*
@@ -264,9 +286,10 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
264 * allocator, so should reduce the chance of failed allocation requests 286 * allocator, so should reduce the chance of failed allocation requests
265 * both for ourself and for other kernel subsystems. 287 * both for ourself and for other kernel subsystems.
266 */ 288 */
267 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); 289 batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
268 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { 290 for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
269 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, 291 skb = __netdev_alloc_skb(queue->info->netdev,
292 RX_COPY_THRESHOLD + NET_IP_ALIGN,
270 GFP_ATOMIC | __GFP_NOWARN); 293 GFP_ATOMIC | __GFP_NOWARN);
271 if (unlikely(!skb)) 294 if (unlikely(!skb))
272 goto no_skb; 295 goto no_skb;
@@ -279,7 +302,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
279 kfree_skb(skb); 302 kfree_skb(skb);
280no_skb: 303no_skb:
281 /* Could not allocate any skbuffs. Try again later. */ 304 /* Could not allocate any skbuffs. Try again later. */
282 mod_timer(&np->rx_refill_timer, 305 mod_timer(&queue->rx_refill_timer,
283 jiffies + (HZ/10)); 306 jiffies + (HZ/10));
284 307
285 /* Any skbuffs queued for refill? Force them out. */ 308 /* Any skbuffs queued for refill? Force them out. */
@@ -289,44 +312,44 @@ no_skb:
 		}
 
 		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
-		__skb_queue_tail(&np->rx_batch, skb);
+		__skb_queue_tail(&queue->rx_batch, skb);
 	}
 
 	/* Is the batch large enough to be worthwhile? */
-	if (i < (np->rx_target/2)) {
-		if (req_prod > np->rx.sring->req_prod)
+	if (i < (queue->rx_target/2)) {
+		if (req_prod > queue->rx.sring->req_prod)
 			goto push;
 		return;
 	}
 
 	/* Adjust our fill target if we risked running out of buffers. */
-	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-	    ((np->rx_target *= 2) > np->rx_max_target))
-		np->rx_target = np->rx_max_target;
+	if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
+	    ((queue->rx_target *= 2) > queue->rx_max_target))
+		queue->rx_target = queue->rx_max_target;
 
  refill:
 	for (i = 0; ; i++) {
-		skb = __skb_dequeue(&np->rx_batch);
+		skb = __skb_dequeue(&queue->rx_batch);
 		if (skb == NULL)
 			break;
 
-		skb->dev = dev;
+		skb->dev = queue->info->netdev;
 
 		id = xennet_rxidx(req_prod + i);
 
-		BUG_ON(np->rx_skbs[id]);
-		np->rx_skbs[id] = skb;
+		BUG_ON(queue->rx_skbs[id]);
+		queue->rx_skbs[id] = skb;
 
-		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 		BUG_ON((signed short)ref < 0);
-		np->grant_rx_ref[id] = ref;
+		queue->grant_rx_ref[id] = ref;
 
 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
-		req = RING_GET_REQUEST(&np->rx, req_prod + i);
+		req = RING_GET_REQUEST(&queue->rx, req_prod + i);
 		gnttab_grant_foreign_access_ref(ref,
-						np->xbdev->otherend_id,
+						queue->info->xbdev->otherend_id,
 						pfn_to_mfn(pfn),
 						0);
 
@@ -337,72 +360,77 @@ no_skb:
 	wmb(); /* barrier so backend seens requests */
 
 	/* Above is a suitable barrier to ensure backend will see requests. */
-	np->rx.req_prod_pvt = req_prod + i;
+	queue->rx.req_prod_pvt = req_prod + i;
  push:
-	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
-		notify_remote_via_irq(np->rx_irq);
+		notify_remote_via_irq(queue->rx_irq);
 }
 
 static int xennet_open(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i = 0;
+	struct netfront_queue *queue = NULL;
 
-	napi_enable(&np->napi);
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		napi_enable(&queue->napi);
 
-	spin_lock_bh(&np->rx_lock);
-	if (netif_carrier_ok(dev)) {
-		xennet_alloc_rx_buffers(dev);
-		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			napi_schedule(&np->napi);
+		spin_lock_bh(&queue->rx_lock);
+		if (netif_carrier_ok(dev)) {
+			xennet_alloc_rx_buffers(queue);
+			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
+			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
+				napi_schedule(&queue->napi);
+		}
+		spin_unlock_bh(&queue->rx_lock);
 	}
-	spin_unlock_bh(&np->rx_lock);
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
 
-static void xennet_tx_buf_gc(struct net_device *dev)
+static void xennet_tx_buf_gc(struct netfront_queue *queue)
 {
 	RING_IDX cons, prod;
 	unsigned short id;
-	struct netfront_info *np = netdev_priv(dev);
 	struct sk_buff *skb;
 
-	BUG_ON(!netif_carrier_ok(dev));
+	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
 	do {
-		prod = np->tx.sring->rsp_prod;
+		prod = queue->tx.sring->rsp_prod;
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
-		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 			struct xen_netif_tx_response *txrsp;
 
-			txrsp = RING_GET_RESPONSE(&np->tx, cons);
+			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 			if (txrsp->status == XEN_NETIF_RSP_NULL)
 				continue;
 
 			id = txrsp->id;
-			skb = np->tx_skbs[id].skb;
+			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
-				np->grant_tx_ref[id]) != 0)) {
+				queue->grant_tx_ref[id]) != 0)) {
 				pr_alert("%s: warning -- grant still in use by backend domain\n",
 					 __func__);
 				BUG();
 			}
 			gnttab_end_foreign_access_ref(
-				np->grant_tx_ref[id], GNTMAP_readonly);
+				queue->grant_tx_ref[id], GNTMAP_readonly);
 			gnttab_release_grant_reference(
-				&np->gref_tx_head, np->grant_tx_ref[id]);
-			np->grant_tx_ref[id] = GRANT_INVALID_REF;
-			np->grant_tx_page[id] = NULL;
-			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+				&queue->gref_tx_head, queue->grant_tx_ref[id]);
+			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+			queue->grant_tx_page[id] = NULL;
+			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
 
-		np->tx.rsp_cons = prod;
+		queue->tx.rsp_cons = prod;
 
 		/*
 		 * Set a new event, then check for race with update of tx_cons.
@@ -412,21 +440,20 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 		 * data is outstanding: in such cases notification from Xen is
 		 * likely to be the only kick that we'll get.
 		 */
-		np->tx.sring->rsp_event =
-			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+		queue->tx.sring->rsp_event =
+			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
 		mb(); /* update shared area */
-	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
 
-	xennet_maybe_wake_tx(dev);
+	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 			      struct xen_netif_tx_request *tx)
 {
-	struct netfront_info *np = netdev_priv(dev);
 	char *data = skb->data;
 	unsigned long mfn;
-	RING_IDX prod = np->tx.req_prod_pvt;
+	RING_IDX prod = queue->tx.req_prod_pvt;
 	int frags = skb_shinfo(skb)->nr_frags;
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
@@ -443,19 +470,19 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		data += tx->size;
 		offset = 0;
 
-		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-		np->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&np->tx, prod++);
+		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+		queue->tx_skbs[id].skb = skb_get(skb);
+		tx = RING_GET_REQUEST(&queue->tx, prod++);
 		tx->id = id;
-		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 		BUG_ON((signed short)ref < 0);
 
 		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
-		np->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = np->grant_tx_ref[id] = ref;
+		queue->grant_tx_page[id] = virt_to_page(data);
+		tx->gref = queue->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
 		tx->flags = 0;
@@ -487,21 +514,21 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 
 			tx->flags |= XEN_NETTXF_more_data;
 
-			id = get_id_from_freelist(&np->tx_skb_freelist,
-						  np->tx_skbs);
-			np->tx_skbs[id].skb = skb_get(skb);
-			tx = RING_GET_REQUEST(&np->tx, prod++);
+			id = get_id_from_freelist(&queue->tx_skb_freelist,
+						  queue->tx_skbs);
+			queue->tx_skbs[id].skb = skb_get(skb);
+			tx = RING_GET_REQUEST(&queue->tx, prod++);
 			tx->id = id;
-			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 			BUG_ON((signed short)ref < 0);
 
 			mfn = pfn_to_mfn(page_to_pfn(page));
 			gnttab_grant_foreign_access_ref(ref,
-							np->xbdev->otherend_id,
+							queue->info->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
-			np->grant_tx_page[id] = page;
-			tx->gref = np->grant_tx_ref[id] = ref;
+			queue->grant_tx_page[id] = page;
+			tx->gref = queue->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
 			tx->flags = 0;
@@ -518,7 +545,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		}
 	}
 
-	np->tx.req_prod_pvt = prod;
+	queue->tx.req_prod_pvt = prod;
 }
 
 /*
@@ -544,6 +571,24 @@ static int xennet_count_skb_frag_slots(struct sk_buff *skb)
 	return pages;
 }
 
+static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv, select_queue_fallback_t fallback)
+{
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u32 hash;
+	u16 queue_idx;
+
+	/* First, check if there is only one queue */
+	if (num_queues == 1) {
+		queue_idx = 0;
+	} else {
+		hash = skb_get_hash(skb);
+		queue_idx = hash % num_queues;
+	}
+
+	return queue_idx;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned short id;
@@ -559,6 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
 	unsigned long flags;
+	struct netfront_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u16 queue_index;
+
+	/* Drop the packet if no queues are set up */
+	if (num_queues < 1)
+		goto drop;
+	/* Determine which queue to transmit this SKB on */
+	queue_index = skb_get_queue_mapping(skb);
+	queue = &np->queues[queue_index];
 
 	/* If skb->len is too big for wire format, drop skb and alert
 	 * user about misconfiguration.
@@ -578,30 +633,30 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	spin_lock_irqsave(&np->tx_lock, flags);
+	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (slots > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
-		spin_unlock_irqrestore(&np->tx_lock, flags);
+		spin_unlock_irqrestore(&queue->tx_lock, flags);
 		goto drop;
 	}
 
-	i = np->tx.req_prod_pvt;
+	i = queue->tx.req_prod_pvt;
 
-	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-	np->tx_skbs[id].skb = skb;
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	queue->tx_skbs[id].skb = skb;
 
-	tx = RING_GET_REQUEST(&np->tx, i);
+	tx = RING_GET_REQUEST(&queue->tx, i);
 
 	tx->id = id;
-	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	BUG_ON((signed short)ref < 0);
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
-		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	np->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = np->grant_tx_ref[id] = ref;
+		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	queue->grant_tx_page[id] = virt_to_page(data);
+	tx->gref = queue->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
 
@@ -617,7 +672,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&np->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, ++i);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -632,14 +687,14 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	np->tx.req_prod_pvt = i + 1;
+	queue->tx.req_prod_pvt = i + 1;
 
-	xennet_make_frags(skb, dev, tx);
+	xennet_make_frags(skb, queue, tx);
 	tx->size = skb->len;
 
-	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
-		notify_remote_via_irq(np->tx_irq);
+		notify_remote_via_irq(queue->tx_irq);
 
 	u64_stats_update_begin(&stats->syncp);
 	stats->tx_bytes += skb->len;
@@ -647,12 +702,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u64_stats_update_end(&stats->syncp);
 
 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
-	xennet_tx_buf_gc(dev);
+	xennet_tx_buf_gc(queue);
 
-	if (!netfront_tx_slot_available(np))
-		netif_stop_queue(dev);
+	if (!netfront_tx_slot_available(queue))
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 
-	spin_unlock_irqrestore(&np->tx_lock, flags);
+	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 
@@ -665,32 +720,38 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static int xennet_close(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	netif_stop_queue(np->netdev);
-	napi_disable(&np->napi);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i;
+	struct netfront_queue *queue;
+	netif_tx_stop_all_queues(np->netdev);
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		napi_disable(&queue->napi);
+	}
 	return 0;
 }
 
-static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 				grant_ref_t ref)
 {
-	int new = xennet_rxidx(np->rx.req_prod_pvt);
+	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 
-	BUG_ON(np->rx_skbs[new]);
-	np->rx_skbs[new] = skb;
-	np->grant_rx_ref[new] = ref;
-	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
-	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
-	np->rx.req_prod_pvt++;
+	BUG_ON(queue->rx_skbs[new]);
+	queue->rx_skbs[new] = skb;
+	queue->grant_rx_ref[new] = ref;
+	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
+	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
+	queue->rx.req_prod_pvt++;
 }
 
-static int xennet_get_extras(struct netfront_info *np,
+static int xennet_get_extras(struct netfront_queue *queue,
 			     struct xen_netif_extra_info *extras,
 			     RING_IDX rp)
 
 {
 	struct xen_netif_extra_info *extra;
-	struct device *dev = &np->netdev->dev;
-	RING_IDX cons = np->rx.rsp_cons;
+	struct device *dev = &queue->info->netdev->dev;
+	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
 
 	do {
@@ -705,7 +766,7 @@ static int xennet_get_extras(struct netfront_info *np,
 		}
 
 		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&np->rx, ++cons);
+			RING_GET_RESPONSE(&queue->rx, ++cons);
 
 		if (unlikely(!extra->type ||
 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
@@ -718,33 +779,33 @@ static int xennet_get_extras(struct netfront_info *np,
 			       sizeof(*extra));
 		}
 
-		skb = xennet_get_rx_skb(np, cons);
-		ref = xennet_get_rx_ref(np, cons);
-		xennet_move_rx_slot(np, skb, ref);
+		skb = xennet_get_rx_skb(queue, cons);
+		ref = xennet_get_rx_ref(queue, cons);
+		xennet_move_rx_slot(queue, skb, ref);
 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-	np->rx.rsp_cons = cons;
+	queue->rx.rsp_cons = cons;
 	return err;
 }
 
-static int xennet_get_responses(struct netfront_info *np,
+static int xennet_get_responses(struct netfront_queue *queue,
 				struct netfront_rx_info *rinfo, RING_IDX rp,
 				struct sk_buff_head *list)
 {
 	struct xen_netif_rx_response *rx = &rinfo->rx;
 	struct xen_netif_extra_info *extras = rinfo->extras;
-	struct device *dev = &np->netdev->dev;
-	RING_IDX cons = np->rx.rsp_cons;
-	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
-	grant_ref_t ref = xennet_get_rx_ref(np, cons);
+	struct device *dev = &queue->info->netdev->dev;
+	RING_IDX cons = queue->rx.rsp_cons;
+	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
 
 	if (rx->flags & XEN_NETRXF_extra_info) {
-		err = xennet_get_extras(np, extras, rp);
-		cons = np->rx.rsp_cons;
+		err = xennet_get_extras(queue, extras, rp);
+		cons = queue->rx.rsp_cons;
 	}
 
 	for (;;) {
@@ -753,7 +814,7 @@ static int xennet_get_responses(struct netfront_info *np,
 			if (net_ratelimit())
 				dev_warn(dev, "rx->offset: %x, size: %u\n",
 					 rx->offset, rx->status);
-			xennet_move_rx_slot(np, skb, ref);
+			xennet_move_rx_slot(queue, skb, ref);
 			err = -EINVAL;
 			goto next;
 		}
@@ -774,7 +835,7 @@ static int xennet_get_responses(struct netfront_info *np,
 		ret = gnttab_end_foreign_access_ref(ref, 0);
 		BUG_ON(!ret);
 
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
 		__skb_queue_tail(list, skb);
 
@@ -789,9 +850,9 @@ next:
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
-		skb = xennet_get_rx_skb(np, cons + slots);
-		ref = xennet_get_rx_ref(np, cons + slots);
+		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		skb = xennet_get_rx_skb(queue, cons + slots);
+		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
 	}
 
@@ -802,7 +863,7 @@ next:
 	}
 
 	if (unlikely(err))
-		np->rx.rsp_cons = cons + slots;
+		queue->rx.rsp_cons = cons + slots;
 
 	return err;
 }
@@ -836,17 +897,17 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 	return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_info *np,
+static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	RING_IDX cons = np->rx.rsp_cons;
+	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
 		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&np->rx, ++cons);
+			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
@@ -879,7 +940,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 	 */
 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 		struct netfront_info *np = netdev_priv(dev);
-		np->rx_gso_checksum_fixup++;
+		atomic_inc(&np->rx_gso_checksum_fixup);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		recalculate_partial_csum = true;
 	}
@@ -891,11 +952,10 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
 
-static int handle_incoming_queue(struct net_device *dev,
+static int handle_incoming_queue(struct netfront_queue *queue,
 				 struct sk_buff_head *rxq)
 {
-	struct netfront_info *np = netdev_priv(dev);
-	struct netfront_stats *stats = this_cpu_ptr(np->stats);
+	struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
 	int packets_dropped = 0;
 	struct sk_buff *skb;
 
@@ -906,13 +966,13 @@ static int handle_incoming_queue(struct net_device *dev,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
-		skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 		skb_reset_network_header(skb);
 
-		if (checksum_setup(dev, skb)) {
+		if (checksum_setup(queue->info->netdev, skb)) {
 			kfree_skb(skb);
 			packets_dropped++;
-			dev->stats.rx_errors++;
+			queue->info->netdev->stats.rx_errors++;
 			continue;
 		}
 
@@ -922,7 +982,7 @@ static int handle_incoming_queue(struct net_device *dev,
 		u64_stats_update_end(&stats->syncp);
 
 		/* Pass it up. */
-		napi_gro_receive(&np->napi, skb);
+		napi_gro_receive(&queue->napi, skb);
 	}
 
 	return packets_dropped;
@@ -930,8 +990,8 @@ static int handle_incoming_queue(struct net_device *dev,
 
 static int xennet_poll(struct napi_struct *napi, int budget)
 {
-	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
+	struct net_device *dev = queue->info->netdev;
 	struct sk_buff *skb;
 	struct netfront_rx_info rinfo;
 	struct xen_netif_rx_response *rx = &rinfo.rx;
@@ -944,29 +1004,29 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	unsigned long flags;
 	int err;
 
-	spin_lock(&np->rx_lock);
+	spin_lock(&queue->rx_lock);
 
 	skb_queue_head_init(&rxq);
 	skb_queue_head_init(&errq);
 	skb_queue_head_init(&tmpq);
 
-	rp = np->rx.sring->rsp_prod;
+	rp = queue->rx.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
 
-	i = np->rx.rsp_cons;
+	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
 		memset(extras, 0, sizeof(rinfo.extras));
 
-		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
 
 		if (unlikely(err)) {
err:
 			while ((skb = __skb_dequeue(&tmpq)))
 				__skb_queue_tail(&errq, skb);
 			dev->stats.rx_errors++;
-			i = np->rx.rsp_cons;
+			i = queue->rx.rsp_cons;
 			continue;
 		}
 
@@ -978,7 +1038,7 @@ err:
 
 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 				__skb_queue_head(&tmpq, skb);
-				np->rx.rsp_cons += skb_queue_len(&tmpq);
+				queue->rx.rsp_cons += skb_queue_len(&tmpq);
 				goto err;
 			}
 		}
@@ -992,7 +1052,7 @@ err:
 		skb->data_len = rx->status;
 		skb->len += rx->status;
 
-		i = xennet_fill_frags(np, skb, &tmpq);
+		i = xennet_fill_frags(queue, skb, &tmpq);
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1001,22 +1061,22 @@ err:
 
 		__skb_queue_tail(&rxq, skb);
 
-		np->rx.rsp_cons = ++i;
+		queue->rx.rsp_cons = ++i;
 		work_done++;
 	}
 
 	__skb_queue_purge(&errq);
 
-	work_done -= handle_incoming_queue(dev, &rxq);
+	work_done -= handle_incoming_queue(queue, &rxq);
 
 	/* If we get a callback with very few responses, reduce fill target. */
 	/* NB. Note exponential increase, linear decrease. */
-	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-	     ((3*np->rx_target) / 4)) &&
-	    (--np->rx_target < np->rx_min_target))
-		np->rx_target = np->rx_min_target;
+	if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
+	     ((3*queue->rx_target) / 4)) &&
+	    (--queue->rx_target < queue->rx_min_target))
+		queue->rx_target = queue->rx_min_target;
 
-	xennet_alloc_rx_buffers(dev);
+	xennet_alloc_rx_buffers(queue);
 
 	if (work_done < budget) {
 		int more_to_do = 0;
@@ -1025,14 +1085,14 @@ err:
 
 		local_irq_save(flags);
 
-		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
 		if (!more_to_do)
 			__napi_complete(napi);
 
 		local_irq_restore(flags);
 	}
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock(&queue->rx_lock);
 
 	return work_done;
 }
@@ -1080,43 +1140,43 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 	return tot;
 }
 
-static void xennet_release_tx_bufs(struct netfront_info *np)
+static void xennet_release_tx_bufs(struct netfront_queue *queue)
 {
 	struct sk_buff *skb;
 	int i;
 
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&np->tx_skbs[i]))
+		if (skb_entry_is_link(&queue->tx_skbs[i]))
 			continue;
 
-		skb = np->tx_skbs[i].skb;
-		get_page(np->grant_tx_page[i]);
-		gnttab_end_foreign_access(np->grant_tx_ref[i],
+		skb = queue->tx_skbs[i].skb;
+		get_page(queue->grant_tx_page[i]);
+		gnttab_end_foreign_access(queue->grant_tx_ref[i],
 					  GNTMAP_readonly,
-					  (unsigned long)page_address(np->grant_tx_page[i]));
-		np->grant_tx_page[i] = NULL;
-		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+					  (unsigned long)page_address(queue->grant_tx_page[i]));
+		queue->grant_tx_page[i] = NULL;
+		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
 	}
 }
 
-static void xennet_release_rx_bufs(struct netfront_info *np)
+static void xennet_release_rx_bufs(struct netfront_queue *queue)
 {
 	int id, ref;
 
-	spin_lock_bh(&np->rx_lock);
+	spin_lock_bh(&queue->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
 		struct sk_buff *skb;
 		struct page *page;
 
-		skb = np->rx_skbs[id];
+		skb = queue->rx_skbs[id];
 		if (!skb)
 			continue;
 
-		ref = np->grant_rx_ref[id];
+		ref = queue->grant_rx_ref[id];
 		if (ref == GRANT_INVALID_REF)
 			continue;
 
@@ -1128,21 +1188,28 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 		get_page(page);
 		gnttab_end_foreign_access(ref, 0,
 					  (unsigned long)page_address(page));
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
+		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
 
 		kfree_skb(skb);
 	}
 
-	spin_unlock_bh(&np->rx_lock);
+	spin_unlock_bh(&queue->rx_lock);
 }
 
 static void xennet_uninit(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	xennet_release_tx_bufs(np);
-	xennet_release_rx_bufs(np);
-	gnttab_free_grant_references(np->gref_tx_head);
-	gnttab_free_grant_references(np->gref_rx_head);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	struct netfront_queue *queue;
+	unsigned int i;
+
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+	}
 }
 
 static netdev_features_t xennet_fix_features(struct net_device *dev,
@@ -1203,25 +1270,24 @@ static int xennet_set_features(struct net_device *dev,
 
 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 {
-	struct netfront_info *np = dev_id;
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = dev_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&np->tx_lock, flags);
-	xennet_tx_buf_gc(dev);
-	spin_unlock_irqrestore(&np->tx_lock, flags);
+	spin_lock_irqsave(&queue->tx_lock, flags);
+	xennet_tx_buf_gc(queue);
+	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 {
-	struct netfront_info *np = dev_id;
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = dev_id;
+	struct net_device *dev = queue->info->netdev;
 
 	if (likely(netif_carrier_ok(dev) &&
-		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
-		napi_schedule(&np->napi);
+		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
@@ -1236,7 +1302,12 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xennet_poll_controller(struct net_device *dev)
 {
-	xennet_interrupt(0, dev);
+	/* Poll each queue */
+	struct netfront_info *info = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i;
+	for (i = 0; i < num_queues; ++i)
+		xennet_interrupt(0, &info->queues[i]);
 }
 #endif
 
@@ -1251,6 +1322,7 @@ static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_fix_features = xennet_fix_features,
 	.ndo_set_features = xennet_set_features,
+	.ndo_select_queue = xennet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xennet_poll_controller,
 #endif
@@ -1258,66 +1330,30 @@ static const struct net_device_ops xennet_netdev_ops = {
 
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
-	int i, err;
+	int err;
 	struct net_device *netdev;
 	struct netfront_info *np;
 
-	netdev = alloc_etherdev(sizeof(struct netfront_info));
+	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
 	if (!netdev)
 		return ERR_PTR(-ENOMEM);
 
 	np = netdev_priv(netdev);
 	np->xbdev = dev;
 
-	spin_lock_init(&np->tx_lock);
-	spin_lock_init(&np->rx_lock);
-
-	skb_queue_head_init(&np->rx_batch);
-	np->rx_target = RX_DFL_MIN_TARGET;
-	np->rx_min_target = RX_DFL_MIN_TARGET;
-	np->rx_max_target = RX_MAX_TARGET;
-
-	init_timer(&np->rx_refill_timer);
-	np->rx_refill_timer.data = (unsigned long)netdev;
-	np->rx_refill_timer.function = rx_refill_timeout;
+	/* No need to use rtnl_lock() before the call below as it
+	 * happens before register_netdev().
+	 */
+	netif_set_real_num_tx_queues(netdev, 0);
+	np->queues = NULL;
 
 	err = -ENOMEM;
 	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
 	if (np->stats == NULL)
 		goto exit;
 
-	/* Initialise tx_skbs as a free chain containing every entry. */
-	np->tx_skb_freelist = 0;
-	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&np->tx_skbs[i], i+1);
-		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-		np->grant_tx_page[i] = NULL;
-	}
-
-	/* Clear out rx_skbs */
-	for (i = 0; i < NET_RX_RING_SIZE; i++) {
-		np->rx_skbs[i] = NULL;
-		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-	}
-
-	/* A grant for every tx ring slot */
-	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
-					  &np->gref_tx_head) < 0) {
-		pr_alert("can't alloc tx grant refs\n");
-		err = -ENOMEM;
-		goto exit_free_stats;
-	}
-	/* A grant for every rx ring slot */
-	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
-					  &np->gref_rx_head) < 0) {
-		pr_alert("can't alloc rx grant refs\n");
-		err = -ENOMEM;
-		goto exit_free_tx;
-	}
-
 	netdev->netdev_ops = &xennet_netdev_ops;
 
-	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 			   NETIF_F_GSO_ROBUST;
 	netdev->hw_features = NETIF_F_SG |
@@ -1332,7 +1368,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	 */
 	netdev->features |= netdev->hw_features;
 
-	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+	netdev->ethtool_ops = &xennet_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
 	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
@@ -1343,10 +1379,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 
 	return netdev;
 
- exit_free_tx:
-	gnttab_free_grant_references(np->gref_tx_head);
- exit_free_stats:
-	free_percpu(np->stats);
  exit:
 	free_netdev(netdev);
 	return ERR_PTR(err);
@@ -1404,30 +1436,36 @@ static void xennet_end_access(int ref, void *page)
 
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
-	/* Stop old i/f to prevent errors whilst we rebuild the state. */
-	spin_lock_bh(&info->rx_lock);
-	spin_lock_irq(&info->tx_lock);
-	netif_carrier_off(info->netdev);
-	spin_unlock_irq(&info->tx_lock);
-	spin_unlock_bh(&info->rx_lock);
-
-	if (info->tx_irq && (info->tx_irq == info->rx_irq))
-		unbind_from_irqhandler(info->tx_irq, info);
-	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
-		unbind_from_irqhandler(info->tx_irq, info);
-		unbind_from_irqhandler(info->rx_irq, info);
-	}
-	info->tx_evtchn = info->rx_evtchn = 0;
-	info->tx_irq = info->rx_irq = 0;
+	unsigned int i = 0;
+	struct netfront_queue *queue = NULL;
+	unsigned int num_queues = info->netdev->real_num_tx_queues;
+
+	for (i = 0; i < num_queues; ++i) {
+		/* Stop old i/f to prevent errors whilst we rebuild the state. */
+		spin_lock_bh(&queue->rx_lock);
+		spin_lock_irq(&queue->tx_lock);
+		netif_carrier_off(queue->info->netdev);
+		spin_unlock_irq(&queue->tx_lock);
+		spin_unlock_bh(&queue->rx_lock);
+
+		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+			unbind_from_irqhandler(queue->tx_irq, queue);
+		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+			unbind_from_irqhandler(queue->tx_irq, queue);
+			unbind_from_irqhandler(queue->rx_irq, queue);
+		}
+		queue->tx_evtchn = queue->rx_evtchn = 0;
+		queue->tx_irq = queue->rx_irq = 0;
 
-	/* End access and free the pages */
-	xennet_end_access(info->tx_ring_ref, info->tx.sring);
-	xennet_end_access(info->rx_ring_ref, info->rx.sring);
+		/* End access and free the pages */
+		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
+		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
 
-	info->tx_ring_ref = GRANT_INVALID_REF;
-	info->rx_ring_ref = GRANT_INVALID_REF;
-	info->tx.sring = NULL;
-	info->rx.sring = NULL;
+		queue->tx_ring_ref = GRANT_INVALID_REF;
+		queue->rx_ring_ref = GRANT_INVALID_REF;
+		queue->tx.sring = NULL;
+		queue->rx.sring = NULL;
+	}
 }
@@ -1468,100 +1506,86 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
 	return 0;
 }
 
-static int setup_netfront_single(struct netfront_info *info)
+static int setup_netfront_single(struct netfront_queue *queue)
 {
 	int err;
 
-	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
 	if (err < 0)
 		goto fail;
 
-	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
 					xennet_interrupt,
-					0, info->netdev->name, info);
+					0, queue->info->netdev->name, queue);
 	if (err < 0)
 		goto bind_fail;
-	info->rx_evtchn = info->tx_evtchn;
-	info->rx_irq = info->tx_irq = err;
+	queue->rx_evtchn = queue->tx_evtchn;
+	queue->rx_irq = queue->tx_irq = err;
 
 	return 0;
 
bind_fail:
-	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-	info->tx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+	queue->tx_evtchn = 0;
fail:
 	return err;
 }
 
-static int setup_netfront_split(struct netfront_info *info)
+static int setup_netfront_split(struct netfront_queue *queue)
 {
 	int err;
 
-	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
 	if (err < 0)
 		goto fail;
-	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
 	if (err < 0)
 		goto alloc_rx_evtchn_fail;
 
-	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
-		 "%s-tx", info->netdev->name);
-	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+		 "%s-tx", queue->name);
+	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
 					xennet_tx_interrupt,
-					0, info->tx_irq_name, info);
+					0, queue->tx_irq_name, queue);
 	if (err < 0)
 		goto bind_tx_fail;
-	info->tx_irq = err;
+	queue->tx_irq = err;
 
-	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
-		 "%s-rx", info->netdev->name);
-	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+		 "%s-rx", queue->name);
+	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
 					xennet_rx_interrupt,
-					0, info->rx_irq_name, info);
+					0, queue->rx_irq_name, queue);
 	if (err < 0)
 		goto bind_rx_fail;
-	info->rx_irq = err;
+	queue->rx_irq = err;
 
 	return 0;
 
bind_rx_fail:
-	unbind_from_irqhandler(info->tx_irq, info);
-	info->tx_irq = 0;
+	unbind_from_irqhandler(queue->tx_irq, queue);
+	queue->tx_irq = 0;
bind_tx_fail:
-	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
-	info->rx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
+	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
-	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-	info->tx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+	queue->tx_evtchn = 0;
fail:
 	return err;
 }
 
-static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+static int setup_netfront(struct xenbus_device *dev,
+			struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
 	int err;
-	struct net_device *netdev = info->netdev;
-	unsigned int feature_split_evtchn;
 
-	info->tx_ring_ref = GRANT_INVALID_REF;
-	info->rx_ring_ref = GRANT_INVALID_REF;
-	info->rx.sring = NULL;
-	info->tx.sring = NULL;
-	netdev->irq = 0;
-
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-split-event-channels", "%u",
-			   &feature_split_evtchn);
-	if (err < 0)
-		feature_split_evtchn = 0;
-
-	err = xen_net_read_mac(dev, netdev->dev_addr);
-	if (err) {
-		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-		goto fail;
-	}
+	queue->tx_ring_ref = GRANT_INVALID_REF;
+	queue->rx_ring_ref = GRANT_INVALID_REF;
+	queue->rx.sring = NULL;
+	queue->tx.sring = NULL;
 
 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!txs) {
@@ -1570,13 +1594,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 		goto fail;
 	}
 	SHARED_RING_INIT(txs);
-	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
 
 	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
 	if (err < 0)
 		goto grant_tx_ring_fail;
+	queue->tx_ring_ref = err;
 
-	info->tx_ring_ref = err;
 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!rxs) {
 		err = -ENOMEM;
@@ -1584,21 +1608,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 		goto alloc_rx_ring_fail;
 	}
 	SHARED_RING_INIT(rxs);
-	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
 
 	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
 	if (err < 0)
 		goto grant_rx_ring_fail;
-	info->rx_ring_ref = err;
+	queue->rx_ring_ref = err;
 
 	if (feature_split_evtchn)
-		err = setup_netfront_split(info);
+		err = setup_netfront_split(queue);
 	/* setup single event channel if
 	 * a) feature-split-event-channels == 0
 	 * b) feature-split-event-channels == 1 but failed to setup
 	 */
 	if (!feature_split_evtchn || (feature_split_evtchn && err))
-		err = setup_netfront_single(info);
+		err = setup_netfront_single(queue);
 
 	if (err)
 		goto alloc_evtchn_fail;
@@ -1570,13 +1594,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1570 goto fail; 1594 goto fail;
1571 } 1595 }
1572 SHARED_RING_INIT(txs); 1596 SHARED_RING_INIT(txs);
1573 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 1597 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1574 1598
1575 err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 1599 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1576 if (err < 0) 1600 if (err < 0)
1577 goto grant_tx_ring_fail; 1601 goto grant_tx_ring_fail;
1602 queue->tx_ring_ref = err;
1578 1603
1579 info->tx_ring_ref = err;
1580 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1604 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1581 if (!rxs) { 1605 if (!rxs) {
1582 err = -ENOMEM; 1606 err = -ENOMEM;
@@ -1584,21 +1608,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1584 goto alloc_rx_ring_fail; 1608 goto alloc_rx_ring_fail;
1585 } 1609 }
1586 SHARED_RING_INIT(rxs); 1610 SHARED_RING_INIT(rxs);
1587 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 1611 FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1588 1612
1589 err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 1613 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1590 if (err < 0) 1614 if (err < 0)
1591 goto grant_rx_ring_fail; 1615 goto grant_rx_ring_fail;
1592 info->rx_ring_ref = err; 1616 queue->rx_ring_ref = err;
1593 1617
1594 if (feature_split_evtchn) 1618 if (feature_split_evtchn)
1595 err = setup_netfront_split(info); 1619 err = setup_netfront_split(queue);
1596 /* setup single event channel if 1620 /* setup single event channel if
1597 * a) feature-split-event-channels == 0 1621 * a) feature-split-event-channels == 0
1598 * b) feature-split-event-channels == 1 but failed to setup 1622 * b) feature-split-event-channels == 1 but failed to setup
1599 */ 1623 */
1600 if (!feature_split_evtchn || (feature_split_evtchn && err)) 1624 if (!feature_split_evtchn || (feature_split_evtchn && err))
1601 err = setup_netfront_single(info); 1625 err = setup_netfront_single(queue);
1602 1626
1603 if (err) 1627 if (err)
1604 goto alloc_evtchn_fail; 1628 goto alloc_evtchn_fail;
@@ -1609,17 +1633,163 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 	 * granted pages because backend is not accessing it at this point.
 	 */
alloc_evtchn_fail:
-	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
 	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
-	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
 	free_page((unsigned long)txs);
fail:
 	return err;
 }
 
+/* Queue-specific initialisation
+ * This used to be done in xennet_create_dev() but must now
+ * be run per-queue.
+ */
+static int xennet_init_queue(struct netfront_queue *queue)
+{
+	unsigned short i;
+	int err = 0;
+
+	spin_lock_init(&queue->tx_lock);
+	spin_lock_init(&queue->rx_lock);
+
+	skb_queue_head_init(&queue->rx_batch);
+	queue->rx_target = RX_DFL_MIN_TARGET;
+	queue->rx_min_target = RX_DFL_MIN_TARGET;
+	queue->rx_max_target = RX_MAX_TARGET;
+
+	init_timer(&queue->rx_refill_timer);
+	queue->rx_refill_timer.data = (unsigned long)queue;
+	queue->rx_refill_timer.function = rx_refill_timeout;
+
+	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+		 queue->info->netdev->name, queue->id);
+
+	/* Initialise tx_skbs as a free chain containing every entry. */
+	queue->tx_skb_freelist = 0;
+	for (i = 0; i < NET_TX_RING_SIZE; i++) {
+		skb_entry_set_link(&queue->tx_skbs[i], i+1);
+		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_tx_page[i] = NULL;
+	}
+
+	/* Clear out rx_skbs */
+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
+		queue->rx_skbs[i] = NULL;
+		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+	}
+
+	/* A grant for every tx ring slot */
+	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+					  &queue->gref_tx_head) < 0) {
+		pr_alert("can't alloc tx grant refs\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* A grant for every rx ring slot */
+	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+					  &queue->gref_rx_head) < 0) {
+		pr_alert("can't alloc rx grant refs\n");
+		err = -ENOMEM;
+		goto exit_free_tx;
+	}
+
+	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
+
+	return 0;
+
+ exit_free_tx:
+	gnttab_free_grant_references(queue->gref_tx_head);
+ exit:
+	return err;
+}
+
+static int write_queue_xenstore_keys(struct netfront_queue *queue,
+				     struct xenbus_transaction *xbt, int write_hierarchical)
+{
+	/* Write the queue-specific keys into XenStore in the traditional
+	 * way for a single queue, or in a queue subkeys for multiple
+	 * queues.
+	 */
+	struct xenbus_device *dev = queue->info->xbdev;
+	int err;
+	const char *message;
+	char *path;
+	size_t pathsize;
+
+	/* Choose the correct place to write the keys */
+	if (write_hierarchical) {
+		pathsize = strlen(dev->nodename) + 10;
+		path = kzalloc(pathsize, GFP_KERNEL);
+		if (!path) {
+			err = -ENOMEM;
+			message = "out of memory while writing ring references";
+			goto error;
+		}
+		snprintf(path, pathsize, "%s/queue-%u",
+			 dev->nodename, queue->id);
+	} else {
+		path = (char *)dev->nodename;
+	}
+
+	/* Write ring references */
+	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
+			    queue->tx_ring_ref);
+	if (err) {
+		message = "writing tx-ring-ref";
+		goto error;
+	}
+
+	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
+			    queue->rx_ring_ref);
+	if (err) {
+		message = "writing rx-ring-ref";
+		goto error;
+	}
+
+	/* Write event channels; taking into account both shared
+	 * and split event channel scenarios.
+	 */
+	if (queue->tx_evtchn == queue->rx_evtchn) {
+		/* Shared event channel */
+		err = xenbus_printf(*xbt, path,
+				    "event-channel", "%u", queue->tx_evtchn);
+		if (err) {
+			message = "writing event-channel";
+			goto error;
+		}
+	} else {
+		/* Split event channels */
+		err = xenbus_printf(*xbt, path,
+				    "event-channel-tx", "%u", queue->tx_evtchn);
+		if (err) {
+			message = "writing event-channel-tx";
+			goto error;
+		}
+
+		err = xenbus_printf(*xbt, path,
+				    "event-channel-rx", "%u", queue->rx_evtchn);
+		if (err) {
+			message = "writing event-channel-rx";
+			goto error;
+		}
+	}
+
+	if (write_hierarchical)
+		kfree(path);
+	return 0;
+
+error:
+	if (write_hierarchical)
+		kfree(path);
+	xenbus_dev_fatal(dev, err, "%s", message);
+	return err;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
 			   struct netfront_info *info)
@@ -1627,11 +1797,83 @@ static int talk_to_netback(struct xenbus_device *dev,
1627 const char *message; 1797 const char *message;
1628 struct xenbus_transaction xbt; 1798 struct xenbus_transaction xbt;
1629 int err; 1799 int err;
1800 unsigned int feature_split_evtchn;
1801 unsigned int i = 0;
1802 unsigned int max_queues = 0;
1803 struct netfront_queue *queue = NULL;
1804 unsigned int num_queues = 1;
1630 1805
1631 /* Create shared ring, alloc event channel. */ 1806 info->netdev->irq = 0;
1632 err = setup_netfront(dev, info); 1807
1633 if (err) 1808 /* Check if backend supports multiple queues */
1809 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1810 "multi-queue-max-queues", "%u", &max_queues);
1811 if (err < 0)
1812 max_queues = 1;
1813 num_queues = min(max_queues, xennet_max_queues);
1814
1815 /* Check feature-split-event-channels */
1816 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1817 "feature-split-event-channels", "%u",
1818 &feature_split_evtchn);
1819 if (err < 0)
1820 feature_split_evtchn = 0;
1821
1822 /* Read mac addr. */
1823 err = xen_net_read_mac(dev, info->netdev->dev_addr);
1824 if (err) {
1825 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1826 goto out;
1827 }
1828
1829 /* Allocate array of queues */
1830 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
1831 if (!info->queues) {
1832 err = -ENOMEM;
1634 goto out; 1833 goto out;
1834 }
1835 rtnl_lock();
1836 netif_set_real_num_tx_queues(info->netdev, num_queues);
1837 rtnl_unlock();
1838
1839 /* Create shared ring, alloc event channel -- for each queue */
1840 for (i = 0; i < num_queues; ++i) {
1841 queue = &info->queues[i];
1842 queue->id = i;
1843 queue->info = info;
1844 err = xennet_init_queue(queue);
1845 if (err) {
1846 /* xennet_init_queue() cleans up after itself on failure,
1847 * but we still have to clean up any previously initialised
1848 * queues. If i > 0, shrink the real number of TX queues to i,
1849 * then goto destroy_ring, which calls xennet_disconnect_backend()
1850 * to tidy up.
1851 */
1852 if (i > 0) {
1853 rtnl_lock();
1854 netif_set_real_num_tx_queues(info->netdev, i);
1855 rtnl_unlock();
1856 goto destroy_ring;
1857 } else {
1858 goto out;
1859 }
1860 }
1861 err = setup_netfront(dev, queue, feature_split_evtchn);
1862 if (err) {
1863 /* As for xennet_init_queue(), setup_netfront() will tidy
1864 * up the current queue on error, but we need to clean up
1865 * those already allocated.
1866 */
1867 if (i > 0) {
1868 rtnl_lock();
1869 netif_set_real_num_tx_queues(info->netdev, i);
1870 rtnl_unlock();
1871 goto destroy_ring;
1872 } else {
1873 goto out;
1874 }
1875 }
1876 }
1635 1877
1636again: 1878again:
1637 err = xenbus_transaction_start(&xbt); 1879 err = xenbus_transaction_start(&xbt);
@@ -1640,41 +1882,29 @@ again:
1640 goto destroy_ring; 1882 goto destroy_ring;
1641 } 1883 }
1642 1884
1643 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", 1885 if (num_queues == 1) {
1644 info->tx_ring_ref); 1886 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1645 if (err) { 1887 if (err)
1646 message = "writing tx ring-ref"; 1888 goto abort_transaction_no_dev_fatal;
1647 goto abort_transaction;
1648 }
1649 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650 info->rx_ring_ref);
1651 if (err) {
1652 message = "writing rx ring-ref";
1653 goto abort_transaction;
1654 }
1655
1656 if (info->tx_evtchn == info->rx_evtchn) {
1657 err = xenbus_printf(xbt, dev->nodename,
1658 "event-channel", "%u", info->tx_evtchn);
1659 if (err) {
1660 message = "writing event-channel";
1661 goto abort_transaction;
1662 }
1663 } else { 1889 } else {
1664 err = xenbus_printf(xbt, dev->nodename, 1890 /* Write the number of queues */
1665 "event-channel-tx", "%u", info->tx_evtchn); 1891 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1892 "%u", num_queues);
1666 if (err) { 1893 if (err) {
1667 message = "writing event-channel-tx"; 1894 message = "writing multi-queue-num-queues";
1668 goto abort_transaction; 1895 goto abort_transaction_no_dev_fatal;
1669 } 1896 }
1670 err = xenbus_printf(xbt, dev->nodename, 1897
1671 "event-channel-rx", "%u", info->rx_evtchn); 1898 /* Write the keys for each queue */
1672 if (err) { 1899 for (i = 0; i < num_queues; ++i) {
1673 message = "writing event-channel-rx"; 1900 queue = &info->queues[i];
1674 goto abort_transaction; 1901 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1902 if (err)
1903 goto abort_transaction_no_dev_fatal;
1675 } 1904 }
1676 } 1905 }
1677 1906
1907 /* The remaining keys are not queue-specific */
1678 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1908 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679 1); 1909 1);
1680 if (err) { 1910 if (err) {
@@ -1724,10 +1954,16 @@ again:
1724 return 0; 1954 return 0;
1725 1955
1726 abort_transaction: 1956 abort_transaction:
1727 xenbus_transaction_end(xbt, 1);
1728 xenbus_dev_fatal(dev, err, "%s", message); 1957 xenbus_dev_fatal(dev, err, "%s", message);
1958abort_transaction_no_dev_fatal:
1959 xenbus_transaction_end(xbt, 1);
1729 destroy_ring: 1960 destroy_ring:
1730 xennet_disconnect_backend(info); 1961 xennet_disconnect_backend(info);
1962 kfree(info->queues);
1963 info->queues = NULL;
1964 rtnl_lock();
1965 netif_set_real_num_tx_queues(info->netdev, 0);
1966 rtnl_unlock();
1731 out: 1967 out:
1732 return err; 1968 return err;
1733} 1969}
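
The reworked error path above layers two labels so that xenbus_dev_fatal() runs only when a message was set, while xenbus_transaction_end() runs in both cases; write_queue_xenstore_keys() already reported its own failure, so it jumps to the _no_dev_fatal label. A standalone sketch of this fall-through label idiom, with all names invented for illustration:

#include <stdio.h>

/* Two stacked labels: the first reports the error, then falls through
 * to the second, which only unwinds. Callers that already reported
 * jump straight to the second label.
 */
static int setup(int already_reported)
{
	int err = -5;	/* some failure */

	if (already_reported)
		goto abort_no_report;
	goto abort_report;

abort_report:
	fprintf(stderr, "fatal: writing keys failed (%d)\n", err);
	/* fall through */
abort_no_report:
	fprintf(stderr, "transaction ended\n");	/* always-run unwinding */
	return err;
}

int main(void)
{
	setup(0);
	setup(1);
	return 0;
}
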
@@ -1735,11 +1971,14 @@ again:
1735static int xennet_connect(struct net_device *dev) 1971static int xennet_connect(struct net_device *dev)
1736{ 1972{
1737 struct netfront_info *np = netdev_priv(dev); 1973 struct netfront_info *np = netdev_priv(dev);
1974 unsigned int num_queues = 0;
1738 int i, requeue_idx, err; 1975 int i, requeue_idx, err;
1739 struct sk_buff *skb; 1976 struct sk_buff *skb;
1740 grant_ref_t ref; 1977 grant_ref_t ref;
1741 struct xen_netif_rx_request *req; 1978 struct xen_netif_rx_request *req;
1742 unsigned int feature_rx_copy; 1979 unsigned int feature_rx_copy;
1980 unsigned int j = 0;
1981 struct netfront_queue *queue = NULL;
1743 1982
1744 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1983 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745 "feature-rx-copy", "%u", &feature_rx_copy); 1984 "feature-rx-copy", "%u", &feature_rx_copy);
@@ -1756,40 +1995,47 @@ static int xennet_connect(struct net_device *dev)
1756 if (err) 1995 if (err)
1757 return err; 1996 return err;
1758 1997
1998 /* talk_to_netback() sets the correct number of queues */
1999 num_queues = dev->real_num_tx_queues;
2000
1759 rtnl_lock(); 2001 rtnl_lock();
1760 netdev_update_features(dev); 2002 netdev_update_features(dev);
1761 rtnl_unlock(); 2003 rtnl_unlock();
1762 2004
1763 spin_lock_bh(&np->rx_lock); 2005 /* By now, the queue structures have been set up */
1764 spin_lock_irq(&np->tx_lock); 2006 for (j = 0; j < num_queues; ++j) {
2007 queue = &np->queues[j];
2008 spin_lock_bh(&queue->rx_lock);
2009 spin_lock_irq(&queue->tx_lock);
1765 2010
1766 /* Step 1: Discard all pending TX packet fragments. */ 2011 /* Step 1: Discard all pending TX packet fragments. */
1767 xennet_release_tx_bufs(np); 2012 xennet_release_tx_bufs(queue);
1768 2013
1769 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2014 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1770 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2015 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1771 skb_frag_t *frag; 2016 skb_frag_t *frag;
1772 const struct page *page; 2017 const struct page *page;
1773 if (!np->rx_skbs[i]) 2018 if (!queue->rx_skbs[i])
1774 continue; 2019 continue;
1775 2020
1776 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); 2021 skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
1777 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 2022 ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
1778 req = RING_GET_REQUEST(&np->rx, requeue_idx); 2023 req = RING_GET_REQUEST(&queue->rx, requeue_idx);
1779 2024
1780 frag = &skb_shinfo(skb)->frags[0]; 2025 frag = &skb_shinfo(skb)->frags[0];
1781 page = skb_frag_page(frag); 2026 page = skb_frag_page(frag);
1782 gnttab_grant_foreign_access_ref( 2027 gnttab_grant_foreign_access_ref(
1783 ref, np->xbdev->otherend_id, 2028 ref, queue->info->xbdev->otherend_id,
1784 pfn_to_mfn(page_to_pfn(page)), 2029 pfn_to_mfn(page_to_pfn(page)),
1785 0); 2030 0);
1786 req->gref = ref; 2031 req->gref = ref;
1787 req->id = requeue_idx; 2032 req->id = requeue_idx;
1788 2033
1789 requeue_idx++; 2034 requeue_idx++;
1790 } 2035 }
1791 2036
1792 np->rx.req_prod_pvt = requeue_idx; 2037 queue->rx.req_prod_pvt = requeue_idx;
2038 }
1793 2039
1794 /* 2040 /*
1795 * Step 3: All public and private state should now be sane. Get 2041 * Step 3: All public and private state should now be sane. Get
@@ -1798,14 +2044,17 @@ static int xennet_connect(struct net_device *dev)
1798 * packets. 2044 * packets.
1799 */ 2045 */
1800 netif_carrier_on(np->netdev); 2046 netif_carrier_on(np->netdev);
1801 notify_remote_via_irq(np->tx_irq); 2047 for (j = 0; j < num_queues; ++j) {
1802 if (np->tx_irq != np->rx_irq) 2048 queue = &np->queues[j];
1803 notify_remote_via_irq(np->rx_irq); 2049 notify_remote_via_irq(queue->tx_irq);
1804 xennet_tx_buf_gc(dev); 2050 if (queue->tx_irq != queue->rx_irq)
1805 xennet_alloc_rx_buffers(dev); 2051 notify_remote_via_irq(queue->rx_irq);
1806 2052 xennet_tx_buf_gc(queue);
1807 spin_unlock_irq(&np->tx_lock); 2053 xennet_alloc_rx_buffers(queue);
1808 spin_unlock_bh(&np->rx_lock); 2054
2055 spin_unlock_irq(&queue->tx_lock);
2056 spin_unlock_bh(&queue->rx_lock);
2057 }
1809 2058
1810 return 0; 2059 return 0;
1811} 2060}
@@ -1878,7 +2127,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
1878 int i; 2127 int i;
1879 2128
1880 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2129 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1881 data[i] = *(unsigned long *)(np + xennet_stats[i].offset); 2130 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
1882} 2131}
1883 2132
1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2133static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -1909,8 +2158,12 @@ static ssize_t show_rxbuf_min(struct device *dev,
1909{ 2158{
1910 struct net_device *netdev = to_net_dev(dev); 2159 struct net_device *netdev = to_net_dev(dev);
1911 struct netfront_info *info = netdev_priv(netdev); 2160 struct netfront_info *info = netdev_priv(netdev);
2161 unsigned int num_queues = netdev->real_num_tx_queues;
1912 2162
1913 return sprintf(buf, "%u\n", info->rx_min_target); 2163 if (num_queues)
2164 return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
2165 else
2166 return sprintf(buf, "%u\n", RX_MIN_TARGET);
1914} 2167}
1915 2168
1916static ssize_t store_rxbuf_min(struct device *dev, 2169static ssize_t store_rxbuf_min(struct device *dev,
@@ -1919,8 +2172,11 @@ static ssize_t store_rxbuf_min(struct device *dev,
1919{ 2172{
1920 struct net_device *netdev = to_net_dev(dev); 2173 struct net_device *netdev = to_net_dev(dev);
1921 struct netfront_info *np = netdev_priv(netdev); 2174 struct netfront_info *np = netdev_priv(netdev);
2175 unsigned int num_queues = netdev->real_num_tx_queues;
1922 char *endp; 2176 char *endp;
1923 unsigned long target; 2177 unsigned long target;
2178 unsigned int i;
2179 struct netfront_queue *queue;
1924 2180
1925 if (!capable(CAP_NET_ADMIN)) 2181 if (!capable(CAP_NET_ADMIN))
1926 return -EPERM; 2182 return -EPERM;
@@ -1934,16 +2190,19 @@ static ssize_t store_rxbuf_min(struct device *dev,
1934 if (target > RX_MAX_TARGET) 2190 if (target > RX_MAX_TARGET)
1935 target = RX_MAX_TARGET; 2191 target = RX_MAX_TARGET;
1936 2192
1937 spin_lock_bh(&np->rx_lock); 2193 for (i = 0; i < num_queues; ++i) {
1938 if (target > np->rx_max_target) 2194 queue = &np->queues[i];
1939 np->rx_max_target = target; 2195 spin_lock_bh(&queue->rx_lock);
1940 np->rx_min_target = target; 2196 if (target > queue->rx_max_target)
1941 if (target > np->rx_target) 2197 queue->rx_max_target = target;
1942 np->rx_target = target; 2198 queue->rx_min_target = target;
2199 if (target > queue->rx_target)
2200 queue->rx_target = target;
1943 2201
1944 xennet_alloc_rx_buffers(netdev); 2202 xennet_alloc_rx_buffers(queue);
1945 2203
1946 spin_unlock_bh(&np->rx_lock); 2204 spin_unlock_bh(&queue->rx_lock);
2205 }
1947 return len; 2206 return len;
1948} 2207}
1949 2208
@@ -1952,8 +2211,12 @@ static ssize_t show_rxbuf_max(struct device *dev,
1952{ 2211{
1953 struct net_device *netdev = to_net_dev(dev); 2212 struct net_device *netdev = to_net_dev(dev);
1954 struct netfront_info *info = netdev_priv(netdev); 2213 struct netfront_info *info = netdev_priv(netdev);
2214 unsigned int num_queues = netdev->real_num_tx_queues;
1955 2215
1956 return sprintf(buf, "%u\n", info->rx_max_target); 2216 if (num_queues)
2217 return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
2218 else
2219 return sprintf(buf, "%u\n", RX_MAX_TARGET);
1957} 2220}
1958 2221
1959static ssize_t store_rxbuf_max(struct device *dev, 2222static ssize_t store_rxbuf_max(struct device *dev,
@@ -1962,8 +2225,11 @@ static ssize_t store_rxbuf_max(struct device *dev,
1962{ 2225{
1963 struct net_device *netdev = to_net_dev(dev); 2226 struct net_device *netdev = to_net_dev(dev);
1964 struct netfront_info *np = netdev_priv(netdev); 2227 struct netfront_info *np = netdev_priv(netdev);
2228 unsigned int num_queues = netdev->real_num_tx_queues;
1965 char *endp; 2229 char *endp;
1966 unsigned long target; 2230 unsigned long target;
2231 unsigned int i = 0;
2232 struct netfront_queue *queue = NULL;
1967 2233
1968 if (!capable(CAP_NET_ADMIN)) 2234 if (!capable(CAP_NET_ADMIN))
1969 return -EPERM; 2235 return -EPERM;
@@ -1977,16 +2243,19 @@ static ssize_t store_rxbuf_max(struct device *dev,
1977 if (target > RX_MAX_TARGET) 2243 if (target > RX_MAX_TARGET)
1978 target = RX_MAX_TARGET; 2244 target = RX_MAX_TARGET;
1979 2245
1980 spin_lock_bh(&np->rx_lock); 2246 for (i = 0; i < num_queues; ++i) {
1981 if (target < np->rx_min_target) 2247 queue = &np->queues[i];
1982 np->rx_min_target = target; 2248 spin_lock_bh(&queue->rx_lock);
1983 np->rx_max_target = target; 2249 if (target < queue->rx_min_target)
1984 if (target < np->rx_target) 2250 queue->rx_min_target = target;
1985 np->rx_target = target; 2251 queue->rx_max_target = target;
2252 if (target < queue->rx_target)
2253 queue->rx_target = target;
1986 2254
1987 xennet_alloc_rx_buffers(netdev); 2255 xennet_alloc_rx_buffers(queue);
1988 2256
1989 spin_unlock_bh(&np->rx_lock); 2257 spin_unlock_bh(&queue->rx_lock);
2258 }
1990 return len; 2259 return len;
1991} 2260}
1992 2261
@@ -1995,8 +2264,12 @@ static ssize_t show_rxbuf_cur(struct device *dev,
1995{ 2264{
1996 struct net_device *netdev = to_net_dev(dev); 2265 struct net_device *netdev = to_net_dev(dev);
1997 struct netfront_info *info = netdev_priv(netdev); 2266 struct netfront_info *info = netdev_priv(netdev);
2267 unsigned int num_queues = netdev->real_num_tx_queues;
1998 2268
1999 return sprintf(buf, "%u\n", info->rx_target); 2269 if (num_queues)
2270 return sprintf(buf, "%u\n", info->queues[0].rx_target);
2271 else
2272 return sprintf(buf, "0\n");
2000} 2273}
2001 2274
2002static struct device_attribute xennet_attrs[] = { 2275static struct device_attribute xennet_attrs[] = {
@@ -2043,6 +2316,9 @@ static const struct xenbus_device_id netfront_ids[] = {
2043static int xennet_remove(struct xenbus_device *dev) 2316static int xennet_remove(struct xenbus_device *dev)
2044{ 2317{
2045 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2318 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2319 unsigned int num_queues = info->netdev->real_num_tx_queues;
2320 struct netfront_queue *queue = NULL;
2321 unsigned int i = 0;
2046 2322
2047 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2323 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048 2324
@@ -2052,7 +2328,15 @@ static int xennet_remove(struct xenbus_device *dev)
2052 2328
2053 unregister_netdev(info->netdev); 2329 unregister_netdev(info->netdev);
2054 2330
2055 del_timer_sync(&info->rx_refill_timer); 2331 for (i = 0; i < num_queues; ++i) {
2332 queue = &info->queues[i];
2333 del_timer_sync(&queue->rx_refill_timer);
2334 }
2335
2336 if (num_queues) {
2337 kfree(info->queues);
2338 info->queues = NULL;
2339 }
2056 2340
2057 free_percpu(info->stats); 2341 free_percpu(info->stats);
2058 2342
@@ -2078,6 +2362,9 @@ static int __init netif_init(void)
2078 2362
2079 pr_info("Initialising Xen virtual ethernet driver\n"); 2363 pr_info("Initialising Xen virtual ethernet driver\n");
2080 2364
2365 /* Allow as many queues as there are CPUs, by default */
2366 xennet_max_queues = num_online_cpus();
2367
2081 return xenbus_register_frontend(&netfront_driver); 2368 return xenbus_register_frontend(&netfront_driver);
2082} 2369}
2083module_init(netif_init); 2370module_init(netif_init);
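
netif_init() now defaults xennet_max_queues to the number of online CPUs. A hedged sketch of how such a default could combine with an administrator override (the upstream series exposes a max_queues module parameter; the exact clamp below is an assumption for illustration, not a copy of the driver):

#include <linux/module.h>
#include <linux/cpumask.h>

/* Hypothetical sketch: default the queue limit to the online CPU
 * count, but let an administrator cap it via a module parameter.
 */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static unsigned int effective_max_queues(void)
{
	unsigned int cpus = num_online_cpus();

	/* 0 (unset) means one queue per online CPU */
	if (!xennet_max_queues || xennet_max_queues > cpus)
		return cpus;
	return xennet_max_queues;
}
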
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 65d4ca19d132..26c66a126551 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -71,5 +71,6 @@ config NFC_PORT100
71source "drivers/nfc/pn544/Kconfig" 71source "drivers/nfc/pn544/Kconfig"
72source "drivers/nfc/microread/Kconfig" 72source "drivers/nfc/microread/Kconfig"
73source "drivers/nfc/nfcmrvl/Kconfig" 73source "drivers/nfc/nfcmrvl/Kconfig"
74source "drivers/nfc/st21nfca/Kconfig"
74 75
75endmenu 76endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index ae42a3fa60c9..23225b0287fd 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_NFC_SIM) += nfcsim.o
11obj-$(CONFIG_NFC_PORT100) += port100.o 11obj-$(CONFIG_NFC_PORT100) += port100.o
12obj-$(CONFIG_NFC_MRVL) += nfcmrvl/ 12obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
13obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o 13obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
14obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
14 15
15ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG 16ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index f2acd85be86e..440291ab7263 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -22,6 +22,8 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/of_gpio.h>
26#include <linux/of_irq.h>
25#include <linux/miscdevice.h> 27#include <linux/miscdevice.h>
26#include <linux/interrupt.h> 28#include <linux/interrupt.h>
27#include <linux/delay.h> 29#include <linux/delay.h>
@@ -857,6 +859,92 @@ exit_state_wait_secure_write_answer:
857 } 859 }
858} 860}
859 861
862#ifdef CONFIG_OF
863
864static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
865{
866 struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
867 struct device_node *pp;
868 int ret;
869
870 pp = client->dev.of_node;
871 if (!pp) {
872 ret = -ENODEV;
873 goto err_dt;
874 }
875
876 /* Obtain the EN GPIO from the device tree */
877 ret = of_get_named_gpio(pp, "enable-gpios", 0);
878 if (ret < 0) {
879 if (ret != -EPROBE_DEFER)
880 nfc_err(&client->dev,
881 "Failed to get EN gpio, error: %d\n", ret);
882 goto err_dt;
883 }
884 phy->gpio_en = ret;
885
886 /* Configuration of EN GPIO */
887 ret = gpio_request(phy->gpio_en, "pn544_en");
888 if (ret) {
889 nfc_err(&client->dev, "Fail EN pin\n");
890 goto err_dt;
891 }
892 ret = gpio_direction_output(phy->gpio_en, 0);
893 if (ret) {
894 nfc_err(&client->dev, "Fail EN pin direction\n");
895 goto err_gpio_en;
896 }
897
898 /* Obtain the FW GPIO from the device tree */
899 ret = of_get_named_gpio(pp, "firmware-gpios", 0);
900 if (ret < 0) {
901 if (ret != -EPROBE_DEFER)
902 nfc_err(&client->dev,
903 "Failed to get FW gpio, error: %d\n", ret);
904 goto err_gpio_en;
905 }
906 phy->gpio_fw = ret;
907
908 /* Configuration of FW GPIO */
909 ret = gpio_request(phy->gpio_fw, "pn544_fw");
910 if (ret) {
911 nfc_err(&client->dev, "Fail FW pin\n");
912 goto err_gpio_en;
913 }
914 ret = gpio_direction_output(phy->gpio_fw, 0);
915 if (ret) {
916 nfc_err(&client->dev, "Fail FW pin direction\n");
917 goto err_gpio_fw;
918 }
919
920 /* IRQ */
921 ret = irq_of_parse_and_map(pp, 0);
922 if (!ret) {	/* irq_of_parse_and_map() returns 0 on failure */
923 nfc_err(&client->dev, "Unable to get irq\n");
924 ret = -EINVAL;
925 goto err_gpio_fw;
926 }
927 client->irq = ret;
928
929 return 0;
930
931err_gpio_fw:
932 gpio_free(phy->gpio_fw);
933err_gpio_en:
934 gpio_free(phy->gpio_en);
935err_dt:
936 return ret;
937}
938
939#else
940
941static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
942{
943 return -ENODEV;
944}
945
946#endif
947
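The unwind above releases each GPIO by hand through stacked err_gpio_* labels. A devm_-managed variant (the approach the st21nfca driver below takes for its enable pin) lets the device core release the pins automatically; a minimal sketch using the same DT property names, with error reporting and storage of the GPIO numbers into the phy struct omitted for brevity:

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/of_gpio.h>

/* Sketch only: devm_-managed acquisition of the same two pins. The
 * device core frees them on probe failure or driver detach, so no
 * err_gpio_* unwind labels are needed.
 */
static int pn544_of_request_resources_devm(struct i2c_client *client)
{
	struct device_node *pp = client->dev.of_node;
	int gpio, ret;

	gpio = of_get_named_gpio(pp, "enable-gpios", 0);
	if (gpio < 0)
		return gpio;
	ret = devm_gpio_request_one(&client->dev, gpio,
				    GPIOF_OUT_INIT_LOW, "pn544_en");
	if (ret)
		return ret;

	gpio = of_get_named_gpio(pp, "firmware-gpios", 0);
	if (gpio < 0)
		return gpio;
	return devm_gpio_request_one(&client->dev, gpio,
				     GPIOF_OUT_INIT_LOW, "pn544_fw");
}
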
860static int pn544_hci_i2c_probe(struct i2c_client *client, 948static int pn544_hci_i2c_probe(struct i2c_client *client,
861 const struct i2c_device_id *id) 949 const struct i2c_device_id *id)
862{ 950{
@@ -887,25 +975,36 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
887 i2c_set_clientdata(client, phy); 975 i2c_set_clientdata(client, phy);
888 976
889 pdata = client->dev.platform_data; 977 pdata = client->dev.platform_data;
890 if (pdata == NULL) {
891 nfc_err(&client->dev, "No platform data\n");
892 return -EINVAL;
893 }
894 978
895 if (pdata->request_resources == NULL) { 979 /* No platform data, using device tree. */
896 nfc_err(&client->dev, "request_resources() missing\n"); 980 if (!pdata && client->dev.of_node) {
897 return -EINVAL; 981 r = pn544_hci_i2c_of_request_resources(client);
898 } 982 if (r) {
983 nfc_err(&client->dev, "No DT data\n");
984 return r;
985 }
986 /* Using platform data. */
987 } else if (pdata) {
899 988
900 r = pdata->request_resources(client); 989 if (pdata->request_resources == NULL) {
901 if (r) { 990 nfc_err(&client->dev, "request_resources() missing\n");
902 nfc_err(&client->dev, "Cannot get platform resources\n"); 991 return -EINVAL;
903 return r; 992 }
904 }
905 993
906 phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE); 994 r = pdata->request_resources(client);
907 phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET); 995 if (r) {
908 phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ); 996 nfc_err(&client->dev,
997 "Cannot get platform resources\n");
998 return r;
999 }
1000
1001 phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
1002 phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
1003 phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
1004 } else {
1005 nfc_err(&client->dev, "No platform data\n");
1006 return -EINVAL;
1007 }
909 1008
910 pn544_hci_i2c_platform_init(phy); 1009 pn544_hci_i2c_platform_init(phy);
911 1010
@@ -930,8 +1029,12 @@ err_hci:
930 free_irq(client->irq, phy); 1029 free_irq(client->irq, phy);
931 1030
932err_rti: 1031err_rti:
933 if (pdata->free_resources != NULL) 1032 if (!pdata) {
1033 gpio_free(phy->gpio_en);
1034 gpio_free(phy->gpio_fw);
1035 } else if (pdata->free_resources) {
934 pdata->free_resources(); 1036 pdata->free_resources();
1037 }
935 1038
936 return r; 1039 return r;
937} 1040}
@@ -953,15 +1056,30 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
953 pn544_hci_i2c_disable(phy); 1056 pn544_hci_i2c_disable(phy);
954 1057
955 free_irq(client->irq, phy); 1058 free_irq(client->irq, phy);
956 if (pdata->free_resources) 1059
1060 /* No platform data, GPIOs have been requested by this driver */
1061 if (!pdata) {
1062 gpio_free(phy->gpio_en);
1063 gpio_free(phy->gpio_fw);
1064 /* Using platform data */
1065 } else if (pdata->free_resources) {
957 pdata->free_resources(); 1066 pdata->free_resources();
1067 }
958 1068
959 return 0; 1069 return 0;
960} 1070}
961 1071
1072static const struct of_device_id of_pn544_i2c_match[] = {
1073 { .compatible = "nxp,pn544-i2c", },
1074 {},
1075};
1076MODULE_DEVICE_TABLE(of, of_pn544_i2c_match);
1077
962static struct i2c_driver pn544_hci_i2c_driver = { 1078static struct i2c_driver pn544_hci_i2c_driver = {
963 .driver = { 1079 .driver = {
964 .name = PN544_HCI_I2C_DRIVER_NAME, 1080 .name = PN544_HCI_I2C_DRIVER_NAME,
1081 .owner = THIS_MODULE,
1082 .of_match_table = of_match_ptr(of_pn544_i2c_match),
965 }, 1083 },
966 .probe = pn544_hci_i2c_probe, 1084 .probe = pn544_hci_i2c_probe,
967 .id_table = pn544_hci_i2c_id_table, 1085 .id_table = pn544_hci_i2c_id_table,
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index b7a372af5eb7..4ac4d31f6c59 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -28,7 +28,8 @@
28 NFC_PROTO_MIFARE_MASK | \ 28 NFC_PROTO_MIFARE_MASK | \
29 NFC_PROTO_FELICA_MASK | \ 29 NFC_PROTO_FELICA_MASK | \
30 NFC_PROTO_NFC_DEP_MASK | \ 30 NFC_PROTO_NFC_DEP_MASK | \
31 NFC_PROTO_ISO14443_MASK) 31 NFC_PROTO_ISO14443_MASK | \
32 NFC_PROTO_ISO14443_B_MASK)
32 33
33#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \ 34#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
34 NFC_DIGITAL_DRV_CAPS_TG_CRC) 35 NFC_DIGITAL_DRV_CAPS_TG_CRC)
@@ -120,6 +121,7 @@ struct port100_in_rf_setting {
120#define PORT100_COMM_TYPE_IN_212F 0x01 121#define PORT100_COMM_TYPE_IN_212F 0x01
121#define PORT100_COMM_TYPE_IN_424F 0x02 122#define PORT100_COMM_TYPE_IN_424F 0x02
122#define PORT100_COMM_TYPE_IN_106A 0x03 123#define PORT100_COMM_TYPE_IN_106A 0x03
124#define PORT100_COMM_TYPE_IN_106B 0x07
123 125
124static const struct port100_in_rf_setting in_rf_settings[] = { 126static const struct port100_in_rf_setting in_rf_settings[] = {
125 [NFC_DIGITAL_RF_TECH_212F] = { 127 [NFC_DIGITAL_RF_TECH_212F] = {
@@ -140,6 +142,12 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
140 .in_recv_set_number = 15, 142 .in_recv_set_number = 15,
141 .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A, 143 .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
142 }, 144 },
145 [NFC_DIGITAL_RF_TECH_106B] = {
146 .in_send_set_number = 3,
147 .in_send_comm_type = PORT100_COMM_TYPE_IN_106B,
148 .in_recv_set_number = 15,
149 .in_recv_comm_type = PORT100_COMM_TYPE_IN_106B,
150 },
143 /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */ 151 /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
144 [NFC_DIGITAL_RF_TECH_LAST] = { 0 }, 152 [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
145}; 153};
@@ -340,6 +348,32 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
340 [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = { 348 [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
341 { PORT100_IN_PROT_END, 0 }, 349 { PORT100_IN_PROT_END, 0 },
342 }, 350 },
351 [NFC_DIGITAL_FRAMING_NFCB] = {
352 { PORT100_IN_PROT_INITIAL_GUARD_TIME, 20 },
353 { PORT100_IN_PROT_ADD_CRC, 1 },
354 { PORT100_IN_PROT_CHECK_CRC, 1 },
355 { PORT100_IN_PROT_MULTI_CARD, 0 },
356 { PORT100_IN_PROT_ADD_PARITY, 0 },
357 { PORT100_IN_PROT_CHECK_PARITY, 0 },
358 { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
359 { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
360 { PORT100_IN_PROT_CRYPTO1, 0 },
361 { PORT100_IN_PROT_ADD_SOF, 1 },
362 { PORT100_IN_PROT_CHECK_SOF, 1 },
363 { PORT100_IN_PROT_ADD_EOF, 1 },
364 { PORT100_IN_PROT_CHECK_EOF, 1 },
365 { PORT100_IN_PROT_DEAF_TIME, 4 },
366 { PORT100_IN_PROT_CRM, 0 },
367 { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
368 { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
369 { PORT100_IN_PROT_RFCA, 0 },
370 { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
371 { PORT100_IN_PROT_END, 0 },
372 },
373 [NFC_DIGITAL_FRAMING_NFCB_T4T] = {
374 /* nfc_digital_framing_nfcb */
375 { PORT100_IN_PROT_END, 0 },
376 },
343 /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */ 377 /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
344 [NFC_DIGITAL_FRAMING_LAST] = { 378 [NFC_DIGITAL_FRAMING_LAST] = {
345 { PORT100_IN_PROT_END, 0 }, 379 { PORT100_IN_PROT_END, 0 },
diff --git a/drivers/nfc/st21nfca/Kconfig b/drivers/nfc/st21nfca/Kconfig
new file mode 100644
index 000000000000..ee459f066ade
--- /dev/null
+++ b/drivers/nfc/st21nfca/Kconfig
@@ -0,0 +1,23 @@
1config NFC_ST21NFCA
2 tristate "STMicroelectronics ST21NFCA NFC driver"
3 depends on NFC_HCI
4 select CRC_CCITT
5 default n
6 ---help---
7 STMicroelectronics ST21NFCA core driver. It implements the chipset
8 HCI logic and hooks into the NFC kernel APIs. Physical layers will
9 register against it.
10
11 To compile this driver as a module, choose m here. The module will
12 be called st21nfca.
13 Say N if unsure.
14
15config NFC_ST21NFCA_I2C
16 tristate "NFC ST21NFCA i2c support"
17 depends on NFC_ST21NFCA && I2C && NFC_SHDLC
18 ---help---
19 This module adds support for the STMicroelectronics st21nfca i2c interface.
20 Select this if your platform is using the i2c bus.
21
22 If you choose to build a module, it'll be called st21nfca_i2c.
23 Say N if unsure.
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
new file mode 100644
index 000000000000..038ed093a119
--- /dev/null
+++ b/drivers/nfc/st21nfca/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for ST21NFCA HCI based NFC driver
3#
4
5st21nfca_i2c-objs = i2c.o
6
7obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o
8obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
new file mode 100644
index 000000000000..3f954ed86d98
--- /dev/null
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -0,0 +1,724 @@
1/*
2 * I2C Link Layer for ST21NFCA HCI based Driver
3 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/crc-ccitt.h>
21#include <linux/module.h>
22#include <linux/i2c.h>
23#include <linux/gpio.h>
24#include <linux/of_irq.h>
25#include <linux/of_gpio.h>
26#include <linux/miscdevice.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/nfc.h>
30#include <linux/firmware.h>
31#include <linux/unaligned/access_ok.h>
32#include <linux/platform_data/st21nfca.h>
33
34#include <net/nfc/hci.h>
35#include <net/nfc/llc.h>
36#include <net/nfc/nfc.h>
37
38#include "st21nfca.h"
39
40/*
41 * Every frame starts with ST21NFCA_SOF_EOF and ends with ST21NFCA_SOF_EOF.
42 * Because ST21NFCA_SOF_EOF is a possible data value, a mechanism
43 * called byte stuffing is used.
44 *
45 * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
46 * - insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
47 * - xor byte with ST21NFCA_BYTE_STUFFING_MASK
48 */
49#define ST21NFCA_SOF_EOF 0x7e
50#define ST21NFCA_BYTE_STUFFING_MASK 0x20
51#define ST21NFCA_ESCAPE_BYTE_STUFFING 0x7d
52
53/* SOF + 00 */
54#define ST21NFCA_FRAME_HEADROOM 2
55
56/* 2 bytes crc + EOF */
57#define ST21NFCA_FRAME_TAILROOM 3
58#define IS_START_OF_FRAME(buf) (buf[0] == ST21NFCA_SOF_EOF && \
59 buf[1] == 0)
60
61#define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"
62
63static struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
64 {ST21NFCA_HCI_DRIVER_NAME, 0},
65 {}
66};
67
68MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
69
70struct st21nfca_i2c_phy {
71 struct i2c_client *i2c_dev;
72 struct nfc_hci_dev *hdev;
73
74 unsigned int gpio_ena;
75 unsigned int gpio_irq;
76 unsigned int irq_polarity;
77
78 struct sk_buff *pending_skb;
79 int current_read_len;
80 /*
81 * crc checking might fail because the i2c macrocell
82 * is disabled due to other interface activity
83 */
84 int crc_trials;
85
86 int powered;
87 int run_mode;
88
89 /*
90 * < 0 if hardware error occurred (e.g. i2c err)
91 * and prevents normal operation.
92 */
93 int hard_fault;
94 struct mutex phy_lock;
95};
96static u8 len_seq[] = { 13, 24, 15, 29 };
97static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
98
99#define I2C_DUMP_SKB(info, skb) \
100do { \
101 pr_debug("%s:\n", info); \
102 print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
103 16, 1, (skb)->data, (skb)->len, 0); \
104} while (0)
105
106/*
107 * In order to get the CLF in a known state we generate an internal reboot
108 * using a proprietary command.
109 * Once the reboot is completed, we expect to receive a buffer
110 * filled with ST21NFCA_SOF_EOF.
111 */
112static int st21nfca_hci_platform_init(struct st21nfca_i2c_phy *phy)
113{
114 u16 wait_reboot[] = { 50, 300, 1000 };
115 char reboot_cmd[] = { 0x7E, 0x66, 0x48, 0xF6, 0x7E };
116 u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE];
117 int i, r = -1;
118
119 for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
120 r = i2c_master_send(phy->i2c_dev, reboot_cmd,
121 sizeof(reboot_cmd));
122 if (r < 0)
123 msleep(wait_reboot[i]);
124 }
125 if (r < 0)
126 return r;
127
128 /* The CLF takes about 20ms to do an internal reboot */
129 msleep(20);
130 r = -1;
131 for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
132 r = i2c_master_recv(phy->i2c_dev, tmp,
133 ST21NFCA_HCI_LLC_MAX_SIZE);
134 if (r < 0)
135 msleep(wait_reboot[i]);
136 }
137 if (r < 0)
138 return r;
139
140 for (i = 0; i < ST21NFCA_HCI_LLC_MAX_SIZE &&
141 tmp[i] == ST21NFCA_SOF_EOF; i++)
142 ;
143
144 if (i != ST21NFCA_HCI_LLC_MAX_SIZE)	/* expect a buffer full of SOF_EOF */
145 return -ENODEV;
146
147 usleep_range(1000, 1500);
148 return 0;
149}
150
151static int st21nfca_hci_i2c_enable(void *phy_id)
152{
153 struct st21nfca_i2c_phy *phy = phy_id;
154
155 gpio_set_value(phy->gpio_ena, 1);
156 phy->powered = 1;
157 phy->run_mode = ST21NFCA_HCI_MODE;
158
159 usleep_range(10000, 15000);
160
161 return 0;
162}
163
164static void st21nfca_hci_i2c_disable(void *phy_id)
165{
166 struct st21nfca_i2c_phy *phy = phy_id;
167
168 pr_info("\n");
169 gpio_set_value(phy->gpio_ena, 0);
170
171 phy->powered = 0;
172}
173
174static void st21nfca_hci_add_len_crc(struct sk_buff *skb)
175{
176 u16 crc;
177 u8 tmp;
178
179 *skb_push(skb, 1) = 0;
180
181 crc = crc_ccitt(0xffff, skb->data, skb->len);
182 crc = ~crc;
183
184 tmp = crc & 0x00ff;
185 *skb_put(skb, 1) = tmp;
186
187 tmp = (crc >> 8) & 0x00ff;
188 *skb_put(skb, 1) = tmp;
189}
190
191static void st21nfca_hci_remove_len_crc(struct sk_buff *skb)
192{
193 skb_pull(skb, ST21NFCA_FRAME_HEADROOM);
194 skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM);
195}
196
197/*
198 * Writing a frame must not return the number of written bytes.
199 * It must return either zero for success, or <0 for error.
200 * In addition, it must not alter the skb
201 */
202static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb)
203{
204 int r = -1, i, j;
205 struct st21nfca_i2c_phy *phy = phy_id;
206 struct i2c_client *client = phy->i2c_dev;
207 u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE * 2];
208
209 I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb);
210
211
212 if (phy->hard_fault != 0)
213 return phy->hard_fault;
214
215 /*
216 * Compute the CRC before applying byte stuffing to the frame.
217 * The CRC bytes appended by st21nfca_hci_add_len_crc are
218 * byte stuffed below along with the rest of the frame.
219 */
220 st21nfca_hci_add_len_crc(skb);
221
222 /* add ST21NFCA_SOF_EOF on tail */
223 *skb_put(skb, 1) = ST21NFCA_SOF_EOF;
224 /* add ST21NFCA_SOF_EOF on head */
225 *skb_push(skb, 1) = ST21NFCA_SOF_EOF;
226
227 /*
228 * Compute byte stuffing
229 * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
230 * insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
231 * xor byte with ST21NFCA_BYTE_STUFFING_MASK
232 */
233 tmp[0] = skb->data[0];
234 for (i = 1, j = 1; i < skb->len - 1; i++, j++) {
235 if (skb->data[i] == ST21NFCA_SOF_EOF
236 || skb->data[i] == ST21NFCA_ESCAPE_BYTE_STUFFING) {
237 tmp[j] = ST21NFCA_ESCAPE_BYTE_STUFFING;
238 j++;
239 tmp[j] = skb->data[i] ^ ST21NFCA_BYTE_STUFFING_MASK;
240 } else {
241 tmp[j] = skb->data[i];
242 }
243 }
244 tmp[j] = skb->data[i];
245 j++;
246
247 /*
248 * Manage sleep mode
249 * Try 3 times to send data with delay between each
250 */
251 mutex_lock(&phy->phy_lock);
252 for (i = 0; i < ARRAY_SIZE(wait_tab) && r < 0; i++) {
253 r = i2c_master_send(client, tmp, j);
254 if (r < 0)
255 msleep(wait_tab[i]);
256 }
257 mutex_unlock(&phy->phy_lock);
258
259 if (r >= 0) {
260 if (r != j)
261 r = -EREMOTEIO;
262 else
263 r = 0;
264 }
265
266 st21nfca_hci_remove_len_crc(skb);
267
268 return r;
269}
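
To make the framing described in the header comment concrete, here is a user-space sketch of the encode path performed by st21nfca_hci_add_len_crc() and the stuffing loop above: prepend the 0x00 header byte, append the complemented CRC-CCITT LSB first (the bitwise loop below matches the kernel's crc_ccitt(), polynomial 0x8408), stuff any 0x7e/0x7d bytes, and wrap the result in SOF/EOF markers. Buffer sizes and the sample payload are illustrative; no bounds checking is done.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SOF_EOF		0x7e
#define ESCAPE		0x7d
#define STUFF_MASK	0x20

/* Bitwise CRC-CCITT, reversed polynomial 0x8408 (as crc_ccitt()). */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
	}
	return crc;
}

static size_t frame_encode(const uint8_t *payload, size_t len, uint8_t *out)
{
	uint8_t raw[64];
	uint16_t crc;
	size_t i, j = 0;

	raw[0] = 0x00;				/* header byte */
	for (i = 0; i < len; i++)
		raw[i + 1] = payload[i];
	crc = ~crc_ccitt(0xffff, raw, len + 1);	/* complemented CRC */
	raw[len + 1] = crc & 0xff;		/* appended LSB first */
	raw[len + 2] = crc >> 8;

	out[j++] = SOF_EOF;
	for (i = 0; i < len + 3; i++) {		/* stuff header+payload+crc */
		if (raw[i] == SOF_EOF || raw[i] == ESCAPE) {
			out[j++] = ESCAPE;
			out[j++] = raw[i] ^ STUFF_MASK;
		} else {
			out[j++] = raw[i];
		}
	}
	out[j++] = SOF_EOF;
	return j;
}

int main(void)
{
	uint8_t payload[] = { 0x7e, 0x01 }, frame[64];
	size_t n = frame_encode(payload, sizeof(payload), frame);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}
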
270
271static int get_frame_size(u8 *buf, int buflen)
272{
273 int len = 0;
274 if (buf[len + 1] == ST21NFCA_SOF_EOF)
275 return 0;
276
277 for (len = 1; len < buflen && buf[len] != ST21NFCA_SOF_EOF; len++)
278 ;
279
280 return len;
281}
282
283static int check_crc(u8 *buf, int buflen)
284{
285 u16 crc;
286
287 crc = crc_ccitt(0xffff, buf, buflen - 2);
288 crc = ~crc;
289
290 if (buf[buflen - 2] != (crc & 0xff) || buf[buflen - 1] != (crc >> 8)) {
291 pr_err(ST21NFCA_HCI_DRIVER_NAME
292 ": CRC error 0x%x != 0x%x 0x%x\n", crc, buf[buflen - 1],
293 buf[buflen - 2]);
294
295 pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
296 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
297 16, 2, buf, buflen, false);
298 return -EPERM;
299 }
300 return 0;
301}
302
303/*
304 * Prepare received data for upper layer.
305 * Received data includes byte stuffing, crc and sof/eof,
306 * which are not usable by the hci layer.
307 * returns:
308 * frame size without sof/eof, header and byte stuffing
309 * -EBADMSG : frame was incorrect and discarded
310 */
311static int st21nfca_hci_i2c_repack(struct sk_buff *skb)
312{
313 int i, j, r, size;
314 if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0))
315 return -EBADMSG;
316
317 size = get_frame_size(skb->data, skb->len);
318 if (size > 0) {
319 skb_trim(skb, size);
320 /* remove ST21NFCA byte stuffing for upper layer */
321 for (i = 1, j = 0; i < skb->len; i++) {
322 if (skb->data[i + j] ==
323 (u8) ST21NFCA_ESCAPE_BYTE_STUFFING) {
324 skb->data[i] = skb->data[i + j + 1]
325 | ST21NFCA_BYTE_STUFFING_MASK;
326 i++;
327 j++;
328 }
329 skb->data[i] = skb->data[i + j];
330 }
331 /* trim the escape bytes removed by unstuffing */
332 skb_trim(skb, i - j);
333 /* remove ST21NFCA_SOF_EOF from head */
334 skb_pull(skb, 1);
335
336 r = check_crc(skb->data, skb->len);
337 if (r != 0) {
339 return -EBADMSG;
340 }
341
342 /* remove headbyte */
343 skb_pull(skb, 1);
344 /* remove crc. Byte Stuffing is already removed here */
345 skb_trim(skb, skb->len - 2);
346 return skb->len;
347 }
348 return 0;
349}
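
The unstuffing loop above can be isolated into a small user-space counterpart. Note it restores escaped bytes with OR rather than XOR, mirroring the driver; the two are equivalent here because both escapable values (0x7e and 0x7d) have the 0x20 bit set. SOF/EOF and CRC trimming are left out, and well-formed input (no trailing escape byte) is assumed.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ESCAPE		0x7d
#define STUFF_MASK	0x20

/* Remove byte stuffing in place, returning the unstuffed length. */
static size_t unstuff(uint8_t *buf, size_t len)
{
	size_t i, j;

	for (i = 0, j = 0; j < len; i++, j++) {
		if (buf[j] == ESCAPE) {
			j++;	/* skip the escape byte itself */
			buf[i] = buf[j] | STUFF_MASK;
		} else {
			buf[i] = buf[j];
		}
	}
	return i;
}

int main(void)
{
	uint8_t b[] = { 0x10, ESCAPE, 0x5e, 0x22 };	/* decodes to 10 7e 22 */
	size_t i, n = unstuff(b, sizeof(b));

	for (i = 0; i < n; i++)
		printf("%02x ", b[i]);
	printf("\n");
	return 0;
}
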
350
351/*
352 * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
353 * that the i2c bus will be flushed and that the next read will start on a new frame.
354 * returned skb contains only LLC header and payload.
355 * returns:
356 * frame size : if received frame is complete (find ST21NFCA_SOF_EOF at
357 * end of read)
358 * -EAGAIN : if received frame is incomplete (ST21NFCA_SOF_EOF not found
359 * at end of read)
360 * -EREMOTEIO : i2c read error (fatal)
361 * -EBADMSG : frame was incorrect and discarded
362 * (value returned from st21nfca_hci_i2c_repack)
363 * -EIO : if no ST21NFCA_SOF_EOF is found after reaching
364 * the read length end sequence
365 */
366static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
367 struct sk_buff *skb)
368{
369 int r, i;
370 u8 len;
371 u8 buf[ST21NFCA_HCI_LLC_MAX_PAYLOAD];
372 struct i2c_client *client = phy->i2c_dev;
373
374 if (phy->current_read_len < ARRAY_SIZE(len_seq)) {
375 len = len_seq[phy->current_read_len];
376
377 /*
378 * Add retry mechanism
379 * Operation on I2C interface may fail in case of operation on
380 * RF or SWP interface
381 */
382 r = 0;
383 mutex_lock(&phy->phy_lock);
384 for (i = 0; i < ARRAY_SIZE(wait_tab) && r <= 0; i++) {
385 r = i2c_master_recv(client, buf, len);
386 if (r < 0)
387 msleep(wait_tab[i]);
388 }
389 mutex_unlock(&phy->phy_lock);
390
391 if (r != len) {
392 phy->current_read_len = 0;
393 return -EREMOTEIO;
394 }
395
396 /*
397 * The first read sequence does not start with SOF.
398 * Data is corrupted so we drop it.
399 */
400 if (!phy->current_read_len && buf[0] != ST21NFCA_SOF_EOF) {
401 skb_trim(skb, 0);
402 phy->current_read_len = 0;
403 return -EIO;
404 } else if (phy->current_read_len &&
405 IS_START_OF_FRAME(buf)) {
406 /*
407 * Previous frame transmission was interrupted and
408 * the frame got repeated.
409 * The received frame starts with ST21NFCA_SOF_EOF + 00.
410 */
411 skb_trim(skb, 0);
412 phy->current_read_len = 0;
413 }
414
415 memcpy(skb_put(skb, len), buf, len);
416
417 if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) {
418 phy->current_read_len = 0;
419 return st21nfca_hci_i2c_repack(skb);
420 }
421 phy->current_read_len++;
422 return -EAGAIN;
423 }
424 return -EIO;
425}
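
The fixed read-length sequence deserves a concrete walk-through: each interrupt triggers a read of the next length in len_seq[], and the frame is complete once the last byte of a chunk is the EOF marker. A user-space simulation, where read_chunk() stands in for i2c_master_recv() and the 37-byte fake frame is deliberately aligned to the first two read lengths:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define SOF_EOF 0x7e

static const uint8_t len_seq[] = { 13, 24, 15, 29 };

/* Fake device buffer: a 37-byte frame (13 + 24) ending in SOF_EOF. */
static uint8_t device[128];
static size_t dev_pos;

static void read_chunk(uint8_t *dst, size_t len)	/* recv() stand-in */
{
	memcpy(dst, device + dev_pos, len);
	dev_pos += len;
}

int main(void)
{
	uint8_t frame[128];
	size_t flen = 0;
	unsigned int step;

	memset(device, 0xaa, sizeof(device));
	device[0] = SOF_EOF;	/* frame starts with SOF */
	device[36] = SOF_EOF;	/* ...and ends 37 bytes later */

	for (step = 0; step < sizeof(len_seq); step++) {
		read_chunk(frame + flen, len_seq[step]);
		flen += len_seq[step];
		if (frame[flen - 1] == SOF_EOF) {
			printf("frame complete after %u reads (%zu bytes)\n",
			       step + 1, flen);
			return 0;
		}
	}
	printf("no EOF within the length sequence: would return -EIO\n");
	return 1;
}
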
426
427/*
428 * Reads an shdlc frame from the chip. This is not as straightforward as it
429 * seems. The frame format is data-crc, and corruption can occur anywhere
430 * while transiting on i2c bus, such that we could read an invalid data.
431 * The tricky case is when we read a corrupted data or crc. We must detect
432 * this here in order to determine that data can be transmitted to the hci
433 * core. This is the reason why we check the crc here.
434 * The CLF will repeat a frame until we send a RR on that frame.
435 *
436 * On ST21NFCA, the IRQ line goes idle when a read starts. As no size information is
437 * available in the incoming data, further IRQs may arrive. Every IRQ will trigger
438 * a read sequence with different length and will fill the current frame.
439 * The reception is complete once we reach a ST21NFCA_SOF_EOF.
440 */
441static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
442{
443 struct st21nfca_i2c_phy *phy = phy_id;
444 struct i2c_client *client;
445
446 int r;
447
448 if (!phy || irq != phy->i2c_dev->irq) {
449 WARN_ON_ONCE(1);
450 return IRQ_NONE;
451 }
452
453 client = phy->i2c_dev;
454 dev_dbg(&client->dev, "IRQ\n");
455
456 if (phy->hard_fault != 0)
457 return IRQ_HANDLED;
458
459 r = st21nfca_hci_i2c_read(phy, phy->pending_skb);
460 if (r == -EREMOTEIO) {
461 phy->hard_fault = r;
462
463 nfc_hci_recv_frame(phy->hdev, NULL);
464
465 return IRQ_HANDLED;
466 } else if (r == -EAGAIN || r == -EIO) {
467 return IRQ_HANDLED;
468 } else if (r == -EBADMSG && phy->crc_trials < ARRAY_SIZE(wait_tab)) {
469 /*
470 * With ST21NFCA, only one interface (I2C, RF or SWP)
471 * may be active at a time.
472 * Having incorrect crc is usually due to i2c macrocell
473 * deactivation in the middle of a transmission.
474 * It may generate corrupted data on i2c.
475 * We give it some time to get the i2c link back.
476 * The complete frame will be repeated.
477 */
478 msleep(wait_tab[phy->crc_trials]);
479 phy->crc_trials++;
480 phy->current_read_len = 0;
481 kfree_skb(phy->pending_skb);
482 } else if (r > 0) {
483 /*
484 * We succeeded to read data from the CLF and
485 * data is valid.
486 * Reset counter.
487 */
488 nfc_hci_recv_frame(phy->hdev, phy->pending_skb);
489 phy->crc_trials = 0;
490 }
491
492 phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
493 if (phy->pending_skb == NULL) {
494 phy->hard_fault = -ENOMEM;
495 nfc_hci_recv_frame(phy->hdev, NULL);
496 }
497
498 return IRQ_HANDLED;
499}
500
501static struct nfc_phy_ops i2c_phy_ops = {
502 .write = st21nfca_hci_i2c_write,
503 .enable = st21nfca_hci_i2c_enable,
504 .disable = st21nfca_hci_i2c_disable,
505};
506
507#ifdef CONFIG_OF
508static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
509{
510 struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
511 struct device_node *pp;
512 int gpio;
513 int r;
514
515 pp = client->dev.of_node;
516 if (!pp)
517 return -ENODEV;
518
519 /* Get GPIO from device tree */
520 gpio = of_get_named_gpio(pp, "enable-gpios", 0);
521 if (gpio < 0) {
522 nfc_err(&client->dev, "Failed to retrieve enable-gpios from device tree\n");
523 return gpio;
524 }
525
526 /* GPIO request and configuration */
527 r = devm_gpio_request(&client->dev, gpio, "clf_enable");
528 if (r) {
529 nfc_err(&client->dev, "Failed to request enable pin\n");
530 return -ENODEV;
531 }
532
533 r = gpio_direction_output(gpio, 1);
534 if (r) {
535 nfc_err(&client->dev, "Failed to set enable pin direction as output\n");
536 return -ENODEV;
537 }
538 phy->gpio_ena = gpio;
539
540 /* IRQ */
541 r = irq_of_parse_and_map(pp, 0);
542 if (!r) {	/* irq_of_parse_and_map() returns 0 on failure */
543 nfc_err(&client->dev, "Unable to get irq\n");
544 return -EINVAL;
546 }
547
548 phy->irq_polarity = irq_get_trigger_type(r);
549 client->irq = r;
550
551 return 0;
552}
553#else
554static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
555{
556 return -ENODEV;
557}
558#endif
559
560static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
561{
562 struct st21nfca_nfc_platform_data *pdata;
563 struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
564 int r;
565 int irq;
566
567 pdata = client->dev.platform_data;
568 if (pdata == NULL) {
569 nfc_err(&client->dev, "No platform data\n");
570 return -EINVAL;
571 }
572
573 /* store for later use */
574 phy->gpio_irq = pdata->gpio_irq;
575 phy->gpio_ena = pdata->gpio_ena;
576 phy->irq_polarity = pdata->irq_polarity;
577
578 r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up");
579 if (r) {
580 pr_err("%s : gpio_request failed\n", __FILE__);
581 return -ENODEV;
582 }
583
584 r = gpio_direction_input(phy->gpio_irq);
585 if (r) {
586 pr_err("%s : gpio_direction_input failed\n", __FILE__);
587 return -ENODEV;
588 }
589
590 if (phy->gpio_ena > 0) {
591 r = devm_gpio_request(&client->dev,
592 phy->gpio_ena, "clf_enable");
593 if (r) {
594 pr_err("%s : ena gpio_request failed\n", __FILE__);
595 return -ENODEV;
596 }
597 r = gpio_direction_output(phy->gpio_ena, 1);
598
599 if (r) {
600 pr_err("%s : ena gpio_direction_output failed\n",
601 __FILE__);
602 return -ENODEV;
603 }
604 }
605
606 /* IRQ */
607 irq = gpio_to_irq(phy->gpio_irq);
608 if (irq < 0) {
609 nfc_err(&client->dev,
610 "Unable to get irq number for GPIO %d error %d\n",
611 phy->gpio_irq, irq);
612 return -ENODEV;
613 }
614 client->irq = irq;
615
616 return 0;
617}
618
619static int st21nfca_hci_i2c_probe(struct i2c_client *client,
620 const struct i2c_device_id *id)
621{
622 struct st21nfca_i2c_phy *phy;
623 struct st21nfca_nfc_platform_data *pdata;
624 int r;
625
626 dev_dbg(&client->dev, "%s\n", __func__);
627 dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
628
629 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
630 nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
631 return -ENODEV;
632 }
633
634 phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy),
635 GFP_KERNEL);
636 if (!phy) {
637 nfc_err(&client->dev,
638 "Cannot allocate memory for st21nfca i2c phy.\n");
639 return -ENOMEM;
640 }
641
642 phy->i2c_dev = client;
643 phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
644 if (phy->pending_skb == NULL)
645 return -ENOMEM;
646
647 phy->current_read_len = 0;
648 phy->crc_trials = 0;
649 mutex_init(&phy->phy_lock);
650 i2c_set_clientdata(client, phy);
651
652 pdata = client->dev.platform_data;
653 if (!pdata && client->dev.of_node) {
654 r = st21nfca_hci_i2c_of_request_resources(client);
655 if (r) {
656 nfc_err(&client->dev, "No platform data\n");
657 return r;
658 }
659 } else if (pdata) {
660 r = st21nfca_hci_i2c_request_resources(client);
661 if (r) {
662 nfc_err(&client->dev, "Cannot get platform resources\n");
663 return r;
664 }
665 } else {
666 nfc_err(&client->dev, "st21nfca platform resources not available\n");
667 return -ENODEV;
668 }
669
670 r = st21nfca_hci_platform_init(phy);
671 if (r < 0) {
672 nfc_err(&client->dev, "Unable to reboot st21nfca\n");
673 return -ENODEV;
674 }
675
676 r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
677 st21nfca_hci_irq_thread_fn,
678 phy->irq_polarity | IRQF_ONESHOT,
679 ST21NFCA_HCI_DRIVER_NAME, phy);
680 if (r < 0) {
681 nfc_err(&client->dev, "Unable to register IRQ handler\n");
682 return r;
683 }
684
685 return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
686 ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM,
687 ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
688}
689
690static int st21nfca_hci_i2c_remove(struct i2c_client *client)
691{
692 struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
693
694 dev_dbg(&client->dev, "%s\n", __func__);
695
696 st21nfca_hci_remove(phy->hdev);
697
698 if (phy->powered)
699 st21nfca_hci_i2c_disable(phy);
700
701 return 0;
702}
703
704static const struct of_device_id of_st21nfca_i2c_match[] = {
705 { .compatible = "st,st21nfca_i2c", },
706 {}
707};
708
709static struct i2c_driver st21nfca_hci_i2c_driver = {
710 .driver = {
711 .owner = THIS_MODULE,
712 .name = ST21NFCA_HCI_I2C_DRIVER_NAME,
714 .of_match_table = of_match_ptr(of_st21nfca_i2c_match),
715 },
716 .probe = st21nfca_hci_i2c_probe,
717 .id_table = st21nfca_hci_i2c_id_table,
718 .remove = st21nfca_hci_i2c_remove,
719};
720
721module_i2c_driver(st21nfca_hci_i2c_driver);
722
723MODULE_LICENSE("GPL");
724MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
new file mode 100644
index 000000000000..51e0f00b3a4f
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -0,0 +1,698 @@
1/*
2 * HCI based Driver for STMicroelectronics NFC Chip
3 *
4 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/nfc.h>
21#include <net/nfc/hci.h>
22#include <net/nfc/llc.h>
23
24#include "st21nfca.h"
25
26#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
27
28#define FULL_VERSION_LEN 3
29
30/* Proprietary gates, events, commands and registers */
31
32/* Commands that apply to all RF readers */
33#define ST21NFCA_RF_READER_CMD_PRESENCE_CHECK 0x30
34
35#define ST21NFCA_RF_READER_ISO15693_GATE 0x12
36#define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01
37
38/*
39 * Reader gate for communication with contact-less cards using Type A
40 * protocol ISO14443-3 but not compliant with ISO14443-4
41 */
42#define ST21NFCA_RF_READER_14443_3_A_GATE 0x15
43#define ST21NFCA_RF_READER_14443_3_A_UID 0x02
44#define ST21NFCA_RF_READER_14443_3_A_ATQA 0x03
45#define ST21NFCA_RF_READER_14443_3_A_SAK 0x04
46
47#define ST21NFCA_DEVICE_MGNT_GATE 0x01
48#define ST21NFCA_DEVICE_MGNT_PIPE 0x02
49
50#define ST21NFCA_DM_GETINFO 0x13
51#define ST21NFCA_DM_GETINFO_PIPE_LIST 0x02
52#define ST21NFCA_DM_GETINFO_PIPE_INFO 0x01
53#define ST21NFCA_DM_PIPE_CREATED 0x02
54#define ST21NFCA_DM_PIPE_OPEN 0x04
55#define ST21NFCA_DM_RF_ACTIVE 0x80
56
57#define ST21NFCA_DM_IS_PIPE_OPEN(p) \
58 ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
59
60#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter */
61
62static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
63
64static struct nfc_hci_gate st21nfca_gates[] = {
65 {NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
66 {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
67 {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
68 {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
69 {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
70 {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
71 {ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
72 {ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
73 {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
74 {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
75};
76
77struct st21nfca_pipe_info {
78 u8 pipe_state;
79 u8 src_host_id;
80 u8 src_gate_id;
81 u8 dst_host_id;
82 u8 dst_gate_id;
83} __packed;
84
85/* Largest headroom needed for outgoing custom commands */
86#define ST21NFCA_CMDS_HEADROOM 7
87
88static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
89{
90 int i, j, r;
91 struct sk_buff *skb_pipe_list, *skb_pipe_info;
92 struct st21nfca_pipe_info *info;
93
94 u8 pipe_list[] = { ST21NFCA_DM_GETINFO_PIPE_LIST,
95 NFC_HCI_TERMINAL_HOST_ID
96 };
97 u8 pipe_info[] = { ST21NFCA_DM_GETINFO_PIPE_INFO,
98 NFC_HCI_TERMINAL_HOST_ID, 0
99 };
100
101 skb_pipe_list = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
102 if (!skb_pipe_list) {
103 r = -ENOMEM;
104 goto free_list;
105 }
106
107 skb_pipe_info = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
108 if (!skb_pipe_info) {
109 r = -ENOMEM;
110 goto free_info;
111 }
112
113 /* On the ST21NFCA device, pipe numbers are dynamic.
114 * A maximum of 16 pipes can be created at the same time
115 * If pipes are already created, hci_dev_up will fail.
116 * Doing a clear-all-pipes is a bad idea because:
117 * - It does useless EEPROM cycling
118 * - It might cause issues for secure element support
119 * (such as removing connectivity or APDU reader pipe)
120 * A better approach on ST21NFCA is to:
121 * - get a pipe list for each host.
122 * (eg: NFC_HCI_HOST_CONTROLLER_ID for now).
123 * (TODO Later on UICC HOST and eSE HOST)
124 * - get pipe information
125 * - match retrieved pipe list in st21nfca_gates
126 * ST21NFCA_DEVICE_MGNT_GATE is a proprietary gate
127 * with ST21NFCA_DEVICE_MGNT_PIPE.
128 * A pipe can be closed and then needs to be reopened.
129 */
130 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
131 ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE);
132 if (r < 0)
133 goto free_info;
134
135 /* Get pipe list */
136 r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
137 ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
138 &skb_pipe_list);
139 if (r < 0)
140 goto free_info;
141
142 /* Complete the existing gate_pipe table */
143 for (i = 0; i < skb_pipe_list->len; i++) {
144 pipe_info[2] = skb_pipe_list->data[i];
145 r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
146 ST21NFCA_DM_GETINFO, pipe_info,
147 sizeof(pipe_info), &skb_pipe_info);
148
149 if (r)
150 continue;
151
152 /*
153 * Match pipe ID and gate ID
154 * Output format from ST21NFC_DM_GETINFO is:
155 * - pipe state (1byte)
156 * - source hid (1byte)
157 * - source gid (1byte)
158 * - destination hid (1byte)
159 * - destination gid (1byte)
160 */
161 info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
162 for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
163 (st21nfca_gates[j].gate != info->dst_gate_id);
164 j++)
165 ;
166
167 if (j < ARRAY_SIZE(st21nfca_gates) &&
168 st21nfca_gates[j].gate == info->dst_gate_id &&
169 ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
170 st21nfca_gates[j].pipe = pipe_info[2];
171 hdev->gate2pipe[st21nfca_gates[j].gate] =
172 st21nfca_gates[j].pipe;
173 }
174 }
175
176 /*
177 * 3 gates have a well-known pipe ID.
178 * They will never appear in the pipe list.
179 */
180 if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
181 for (i = skb_pipe_list->len + 3;
182 i < ARRAY_SIZE(st21nfca_gates); i++) {
183 r = nfc_hci_connect_gate(hdev,
184 NFC_HCI_HOST_CONTROLLER_ID,
185 st21nfca_gates[i].gate,
186 st21nfca_gates[i].pipe);
187 if (r < 0)
188 goto free_info;
189 }
190 }
191
192 memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
193free_info:
194 kfree_skb(skb_pipe_info);
195free_list:
196 kfree_skb(skb_pipe_list);
197 return r;
198}
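
The five-byte answer format documented in the loop above maps directly onto struct st21nfca_pipe_info. A user-space sketch of the decode and the open-pipe test, with the sample bytes invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same layout as st21nfca_pipe_info: one byte per field, packed. */
struct pipe_info {
	uint8_t pipe_state;
	uint8_t src_host_id;
	uint8_t src_gate_id;
	uint8_t dst_host_id;
	uint8_t dst_gate_id;
} __attribute__((packed));

#define PIPE_CREATED	0x02
#define PIPE_OPEN	0x04
#define IS_PIPE_OPEN(p)	(((p) & 0x0f) == (PIPE_CREATED | PIPE_OPEN))

int main(void)
{
	/* state=created|open, src host 1 gate 0x15, dst host 0 gate 0x15 */
	uint8_t answer[] = { 0x06, 0x01, 0x15, 0x00, 0x15 };
	const struct pipe_info *info = (const void *)answer;

	printf("dst gate 0x%02x is %s\n", info->dst_gate_id,
	       IS_PIPE_OPEN(info->pipe_state) ? "open" : "not open");
	return 0;
}
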
199
200static int st21nfca_hci_open(struct nfc_hci_dev *hdev)
201{
202 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
203 int r;
204
205 mutex_lock(&info->info_lock);
206
207 if (info->state != ST21NFCA_ST_COLD) {
208 r = -EBUSY;
209 goto out;
210 }
211
212 r = info->phy_ops->enable(info->phy_id);
213
214 if (r == 0)
215 info->state = ST21NFCA_ST_READY;
216
217out:
218 mutex_unlock(&info->info_lock);
219 return r;
220}
221
222static void st21nfca_hci_close(struct nfc_hci_dev *hdev)
223{
224 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
225
226 mutex_lock(&info->info_lock);
227
228 if (info->state == ST21NFCA_ST_COLD)
229 goto out;
230
231 info->phy_ops->disable(info->phy_id);
232 info->state = ST21NFCA_ST_COLD;
233
234out:
235 mutex_unlock(&info->info_lock);
236}
237
238static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
239{
240 struct sk_buff *skb;
241
242 u8 param;
243 int r;
244
245 param = NFC_HCI_UICC_HOST_ID;
246 r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
247 NFC_HCI_ADMIN_WHITELIST, &param, 1);
248 if (r < 0)
249 return r;
250
251 /* Set NFC_MODE in device management gate to enable */
252 r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
253 ST21NFCA_NFC_MODE, &skb);
254 if (r < 0)
255 return r;
256
257 if (skb->data[0] == 0) {
258 kfree_skb(skb);
259 param = 1;
260
261 r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
262 ST21NFCA_NFC_MODE, &param, 1);
263 if (r < 0)
264 return r;
265 }
266
267 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
268 NFC_HCI_EVT_END_OPERATION, NULL, 0);
269 if (r < 0)
270 return r;
271
272 r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
273 NFC_HCI_ID_MGMT_VERSION_SW, &skb);
274 if (r < 0)
275 return r;
276
277 if (skb->len != FULL_VERSION_LEN) {
278 kfree_skb(skb);
279 return -EINVAL;
280 }
281
282 print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
283 DUMP_PREFIX_NONE, 16, 1,
284 skb->data, FULL_VERSION_LEN, false);
285
286 kfree_skb(skb);
287
288 return 0;
289}
290
291static int st21nfca_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
292{
293 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
294
295 return info->phy_ops->write(info->phy_id, skb);
296}
297
298static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
299 u32 im_protocols, u32 tm_protocols)
300{
301 int r;
302
303 pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
304 __func__, im_protocols, tm_protocols);
305
306 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
307 NFC_HCI_EVT_END_OPERATION, NULL, 0);
308 if (r < 0)
309 return r;
310 if (im_protocols) {
311		/*
312		 * Enable polling according to im_protocols: CLOSE the
313		 * pipe of each reader gate whose protocol is not requested.
314		 */
315 if ((NFC_HCI_RF_READER_B_GATE & im_protocols) == 0) {
316 r = nfc_hci_disconnect_gate(hdev,
317 NFC_HCI_RF_READER_B_GATE);
318 if (r < 0)
319 return r;
320 }
321
322 if ((NFC_HCI_RF_READER_A_GATE & im_protocols) == 0) {
323 r = nfc_hci_disconnect_gate(hdev,
324 NFC_HCI_RF_READER_A_GATE);
325 if (r < 0)
326 return r;
327 }
328
329 if ((ST21NFCA_RF_READER_F_GATE & im_protocols) == 0) {
330 r = nfc_hci_disconnect_gate(hdev,
331 ST21NFCA_RF_READER_F_GATE);
332 if (r < 0)
333 return r;
334 }
335
336 if ((ST21NFCA_RF_READER_14443_3_A_GATE & im_protocols) == 0) {
337 r = nfc_hci_disconnect_gate(hdev,
338 ST21NFCA_RF_READER_14443_3_A_GATE);
339 if (r < 0)
340 return r;
341 }
342
343 if ((ST21NFCA_RF_READER_ISO15693_GATE & im_protocols) == 0) {
344 r = nfc_hci_disconnect_gate(hdev,
345 ST21NFCA_RF_READER_ISO15693_GATE);
346 if (r < 0)
347 return r;
348 }
349
350 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
351 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
352 if (r < 0)
353 nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
354 NFC_HCI_EVT_END_OPERATION, NULL, 0);
355 }
356 return r;
357}
358
359static int st21nfca_get_iso14443_3_atqa(struct nfc_hci_dev *hdev, u16 *atqa)
360{
361 int r;
362 struct sk_buff *atqa_skb = NULL;
363
364 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
365 ST21NFCA_RF_READER_14443_3_A_ATQA, &atqa_skb);
366 if (r < 0)
367 goto exit;
368
369 if (atqa_skb->len != 2) {
370 r = -EPROTO;
371 goto exit;
372 }
373
374 *atqa = be16_to_cpu(*(__be16 *) atqa_skb->data);
375
376exit:
377 kfree_skb(atqa_skb);
378 return r;
379}
380
381static int st21nfca_get_iso14443_3_sak(struct nfc_hci_dev *hdev, u8 *sak)
382{
383 int r;
384 struct sk_buff *sak_skb = NULL;
385
386 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
387 ST21NFCA_RF_READER_14443_3_A_SAK, &sak_skb);
388 if (r < 0)
389 goto exit;
390
391 if (sak_skb->len != 1) {
392 r = -EPROTO;
393 goto exit;
394 }
395
396 *sak = sak_skb->data[0];
397
398exit:
399 kfree_skb(sak_skb);
400 return r;
401}
402
403static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
404 int *len)
405{
406 int r;
407 struct sk_buff *uid_skb = NULL;
408
409 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
410 ST21NFCA_RF_READER_14443_3_A_UID, &uid_skb);
411 if (r < 0)
412 goto exit;
413
414 if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
415 r = -EPROTO;
416 goto exit;
417 }
418
419	memcpy(gate, uid_skb->data, uid_skb->len);
420 *len = uid_skb->len;
421exit:
422 kfree_skb(uid_skb);
423 return r;
424}
425
426static int st21nfca_get_iso15693_inventory(struct nfc_hci_dev *hdev,
427 struct nfc_target *target)
428{
429 int r;
430 struct sk_buff *inventory_skb = NULL;
431
432 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_ISO15693_GATE,
433 ST21NFCA_RF_READER_ISO15693_INVENTORY,
434 &inventory_skb);
435 if (r < 0)
436 goto exit;
437
438 skb_pull(inventory_skb, 2);
439
440 if (inventory_skb->len == 0 ||
441 inventory_skb->len > NFC_ISO15693_UID_MAXSIZE) {
442 r = -EPROTO;
443 goto exit;
444 }
445
446 memcpy(target->iso15693_uid, inventory_skb->data, inventory_skb->len);
447 target->iso15693_dsfid = inventory_skb->data[1];
448 target->is_iso15693 = 1;
449exit:
450 kfree_skb(inventory_skb);
451 return r;
452}
453
454static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
455 struct nfc_target *target)
456{
457 int r, len;
458 u16 atqa;
459 u8 sak;
460 u8 uid[NFC_NFCID1_MAXSIZE];
461
462 switch (gate) {
463 case ST21NFCA_RF_READER_F_GATE:
464 target->supported_protocols = NFC_PROTO_FELICA_MASK;
465 break;
466 case ST21NFCA_RF_READER_14443_3_A_GATE:
467 /* ISO14443-3 type 1 or 2 tags */
468 r = st21nfca_get_iso14443_3_atqa(hdev, &atqa);
469 if (r < 0)
470 return r;
471 if (atqa == 0x000c) {
472 target->supported_protocols = NFC_PROTO_JEWEL_MASK;
473 target->sens_res = 0x0c00;
474 } else {
475 r = st21nfca_get_iso14443_3_sak(hdev, &sak);
476 if (r < 0)
477 return r;
478
479 r = st21nfca_get_iso14443_3_uid(hdev, uid, &len);
480 if (r < 0)
481 return r;
482
483 target->supported_protocols =
484 nfc_hci_sak_to_protocol(sak);
485 if (target->supported_protocols == 0xffffffff)
486 return -EPROTO;
487
488 target->sens_res = atqa;
489 target->sel_res = sak;
490 memcpy(target->nfcid1, uid, len);
491 target->nfcid1_len = len;
492 }
493
494 break;
495 case ST21NFCA_RF_READER_ISO15693_GATE:
496 target->supported_protocols = NFC_PROTO_ISO15693_MASK;
497 r = st21nfca_get_iso15693_inventory(hdev, target);
498 if (r < 0)
499 return r;
500 break;
501 default:
502 return -EPROTO;
503 }
504
505 return 0;
506}
507
508#define ST21NFCA_CB_TYPE_READER_ISO15693 1
509static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb,
510 int err)
511{
512 struct st21nfca_hci_info *info = context;
513
514 switch (info->async_cb_type) {
515 case ST21NFCA_CB_TYPE_READER_ISO15693:
516 if (err == 0)
517 skb_trim(skb, skb->len - 1);
518 info->async_cb(info->async_cb_context, skb, err);
519 break;
520 default:
521 if (err == 0)
522 kfree_skb(skb);
523 break;
524 }
525}
526
527/*
528 * Returns:
529 * <= 0: the driver handled the data exchange
530 * 1: the driver does not handle it specially; do standard processing
531 */
532static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
533 struct nfc_target *target,
534 struct sk_buff *skb,
535 data_exchange_cb_t cb, void *cb_context)
536{
537 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
538
539 pr_info(DRIVER_DESC ": %s for gate=%d len=%d\n", __func__,
540 target->hci_reader_gate, skb->len);
541
542 switch (target->hci_reader_gate) {
543 case ST21NFCA_RF_READER_F_GATE:
544 *skb_push(skb, 1) = 0x1a;
545 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
546 ST21NFCA_WR_XCHG_DATA, skb->data,
547 skb->len, cb, cb_context);
548 case ST21NFCA_RF_READER_14443_3_A_GATE:
549 *skb_push(skb, 1) = 0x1a; /* CTR, see spec:10.2.2.1 */
550
551 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
552 ST21NFCA_WR_XCHG_DATA, skb->data,
553 skb->len, cb, cb_context);
554 case ST21NFCA_RF_READER_ISO15693_GATE:
555 info->async_cb_type = ST21NFCA_CB_TYPE_READER_ISO15693;
556 info->async_cb = cb;
557 info->async_cb_context = cb_context;
558
559 *skb_push(skb, 1) = 0x17;
560
561 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
562 ST21NFCA_WR_XCHG_DATA, skb->data,
563 skb->len,
564 st21nfca_hci_data_exchange_cb,
565 info);
566 break;
567 default:
568 return 1;
569 }
570}
571
572static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
573 struct nfc_target *target)
574{
575 u8 fwi = 0x11;
576 switch (target->hci_reader_gate) {
577 case NFC_HCI_RF_READER_A_GATE:
578 case NFC_HCI_RF_READER_B_GATE:
579		/*
580		 * PRESENCE_CHECK is available on those gates.
581		 * However, the answer to this command takes 3 * fwi
582		 * when the card is not present.
583		 * Instead, we send an empty I-Frame with a very short
584		 * configurable fwi of ~604µs.
585		 */
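		/*
		 * (Worked example, assuming the ISO14443-4 frame waiting
		 * time formula FWT = (256 * 16 / fc) * 2^FWI with
		 * fc = 13.56 MHz: FWT ~= 302us * 2^FWI, so FWI = 1 gives
		 * the ~604us quoted above.)
		 */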
586 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
587 ST21NFCA_WR_XCHG_DATA, &fwi, 1, NULL);
588 case ST21NFCA_RF_READER_14443_3_A_GATE:
589 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
590 ST21NFCA_RF_READER_CMD_PRESENCE_CHECK,
591 NULL, 0, NULL);
592 default:
593 return -EOPNOTSUPP;
594 }
595}
596
597static struct nfc_hci_ops st21nfca_hci_ops = {
598 .open = st21nfca_hci_open,
599 .close = st21nfca_hci_close,
600 .load_session = st21nfca_hci_load_session,
601 .hci_ready = st21nfca_hci_ready,
602 .xmit = st21nfca_hci_xmit,
603 .start_poll = st21nfca_hci_start_poll,
604 .target_from_gate = st21nfca_hci_target_from_gate,
605 .im_transceive = st21nfca_hci_im_transceive,
606 .check_presence = st21nfca_hci_check_presence,
607};
608
609int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
610 char *llc_name, int phy_headroom, int phy_tailroom,
611 int phy_payload, struct nfc_hci_dev **hdev)
612{
613 struct st21nfca_hci_info *info;
614	int r = -ENODEV;	/* returned when no free device number is found below */
615 int dev_num;
616 u32 protocols;
617 struct nfc_hci_init_data init_data;
618 unsigned long quirks = 0;
619
620 info = kzalloc(sizeof(struct st21nfca_hci_info), GFP_KERNEL);
621 if (!info) {
622 r = -ENOMEM;
623 goto err_alloc_hdev;
624 }
625
626 info->phy_ops = phy_ops;
627 info->phy_id = phy_id;
628 info->state = ST21NFCA_ST_COLD;
629 mutex_init(&info->info_lock);
630
631 init_data.gate_count = ARRAY_SIZE(st21nfca_gates);
632
633 memcpy(init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
634
635	/*
636	 * The session id must include the driver name + i2c bus addr:
637	 * persistent info used to discriminate between two identical chips.
638	 */
639 dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES);
640 if (dev_num >= ST21NFCA_NUM_DEVICES)
641 goto err_alloc_hdev;
642
643 scnprintf(init_data.session_id, sizeof(init_data.session_id), "%s%2x",
644 "ST21AH", dev_num);
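	/*
	 * Note: "%2x" is width 2 and space-padded, not zero-padded, so
	 * e.g. dev_num 3 yields the session id "ST21AH 3".
	 */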
645
646 protocols = NFC_PROTO_JEWEL_MASK |
647 NFC_PROTO_MIFARE_MASK |
648 NFC_PROTO_FELICA_MASK |
649 NFC_PROTO_ISO14443_MASK |
650 NFC_PROTO_ISO14443_B_MASK |
651 NFC_PROTO_ISO15693_MASK;
652
653 set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
654
655 info->hdev =
656 nfc_hci_allocate_device(&st21nfca_hci_ops, &init_data, quirks,
657 protocols, llc_name,
658 phy_headroom + ST21NFCA_CMDS_HEADROOM,
659 phy_tailroom, phy_payload);
660
661 if (!info->hdev) {
662 pr_err("Cannot allocate nfc hdev.\n");
663 r = -ENOMEM;
664 goto err_alloc_hdev;
665 }
666
667 nfc_hci_set_clientdata(info->hdev, info);
668
669 r = nfc_hci_register_device(info->hdev);
670 if (r)
671 goto err_regdev;
672
673 *hdev = info->hdev;
674
675 return 0;
676
677err_regdev:
678 nfc_hci_free_device(info->hdev);
679
680err_alloc_hdev:
681 kfree(info);
682
683 return r;
684}
685EXPORT_SYMBOL(st21nfca_hci_probe);
686
687void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
688{
689 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
690
691 nfc_hci_unregister_device(hdev);
692 nfc_hci_free_device(hdev);
693 kfree(info);
694}
695EXPORT_SYMBOL(st21nfca_hci_remove);
696
697MODULE_LICENSE("GPL");
698MODULE_DESCRIPTION(DRIVER_DESC);
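/*
 * (Usage sketch for a phy driver; names prefixed "my_" are hypothetical,
 * and the llc name "shdlc" is assumed to match LLC_SHDLC_NAME:
 *
 *	static struct nfc_phy_ops my_phy_ops = {
 *		.write = my_write, .enable = my_enable, .disable = my_disable,
 *	};
 *	...
 *	r = st21nfca_hci_probe(my_phy, &my_phy_ops, "shdlc",
 *			       MY_FRAME_HEADROOM, MY_FRAME_TAILROOM,
 *			       ST21NFCA_HCI_LLC_MAX_PAYLOAD, &hdev);
 * )
 */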
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
new file mode 100644
index 000000000000..334cd90bcc8c
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __LOCAL_ST21NFCA_H_
18#define __LOCAL_ST21NFCA_H_
19
20#include <net/nfc/hci.h>
21
22#define HCI_MODE 0
23
24/* framing in HCI mode */
25#define ST21NFCA_SOF_EOF_LEN 2
26
27/* The length value is 0 almost every time */
28#define ST21NFCA_HCI_LLC_LEN 1
29
30/* Worst-case size:
31 * the CRC is normally 2 bytes long, but byte stuffing
32 * may double it when a CRC byte equals ST21NFCA_SOF_EOF
33 */
34#define ST21NFCA_HCI_LLC_CRC 4
35
36#define ST21NFCA_HCI_LLC_LEN_CRC (ST21NFCA_SOF_EOF_LEN + \
37 ST21NFCA_HCI_LLC_LEN + \
38 ST21NFCA_HCI_LLC_CRC)
39#define ST21NFCA_HCI_LLC_MIN_SIZE (1 + ST21NFCA_HCI_LLC_LEN_CRC)
40
41/* Worst case when adding byte stuffing between each byte */
42#define ST21NFCA_HCI_LLC_MAX_PAYLOAD 29
43#define ST21NFCA_HCI_LLC_MAX_SIZE (ST21NFCA_HCI_LLC_LEN_CRC + 1 + \
44 ST21NFCA_HCI_LLC_MAX_PAYLOAD)
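/*
 * Worked out from the defines above:
 * ST21NFCA_HCI_LLC_LEN_CRC = 2 + 1 + 4 = 7,
 * ST21NFCA_HCI_LLC_MIN_SIZE = 1 + 7 = 8 and
 * ST21NFCA_HCI_LLC_MAX_SIZE = 7 + 1 + 29 = 37 bytes.
 */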
45
46#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
47
48#define ST21NFCA_HCI_MODE 0
49
50#define ST21NFCA_NUM_DEVICES 256
51
52int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
53 char *llc_name, int phy_headroom, int phy_tailroom,
54 int phy_payload, struct nfc_hci_dev **hdev);
55void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
56
57enum st21nfca_state {
58 ST21NFCA_ST_COLD,
59 ST21NFCA_ST_READY,
60};
61
62struct st21nfca_hci_info {
63 struct nfc_phy_ops *phy_ops;
64 void *phy_id;
65
66 struct nfc_hci_dev *hdev;
67
68 enum st21nfca_state state;
69
70 struct mutex info_lock;
71
72 int async_cb_type;
73 data_exchange_cb_t async_cb;
74 void *async_cb_context;
75
76} __packed;
77
78/* Reader RF commands */
79#define ST21NFCA_WR_XCHG_DATA 0x10
80
81#define ST21NFCA_RF_READER_F_GATE 0x14
82#define ST21NFCA_RF_READER_F_DATARATE 0x01
83#define ST21NFCA_RF_READER_F_DATARATE_106 0x01
84#define ST21NFCA_RF_READER_F_DATARATE_212 0x02
85#define ST21NFCA_RF_READER_F_DATARATE_424 0x04
86
87#endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index d9babe986473..3b78b031e617 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -16,6 +16,7 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/pm_runtime.h>
19#include <linux/nfc.h> 20#include <linux/nfc.h>
20#include <linux/skbuff.h> 21#include <linux/skbuff.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
@@ -67,14 +68,14 @@
67 * only the SRX bit set, it means that all of the data has been received 68 * only the SRX bit set, it means that all of the data has been received
68 * (once what's in the fifo has been read). However, depending on timing 69 * (once what's in the fifo has been read). However, depending on timing
69 * an interrupt status with only the SRX bit set may not be received. In 70 * an interrupt status with only the SRX bit set may not be received. In
70 * those cases, the timeout mechanism is used to wait 5 ms in case more 71 * those cases, the timeout mechanism is used to wait 20 ms in case more
71 * data arrives. After 5 ms, it is assumed that all of the data has been 72 * data arrives. After 20 ms, it is assumed that all of the data has been
72 * received and the accumulated rx data is sent upstream. The 73 * received and the accumulated rx data is sent upstream. The
73 * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose 74 * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
74 * (i.e., it indicates that some data has been received but we're not sure 75 * (i.e., it indicates that some data has been received but we're not sure
75 * if there is more coming so a timeout in this state means all data has 76 * if there is more coming so a timeout in this state means all data has
76 * been received and there isn't an error). The delay is 5 ms since delays 77 * been received and there isn't an error). The delay is 20 ms since delays
77 * over 2 ms have been observed during testing (a little extra just in case). 78 * of ~16 ms have been observed during testing.
78 * 79 *
79 * Type 2 write and sector select commands respond with a 4-bit ACK or NACK. 80 * Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
80 * Having only 4 bits in the FIFO won't normally generate an interrupt so 81 * Having only 4 bits in the FIFO won't normally generate an interrupt so
@@ -104,8 +105,11 @@
104 105
105#define TRF7970A_SUPPORTED_PROTOCOLS \ 106#define TRF7970A_SUPPORTED_PROTOCOLS \
106 (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \ 107 (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \
108 NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_FELICA_MASK | \
107 NFC_PROTO_ISO15693_MASK) 109 NFC_PROTO_ISO15693_MASK)
108 110
111#define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */
112
109/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends 113/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
110 * on what the current framing is, the address of the TX length byte 1 114 * on what the current framing is, the address of the TX length byte 1
111 * register (0x1d), and the 2 byte length of the data to be transmitted. 115 * register (0x1d), and the 2 byte length of the data to be transmitted.
@@ -120,7 +124,7 @@
120/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */ 124/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
121#define TRF7970A_TX_MAX (4096 - 1) 125#define TRF7970A_TX_MAX (4096 - 1)
122 126
123#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 5 127#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 20
124#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3 128#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3
125#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20 129#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20
126 130
@@ -330,13 +334,15 @@ struct trf7970a {
330 struct regulator *regulator; 334 struct regulator *regulator;
331 struct nfc_digital_dev *ddev; 335 struct nfc_digital_dev *ddev;
332 u32 quirks; 336 u32 quirks;
333 bool powering_up;
334 bool aborting; 337 bool aborting;
335 struct sk_buff *tx_skb; 338 struct sk_buff *tx_skb;
336 struct sk_buff *rx_skb; 339 struct sk_buff *rx_skb;
337 nfc_digital_cmd_complete_t cb; 340 nfc_digital_cmd_complete_t cb;
338 void *cb_arg; 341 void *cb_arg;
342 u8 chip_status_ctrl;
339 u8 iso_ctrl; 343 u8 iso_ctrl;
344 u8 iso_ctrl_tech;
345 u8 modulator_sys_clk_ctrl;
340 u8 special_fcn_reg1; 346 u8 special_fcn_reg1;
341 int technology; 347 int technology;
342 int framing; 348 int framing;
@@ -681,7 +687,9 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id)
681 trf->ignore_timeout = 687 trf->ignore_timeout =
682 !cancel_delayed_work(&trf->timeout_work); 688 !cancel_delayed_work(&trf->timeout_work);
683 trf7970a_drain_fifo(trf, status); 689 trf7970a_drain_fifo(trf, status);
684 } else if (!(status & TRF7970A_IRQ_STATUS_TX)) { 690 } else if (status == TRF7970A_IRQ_STATUS_TX) {
691 trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
692 } else {
685 trf7970a_send_err_upstream(trf, -EIO); 693 trf7970a_send_err_upstream(trf, -EIO);
686 } 694 }
687 break; 695 break;
@@ -757,8 +765,8 @@ static int trf7970a_init(struct trf7970a *trf)
757 if (ret) 765 if (ret)
758 goto err_out; 766 goto err_out;
759 767
760 ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, 768 /* Must clear NFC Target Detection Level reg due to erratum */
761 TRF7970A_MODULATOR_DEPTH_OOK); 769 ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
762 if (ret) 770 if (ret)
763 goto err_out; 771 goto err_out;
764 772
@@ -774,12 +782,7 @@ static int trf7970a_init(struct trf7970a *trf)
774 782
775 trf->special_fcn_reg1 = 0; 783 trf->special_fcn_reg1 = 0;
776 784
777 ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, 785 trf->iso_ctrl = 0xff;
778 TRF7970A_CHIP_STATUS_RF_ON |
779 TRF7970A_CHIP_STATUS_VRS5_3);
780 if (ret)
781 goto err_out;
782
783 return 0; 786 return 0;
784 787
785err_out: 788err_out:
@@ -791,53 +794,29 @@ static void trf7970a_switch_rf_off(struct trf7970a *trf)
791{ 794{
792 dev_dbg(trf->dev, "Switching rf off\n"); 795 dev_dbg(trf->dev, "Switching rf off\n");
793 796
794 gpio_set_value(trf->en_gpio, 0); 797 trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
795 gpio_set_value(trf->en2_gpio, 0); 798
799 trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl);
796 800
797 trf->aborting = false; 801 trf->aborting = false;
798 trf->state = TRF7970A_ST_OFF; 802 trf->state = TRF7970A_ST_OFF;
803
804 pm_runtime_mark_last_busy(trf->dev);
805 pm_runtime_put_autosuspend(trf->dev);
799} 806}
800 807
801static int trf7970a_switch_rf_on(struct trf7970a *trf) 808static void trf7970a_switch_rf_on(struct trf7970a *trf)
802{ 809{
803 unsigned long delay;
804 int ret;
805
806 dev_dbg(trf->dev, "Switching rf on\n"); 810 dev_dbg(trf->dev, "Switching rf on\n");
807 811
808 if (trf->powering_up) 812 pm_runtime_get_sync(trf->dev);
809 usleep_range(5000, 6000);
810
811 gpio_set_value(trf->en2_gpio, 1);
812 usleep_range(1000, 2000);
813 gpio_set_value(trf->en_gpio, 1);
814 813
815 /* The delay between enabling the trf7970a and issuing the first 814 trf->state = TRF7970A_ST_IDLE;
816 * command is significantly longer the very first time after powering
817 * up. Make sure the longer delay is only done the first time.
818 */
819 if (trf->powering_up) {
820 delay = 20000;
821 trf->powering_up = false;
822 } else {
823 delay = 5000;
824 }
825
826 usleep_range(delay, delay + 1000);
827
828 ret = trf7970a_init(trf);
829 if (ret)
830 trf7970a_switch_rf_off(trf);
831 else
832 trf->state = TRF7970A_ST_IDLE;
833
834 return ret;
835} 815}
836 816
837static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) 817static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
838{ 818{
839 struct trf7970a *trf = nfc_digital_get_drvdata(ddev); 819 struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
840 int ret = 0;
841 820
842 dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on); 821 dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
843 822
@@ -846,7 +825,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
846 if (on) { 825 if (on) {
847 switch (trf->state) { 826 switch (trf->state) {
848 case TRF7970A_ST_OFF: 827 case TRF7970A_ST_OFF:
849 ret = trf7970a_switch_rf_on(trf); 828 trf7970a_switch_rf_on(trf);
850 break; 829 break;
851 case TRF7970A_ST_IDLE: 830 case TRF7970A_ST_IDLE:
852 case TRF7970A_ST_IDLE_RX_BLOCKED: 831 case TRF7970A_ST_IDLE_RX_BLOCKED:
@@ -871,7 +850,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
871 } 850 }
872 851
873 mutex_unlock(&trf->lock); 852 mutex_unlock(&trf->lock);
874 return ret; 853 return 0;
875} 854}
876 855
877static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech) 856static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
@@ -882,10 +861,24 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
882 861
883 switch (tech) { 862 switch (tech) {
884 case NFC_DIGITAL_RF_TECH_106A: 863 case NFC_DIGITAL_RF_TECH_106A:
885 trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106; 864 trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106;
865 trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
866 break;
867 case NFC_DIGITAL_RF_TECH_106B:
868 trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106;
869 trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
870 break;
871 case NFC_DIGITAL_RF_TECH_212F:
872 trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212;
873 trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
874 break;
875 case NFC_DIGITAL_RF_TECH_424F:
876 trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424;
877 trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
886 break; 878 break;
887 case NFC_DIGITAL_RF_TECH_ISO15693: 879 case NFC_DIGITAL_RF_TECH_ISO15693:
888 trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648; 880 trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
881 trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
889 break; 882 break;
890 default: 883 default:
891 dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech); 884 dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
@@ -899,24 +892,31 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
899 892
900static int trf7970a_config_framing(struct trf7970a *trf, int framing) 893static int trf7970a_config_framing(struct trf7970a *trf, int framing)
901{ 894{
895 u8 iso_ctrl = trf->iso_ctrl_tech;
896 int ret;
897
902 dev_dbg(trf->dev, "framing: %d\n", framing); 898 dev_dbg(trf->dev, "framing: %d\n", framing);
903 899
904 switch (framing) { 900 switch (framing) {
905 case NFC_DIGITAL_FRAMING_NFCA_SHORT: 901 case NFC_DIGITAL_FRAMING_NFCA_SHORT:
906 case NFC_DIGITAL_FRAMING_NFCA_STANDARD: 902 case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
907 trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; 903 trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
908 trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; 904 iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
909 break; 905 break;
910 case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: 906 case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
911 case NFC_DIGITAL_FRAMING_NFCA_T4T: 907 case NFC_DIGITAL_FRAMING_NFCA_T4T:
908 case NFC_DIGITAL_FRAMING_NFCB:
909 case NFC_DIGITAL_FRAMING_NFCB_T4T:
910 case NFC_DIGITAL_FRAMING_NFCF:
911 case NFC_DIGITAL_FRAMING_NFCF_T3T:
912 case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY: 912 case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
913 case NFC_DIGITAL_FRAMING_ISO15693_T5T: 913 case NFC_DIGITAL_FRAMING_ISO15693_T5T:
914 trf->tx_cmd = TRF7970A_CMD_TRANSMIT; 914 trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
915 trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; 915 iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
916 break; 916 break;
917 case NFC_DIGITAL_FRAMING_NFCA_T2T: 917 case NFC_DIGITAL_FRAMING_NFCA_T2T:
918 trf->tx_cmd = TRF7970A_CMD_TRANSMIT; 918 trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
919 trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; 919 iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
920 break; 920 break;
921 default: 921 default:
922 dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing); 922 dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
@@ -925,24 +925,46 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing)
925 925
926 trf->framing = framing; 926 trf->framing = framing;
927 927
928 return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl); 928 if (iso_ctrl != trf->iso_ctrl) {
929 ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
930 if (ret)
931 return ret;
932
933 trf->iso_ctrl = iso_ctrl;
934
935 ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
936 trf->modulator_sys_clk_ctrl);
937 if (ret)
938 return ret;
939 }
940
941 if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
942 ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
943 trf->chip_status_ctrl |
944 TRF7970A_CHIP_STATUS_RF_ON);
945 if (ret)
946 return ret;
947
948 trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON;
949
950 usleep_range(5000, 6000);
951 }
952
953 return 0;
929} 954}
930 955
931static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type, 956static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
932 int param) 957 int param)
933{ 958{
934 struct trf7970a *trf = nfc_digital_get_drvdata(ddev); 959 struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
935 int ret = 0; 960 int ret;
936 961
937 dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param); 962 dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
938 963
939 mutex_lock(&trf->lock); 964 mutex_lock(&trf->lock);
940 965
941 if (trf->state == TRF7970A_ST_OFF) { 966 if (trf->state == TRF7970A_ST_OFF)
942 ret = trf7970a_switch_rf_on(trf); 967 trf7970a_switch_rf_on(trf);
943 if (ret)
944 goto err_out;
945 }
946 968
947 switch (type) { 969 switch (type) {
948 case NFC_DIGITAL_CONFIG_RF_TECH: 970 case NFC_DIGITAL_CONFIG_RF_TECH:
@@ -956,7 +978,6 @@ static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
956 ret = -EINVAL; 978 ret = -EINVAL;
957 } 979 }
958 980
959err_out:
960 mutex_unlock(&trf->lock); 981 mutex_unlock(&trf->lock);
961 return ret; 982 return ret;
962} 983}
@@ -1191,7 +1212,18 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
1191 dev_dbg(trf->dev, "Abort process initiated\n"); 1212 dev_dbg(trf->dev, "Abort process initiated\n");
1192 1213
1193 mutex_lock(&trf->lock); 1214 mutex_lock(&trf->lock);
1194 trf->aborting = true; 1215
1216 switch (trf->state) {
1217 case TRF7970A_ST_WAIT_FOR_TX_FIFO:
1218 case TRF7970A_ST_WAIT_FOR_RX_DATA:
1219 case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
1220 case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
1221 trf->aborting = true;
1222 break;
1223 default:
1224 break;
1225 }
1226
1195 mutex_unlock(&trf->lock); 1227 mutex_unlock(&trf->lock);
1196} 1228}
1197 1229
@@ -1206,12 +1238,25 @@ static struct nfc_digital_ops trf7970a_nfc_ops = {
1206 .abort_cmd = trf7970a_abort_cmd, 1238 .abort_cmd = trf7970a_abort_cmd,
1207}; 1239};
1208 1240
1241static int trf7970a_get_autosuspend_delay(struct device_node *np)
1242{
1243 int autosuspend_delay, ret;
1244
1245 ret = of_property_read_u32(np, "autosuspend-delay", &autosuspend_delay);
1246 if (ret)
1247 autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY;
1248
1249 of_node_put(np);
1250
1251 return autosuspend_delay;
1252}
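/*
 * (Illustrative DT fragment, assuming the "autosuspend-delay" property
 * parsed above is given in milliseconds as pm_runtime expects:
 * "autosuspend-delay = <60000>;" would select 60 s; without the property
 * the TRF7970A_AUTOSUSPEND_DELAY default of 30 s is used.)
 */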
1253
1209static int trf7970a_probe(struct spi_device *spi) 1254static int trf7970a_probe(struct spi_device *spi)
1210{ 1255{
1211 struct device_node *np = spi->dev.of_node; 1256 struct device_node *np = spi->dev.of_node;
1212 const struct spi_device_id *id = spi_get_device_id(spi); 1257 const struct spi_device_id *id = spi_get_device_id(spi);
1213 struct trf7970a *trf; 1258 struct trf7970a *trf;
1214 int ret; 1259 int uvolts, autosuspend_delay, ret;
1215 1260
1216 if (!np) { 1261 if (!np) {
1217 dev_err(&spi->dev, "No Device Tree entry\n"); 1262 dev_err(&spi->dev, "No Device Tree entry\n");
@@ -1281,7 +1326,10 @@ static int trf7970a_probe(struct spi_device *spi)
1281 goto err_destroy_lock; 1326 goto err_destroy_lock;
1282 } 1327 }
1283 1328
1284 trf->powering_up = true; 1329 uvolts = regulator_get_voltage(trf->regulator);
1330
1331 if (uvolts > 4000000)
1332 trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
1285 1333
1286 trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops, 1334 trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
1287 TRF7970A_SUPPORTED_PROTOCOLS, 1335 TRF7970A_SUPPORTED_PROTOCOLS,
@@ -1297,6 +1345,12 @@ static int trf7970a_probe(struct spi_device *spi)
1297 nfc_digital_set_drvdata(trf->ddev, trf); 1345 nfc_digital_set_drvdata(trf->ddev, trf);
1298 spi_set_drvdata(spi, trf); 1346 spi_set_drvdata(spi, trf);
1299 1347
1348 autosuspend_delay = trf7970a_get_autosuspend_delay(np);
1349
1350 pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay);
1351 pm_runtime_use_autosuspend(trf->dev);
1352 pm_runtime_enable(trf->dev);
1353
1300 ret = nfc_digital_register_device(trf->ddev); 1354 ret = nfc_digital_register_device(trf->ddev);
1301 if (ret) { 1355 if (ret) {
1302 dev_err(trf->dev, "Can't register NFC digital device: %d\n", 1356 dev_err(trf->dev, "Can't register NFC digital device: %d\n",
@@ -1307,6 +1361,7 @@ static int trf7970a_probe(struct spi_device *spi)
1307 return 0; 1361 return 0;
1308 1362
1309err_free_ddev: 1363err_free_ddev:
1364 pm_runtime_disable(trf->dev);
1310 nfc_digital_free_device(trf->ddev); 1365 nfc_digital_free_device(trf->ddev);
1311err_disable_regulator: 1366err_disable_regulator:
1312 regulator_disable(trf->regulator); 1367 regulator_disable(trf->regulator);
@@ -1321,15 +1376,16 @@ static int trf7970a_remove(struct spi_device *spi)
1321 1376
1322 mutex_lock(&trf->lock); 1377 mutex_lock(&trf->lock);
1323 1378
1324 trf7970a_switch_rf_off(trf);
1325 trf7970a_init(trf);
1326
1327 switch (trf->state) { 1379 switch (trf->state) {
1328 case TRF7970A_ST_WAIT_FOR_TX_FIFO: 1380 case TRF7970A_ST_WAIT_FOR_TX_FIFO:
1329 case TRF7970A_ST_WAIT_FOR_RX_DATA: 1381 case TRF7970A_ST_WAIT_FOR_RX_DATA:
1330 case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: 1382 case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
1331 case TRF7970A_ST_WAIT_TO_ISSUE_EOF: 1383 case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
1332 trf7970a_send_err_upstream(trf, -ECANCELED); 1384 trf7970a_send_err_upstream(trf, -ECANCELED);
1385 /* FALLTHROUGH */
1386 case TRF7970A_ST_IDLE:
1387 case TRF7970A_ST_IDLE_RX_BLOCKED:
1388 pm_runtime_put_sync(trf->dev);
1333 break; 1389 break;
1334 default: 1390 default:
1335 break; 1391 break;
@@ -1337,6 +1393,8 @@ static int trf7970a_remove(struct spi_device *spi)
1337 1393
1338 mutex_unlock(&trf->lock); 1394 mutex_unlock(&trf->lock);
1339 1395
1396 pm_runtime_disable(trf->dev);
1397
1340 nfc_digital_unregister_device(trf->ddev); 1398 nfc_digital_unregister_device(trf->ddev);
1341 nfc_digital_free_device(trf->ddev); 1399 nfc_digital_free_device(trf->ddev);
1342 1400
@@ -1347,6 +1405,70 @@ static int trf7970a_remove(struct spi_device *spi)
1347 return 0; 1405 return 0;
1348} 1406}
1349 1407
1408#ifdef CONFIG_PM_RUNTIME
1409static int trf7970a_pm_runtime_suspend(struct device *dev)
1410{
1411 struct spi_device *spi = container_of(dev, struct spi_device, dev);
1412 struct trf7970a *trf = spi_get_drvdata(spi);
1413 int ret;
1414
1415 dev_dbg(dev, "Runtime suspend\n");
1416
1417 if (trf->state != TRF7970A_ST_OFF) {
1418 dev_dbg(dev, "Can't suspend - not in OFF state (%d)\n",
1419 trf->state);
1420 return -EBUSY;
1421 }
1422
1423 gpio_set_value(trf->en_gpio, 0);
1424 gpio_set_value(trf->en2_gpio, 0);
1425
1426 ret = regulator_disable(trf->regulator);
1427 if (ret)
1428 dev_err(dev, "%s - Can't disable VIN: %d\n", __func__, ret);
1429
1430 return ret;
1431}
1432
1433static int trf7970a_pm_runtime_resume(struct device *dev)
1434{
1435 struct spi_device *spi = container_of(dev, struct spi_device, dev);
1436 struct trf7970a *trf = spi_get_drvdata(spi);
1437 int ret;
1438
1439 dev_dbg(dev, "Runtime resume\n");
1440
1441 ret = regulator_enable(trf->regulator);
1442 if (ret) {
1443 dev_err(dev, "%s - Can't enable VIN: %d\n", __func__, ret);
1444 return ret;
1445 }
1446
1447 usleep_range(5000, 6000);
1448
1449 gpio_set_value(trf->en2_gpio, 1);
1450 usleep_range(1000, 2000);
1451 gpio_set_value(trf->en_gpio, 1);
1452
1453 usleep_range(20000, 21000);
1454
1455 ret = trf7970a_init(trf);
1456 if (ret) {
1457 dev_err(dev, "%s - Can't initialize: %d\n", __func__, ret);
1458 return ret;
1459 }
1460
1461 pm_runtime_mark_last_busy(dev);
1462
1463 return 0;
1464}
1465#endif
1466
1467static const struct dev_pm_ops trf7970a_pm_ops = {
1468 SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend,
1469 trf7970a_pm_runtime_resume, NULL)
1470};
1471
1350static const struct spi_device_id trf7970a_id_table[] = { 1472static const struct spi_device_id trf7970a_id_table[] = {
1351 { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA }, 1473 { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
1352 { } 1474 { }
@@ -1360,6 +1482,7 @@ static struct spi_driver trf7970a_spi_driver = {
1360 .driver = { 1482 .driver = {
1361 .name = "trf7970a", 1483 .name = "trf7970a",
1362 .owner = THIS_MODULE, 1484 .owner = THIS_MODULE,
1485 .pm = &trf7970a_pm_ops,
1363 }, 1486 },
1364}; 1487};
1365 1488
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9a95831bd065..fb4a59830648 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -14,6 +14,7 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/phy.h> 16#include <linux/phy.h>
17#include <linux/phy_fixed.h>
17#include <linux/of.h> 18#include <linux/of.h>
18#include <linux/of_irq.h> 19#include <linux/of_irq.h>
19#include <linux/of_mdio.h> 20#include <linux/of_mdio.h>
@@ -22,27 +23,6 @@
22MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); 23MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
23MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
24 25
25static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
26{
27 /* The default values for phydev->supported are provided by the PHY
28 * driver "features" member; we want to reset to sane defaults first
29 * before supporting higher speeds.
30 */
31 phydev->supported &= PHY_DEFAULT_FEATURES;
32
33 switch (max_speed) {
34 default:
35 return;
36
37 case SPEED_1000:
38 phydev->supported |= PHY_1000BT_FEATURES;
39 case SPEED_100:
40 phydev->supported |= PHY_100BT_FEATURES;
41 case SPEED_10:
42 phydev->supported |= PHY_10BT_FEATURES;
43 }
44}
45
46/* Extract the clause 22 phy ID from the compatible string of the form 26/* Extract the clause 22 phy ID from the compatible string of the form
47 * ethernet-phy-idAAAA.BBBB */ 27 * ethernet-phy-idAAAA.BBBB */
48static int of_get_phy_id(struct device_node *device, u32 *phy_id) 28static int of_get_phy_id(struct device_node *device, u32 *phy_id)
@@ -66,7 +46,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
66 struct phy_device *phy; 46 struct phy_device *phy;
67 bool is_c45; 47 bool is_c45;
68 int rc; 48 int rc;
69 u32 max_speed = 0;
70 u32 phy_id; 49 u32 phy_id;
71 50
72 is_c45 = of_device_is_compatible(child, 51 is_c45 = of_device_is_compatible(child,
@@ -103,17 +82,33 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
103 return 1; 82 return 1;
104 } 83 }
105 84
106 /* Set phydev->supported based on the "max-speed" property
107 * if present */
108 if (!of_property_read_u32(child, "max-speed", &max_speed))
109 of_set_phy_supported(phy, max_speed);
110
111 dev_dbg(&mdio->dev, "registered phy %s at address %i\n", 85 dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
112 child->name, addr); 86 child->name, addr);
113 87
114 return 0; 88 return 0;
115} 89}
116 90
91static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
92{
93 u32 addr;
94 int ret;
95
96 ret = of_property_read_u32(np, "reg", &addr);
97 if (ret < 0) {
98 dev_err(dev, "%s has invalid PHY address\n", np->full_name);
99 return ret;
100 }
101
102 /* A PHY must have a reg property in the range [0-31] */
103 if (addr >= PHY_MAX_ADDR) {
104 dev_err(dev, "%s PHY address %i is too large\n",
105 np->full_name, addr);
106 return -EINVAL;
107 }
108
109 return addr;
110}
111
117/** 112/**
118 * of_mdiobus_register - Register mii_bus and create PHYs from the device tree 113 * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
119 * @mdio: pointer to mii_bus structure 114 * @mdio: pointer to mii_bus structure
@@ -126,9 +121,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
126{ 121{
127 struct device_node *child; 122 struct device_node *child;
128 const __be32 *paddr; 123 const __be32 *paddr;
129 u32 addr;
130 bool scanphys = false; 124 bool scanphys = false;
131 int rc, i, len; 125 int addr, rc, i;
132 126
133 /* Mask out all PHYs from auto probing. Instead the PHYs listed in 127 /* Mask out all PHYs from auto probing. Instead the PHYs listed in
134 * the device tree are populated after the bus has been registered */ 128 * the device tree are populated after the bus has been registered */
@@ -148,19 +142,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
148 142
149 /* Loop over the child nodes and register a phy_device for each one */ 143 /* Loop over the child nodes and register a phy_device for each one */
150 for_each_available_child_of_node(np, child) { 144 for_each_available_child_of_node(np, child) {
151 /* A PHY must have a reg property in the range [0-31] */ 145 addr = of_mdio_parse_addr(&mdio->dev, child);
152 paddr = of_get_property(child, "reg", &len); 146 if (addr < 0) {
153 if (!paddr || len < sizeof(*paddr)) {
154 scanphys = true; 147 scanphys = true;
155 dev_err(&mdio->dev, "%s has invalid PHY address\n",
156 child->full_name);
157 continue;
158 }
159
160 addr = be32_to_cpup(paddr);
161 if (addr >= PHY_MAX_ADDR) {
162 dev_err(&mdio->dev, "%s PHY address %i is too large\n",
163 child->full_name, addr);
164 continue; 148 continue;
165 } 149 }
166 150
@@ -175,7 +159,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
175 /* auto scan for PHYs with empty reg property */ 159 /* auto scan for PHYs with empty reg property */
176 for_each_available_child_of_node(np, child) { 160 for_each_available_child_of_node(np, child) {
177 /* Skip PHYs with reg property set */ 161 /* Skip PHYs with reg property set */
178 paddr = of_get_property(child, "reg", &len); 162 paddr = of_get_property(child, "reg", NULL);
179 if (paddr) 163 if (paddr)
180 continue; 164 continue;
181 165
@@ -198,6 +182,40 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
198} 182}
199EXPORT_SYMBOL(of_mdiobus_register); 183EXPORT_SYMBOL(of_mdiobus_register);
200 184
185/**
186 * of_mdiobus_link_phydev - Find a device node for a phy
187 * @mdio: pointer to mii_bus structure
188 * @phydev: phydev for which the of_node pointer should be set
189 *
190 * Walk the list of subnodes of a mdio bus and look for a node that matches the
191 * phy's address with its 'reg' property. If found, set the of_node pointer for
192 * the phy. This allows auto-probed phy devices to be supplied with information
193 * passed in via DT.
194 */
195void of_mdiobus_link_phydev(struct mii_bus *mdio,
196 struct phy_device *phydev)
197{
198 struct device *dev = &phydev->dev;
199 struct device_node *child;
200
201 if (dev->of_node || !mdio->dev.of_node)
202 return;
203
204 for_each_available_child_of_node(mdio->dev.of_node, child) {
205 int addr;
206
207 addr = of_mdio_parse_addr(&mdio->dev, child);
208 if (addr < 0)
209 continue;
210
211 if (addr == phydev->addr) {
212 dev->of_node = child;
213 return;
214 }
215 }
216}
217EXPORT_SYMBOL(of_mdiobus_link_phydev);
218
201/* Helper function for of_phy_find_device */ 219/* Helper function for of_phy_find_device */
202static int of_phy_match(struct device *dev, void *phy_np) 220static int of_phy_match(struct device *dev, void *phy_np)
203{ 221{
@@ -245,44 +263,6 @@ struct phy_device *of_phy_connect(struct net_device *dev,
245EXPORT_SYMBOL(of_phy_connect); 263EXPORT_SYMBOL(of_phy_connect);
246 264
247/** 265/**
248 * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy
249 * @dev: pointer to net_device claiming the phy
250 * @hndlr: Link state callback for the network device
251 * @iface: PHY data interface type
252 *
253 * This function is a temporary stop-gap and will be removed soon. It is
254 * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do
255 * not call this function from new drivers.
256 */
257struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
258 void (*hndlr)(struct net_device *),
259 phy_interface_t iface)
260{
261 struct device_node *net_np;
262 char bus_id[MII_BUS_ID_SIZE + 3];
263 struct phy_device *phy;
264 const __be32 *phy_id;
265 int sz;
266
267 if (!dev->dev.parent)
268 return NULL;
269
270 net_np = dev->dev.parent->of_node;
271 if (!net_np)
272 return NULL;
273
274 phy_id = of_get_property(net_np, "fixed-link", &sz);
275 if (!phy_id || sz < sizeof(*phy_id))
276 return NULL;
277
278 sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
279
280 phy = phy_connect(dev, bus_id, hndlr, iface);
281 return IS_ERR(phy) ? NULL : phy;
282}
283EXPORT_SYMBOL(of_phy_connect_fixed_link);
284
285/**
286 * of_phy_attach - Attach to a PHY without starting the state machine 266 * of_phy_attach - Attach to a PHY without starting the state machine
287 * @dev: pointer to net_device claiming the phy 267 * @dev: pointer to net_device claiming the phy
288 * @phy_np: Node pointer for the PHY 268 * @phy_np: Node pointer for the PHY
@@ -301,3 +281,69 @@ struct phy_device *of_phy_attach(struct net_device *dev,
301 return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; 281 return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
302} 282}
303EXPORT_SYMBOL(of_phy_attach); 283EXPORT_SYMBOL(of_phy_attach);
284
285#if defined(CONFIG_FIXED_PHY)
286/*
287 * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
288 * support two DT bindings:
289 * - the old DT binding, where 'fixed-link' was a property with 5
290 * cells encoding various information about the fixed PHY
291 * - the new DT binding, where 'fixed-link' is a sub-node of the
292 * Ethernet device.
293 */
294bool of_phy_is_fixed_link(struct device_node *np)
295{
296 struct device_node *dn;
297 int len;
298
299 /* New binding */
300 dn = of_get_child_by_name(np, "fixed-link");
301 if (dn) {
302 of_node_put(dn);
303 return true;
304 }
305
306 /* Old binding */
307 if (of_get_property(np, "fixed-link", &len) &&
308 len == (5 * sizeof(__be32)))
309 return true;
310
311 return false;
312}
313EXPORT_SYMBOL(of_phy_is_fixed_link);
314
315int of_phy_register_fixed_link(struct device_node *np)
316{
317 struct fixed_phy_status status = {};
318 struct device_node *fixed_link_node;
319 const __be32 *fixed_link_prop;
320 int len;
321
322 /* New binding */
323 fixed_link_node = of_get_child_by_name(np, "fixed-link");
324 if (fixed_link_node) {
325 status.link = 1;
326 status.duplex = of_property_read_bool(fixed_link_node, "full-duplex");
327 if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
328 return -EINVAL;
329 status.pause = of_property_read_bool(fixed_link_node, "pause");
330 status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause");
331 of_node_put(fixed_link_node);
332 return fixed_phy_register(PHY_POLL, &status, np);
333 }
334
335 /* Old binding */
336 fixed_link_prop = of_get_property(np, "fixed-link", &len);
337 if (fixed_link_prop && len == (5 * sizeof(__be32))) {
338 status.link = 1;
339 status.duplex = be32_to_cpu(fixed_link_prop[1]);
340 status.speed = be32_to_cpu(fixed_link_prop[2]);
341 status.pause = be32_to_cpu(fixed_link_prop[3]);
342 status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
343 return fixed_phy_register(PHY_POLL, &status, np);
344 }
345
346 return -ENODEV;
347}
348EXPORT_SYMBOL(of_phy_register_fixed_link);
349#endif
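/*
 * (Illustrative DT fragments for the two bindings parsed above; the exact
 * property layout is inferred from this parsing code.
 *
 * New binding:
 *	ethernet@0 {
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 *
 * Old binding, cells <phy-id full-duplex speed pause asym-pause>:
 *	ethernet@0 {
 *		fixed-link = <1 1 1000 0 0>;
 *	};
 * )
 */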
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e25d2bc898e5..296b0ec8744d 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -142,7 +142,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
142 delta = ktime_to_ns(kt); 142 delta = ktime_to_ns(kt);
143 err = ops->adjtime(ops, delta); 143 err = ops->adjtime(ops, delta);
144 } else if (tx->modes & ADJ_FREQUENCY) { 144 } else if (tx->modes & ADJ_FREQUENCY) {
145 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq)); 145 s32 ppb = scaled_ppm_to_ppb(tx->freq);
146 if (ppb > ops->max_adj || ppb < -ops->max_adj)
147 return -ERANGE;
148 err = ops->adjfreq(ops, ppb);
146 ptp->dialed_frequency = tx->freq; 149 ptp->dialed_frequency = tx->freq;
147 } else if (tx->modes == 0) { 150 } else if (tx->modes == 0) {
148 tx->freq = ptp->dialed_frequency; 151 tx->freq = ptp->dialed_frequency;
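	/*
	 * (Note on the new range check: scaled_ppm_to_ppb() treats tx->freq
	 * as ppm with a 16-bit binary fraction, i.e. ppb = freq * 1000 / 2^16,
	 * so e.g. tx->freq = 65536 means 1 ppm = 1000 ppb, which is what gets
	 * compared against ops->max_adj.)
	 */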
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index fd7b3bd80789..d837c3c5330f 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
3348 } 3348 }
3349 CLAW_DBF_TEXT(2, setup, "init_mod"); 3349 CLAW_DBF_TEXT(2, setup, "init_mod");
3350 claw_root_dev = root_device_register("claw"); 3350 claw_root_dev = root_device_register("claw");
3351 ret = PTR_RET(claw_root_dev); 3351 ret = PTR_ERR_OR_ZERO(claw_root_dev);
3352 if (ret) 3352 if (ret)
3353 goto register_err; 3353 goto register_err;
3354 ret = ccw_driver_register(&claw_ccw_driver); 3354 ret = ccw_driver_register(&claw_ccw_driver);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 70b3a023100e..03b6ad035577 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
1837 if (ret) 1837 if (ret)
1838 goto out_err; 1838 goto out_err;
1839 ctcm_root_dev = root_device_register("ctcm"); 1839 ctcm_root_dev = root_device_register("ctcm");
1840 ret = PTR_RET(ctcm_root_dev); 1840 ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
1841 if (ret) 1841 if (ret)
1842 goto register_err; 1842 goto register_err;
1843 ret = ccw_driver_register(&ctcm_ccw_driver); 1843 ret = ccw_driver_register(&ctcm_ccw_driver);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 985b5dcbdac8..6bcfbbb20f04 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -34,8 +34,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
34 struct device_attribute *attr, const char *buf, size_t count) 34 struct device_attribute *attr, const char *buf, size_t count)
35{ 35{
36 struct net_device *ndev; 36 struct net_device *ndev;
37 int bs1; 37 unsigned int bs1;
38 struct ctcm_priv *priv = dev_get_drvdata(dev); 38 struct ctcm_priv *priv = dev_get_drvdata(dev);
39 int rc;
39 40
40 ndev = priv->channel[CTCM_READ]->netdev; 41 ndev = priv->channel[CTCM_READ]->netdev;
41 if (!(priv && priv->channel[CTCM_READ] && ndev)) { 42 if (!(priv && priv->channel[CTCM_READ] && ndev)) {
@@ -43,7 +44,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
43 return -ENODEV; 44 return -ENODEV;
44 } 45 }
45 46
46 sscanf(buf, "%u", &bs1); 47 rc = sscanf(buf, "%u", &bs1);
48 if (rc != 1)
49 goto einval;
47 if (bs1 > CTCM_BUFSIZE_LIMIT) 50 if (bs1 > CTCM_BUFSIZE_LIMIT)
48 goto einval; 51 goto einval;
49 if (bs1 < (576 + LL_HEADER_LENGTH + 2)) 52 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
@@ -143,13 +146,14 @@ static ssize_t ctcm_proto_show(struct device *dev,
143static ssize_t ctcm_proto_store(struct device *dev, 146static ssize_t ctcm_proto_store(struct device *dev,
144 struct device_attribute *attr, const char *buf, size_t count) 147 struct device_attribute *attr, const char *buf, size_t count)
145{ 148{
146 int value; 149 int value, rc;
147 struct ctcm_priv *priv = dev_get_drvdata(dev); 150 struct ctcm_priv *priv = dev_get_drvdata(dev);
148 151
149 if (!priv) 152 if (!priv)
150 return -ENODEV; 153 return -ENODEV;
151 sscanf(buf, "%u", &value); 154 rc = sscanf(buf, "%d", &value);
152 if (!((value == CTCM_PROTO_S390) || 155 if ((rc != 1) ||
156 !((value == CTCM_PROTO_S390) ||
153 (value == CTCM_PROTO_LINUX) || 157 (value == CTCM_PROTO_LINUX) ||
154 (value == CTCM_PROTO_MPC) || 158 (value == CTCM_PROTO_MPC) ||
155 (value == CTCM_PROTO_OS390))) 159 (value == CTCM_PROTO_OS390)))
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c461f2aac610..0a7d87c372b8 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1943,14 +1943,16 @@ static ssize_t
1943lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1943lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1944{ 1944{
1945 struct lcs_card *card; 1945 struct lcs_card *card;
1946 int value; 1946 int value, rc;
1947 1947
1948 card = dev_get_drvdata(dev); 1948 card = dev_get_drvdata(dev);
1949 1949
1950 if (!card) 1950 if (!card)
1951 return 0; 1951 return 0;
1952 1952
1953 sscanf(buf, "%u", &value); 1953 rc = sscanf(buf, "%d", &value);
1954 if (rc != 1)
1955 return -EINVAL;
1954 /* TODO: sanity checks */ 1956 /* TODO: sanity checks */
1955 card->portno = value; 1957 card->portno = value;
1956 1958
@@ -1997,14 +1999,17 @@ static ssize_t
1997lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1999lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1998{ 2000{
1999 struct lcs_card *card; 2001 struct lcs_card *card;
2000 int value; 2002 unsigned int value;
2003 int rc;
2001 2004
2002 card = dev_get_drvdata(dev); 2005 card = dev_get_drvdata(dev);
2003 2006
2004 if (!card) 2007 if (!card)
2005 return 0; 2008 return 0;
2006 2009
2007 sscanf(buf, "%u", &value); 2010 rc = sscanf(buf, "%u", &value);
2011 if (rc != 1)
2012 return -EINVAL;
2008 /* TODO: sanity checks */ 2013 /* TODO: sanity checks */
2009 card->lancmd_timeout = value; 2014 card->lancmd_timeout = value;
2010 2015
@@ -2442,7 +2447,7 @@ __init lcs_init_module(void)
2442 if (rc) 2447 if (rc)
2443 goto out_err; 2448 goto out_err;
2444 lcs_root_dev = root_device_register("lcs"); 2449 lcs_root_dev = root_device_register("lcs");
2445 rc = PTR_RET(lcs_root_dev); 2450 rc = PTR_ERR_OR_ZERO(lcs_root_dev);
2446 if (rc) 2451 if (rc)
2447 goto register_err; 2452 goto register_err;
2448 rc = ccw_driver_register(&lcs_ccw_driver); 2453 rc = ccw_driver_register(&lcs_ccw_driver);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 5333b2c018e7..a2088af51cc5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -268,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
268#define QETH_NO_PRIO_QUEUEING 0 268#define QETH_NO_PRIO_QUEUEING 0
269#define QETH_PRIO_Q_ING_PREC 1 269#define QETH_PRIO_Q_ING_PREC 1
270#define QETH_PRIO_Q_ING_TOS 2 270#define QETH_PRIO_Q_ING_TOS 2
271#define IP_TOS_LOWDELAY 0x10 271#define QETH_PRIO_Q_ING_SKB 3
272#define IP_TOS_HIGHTHROUGHPUT 0x08 272#define QETH_PRIO_Q_ING_VLAN 4
273#define IP_TOS_HIGHRELIABILITY 0x04
274#define IP_TOS_NOTIMPORTANT 0x02
275 273
276/* Packing */ 274/* Packing */
277#define QETH_LOW_WATERMARK_PACK 2 275#define QETH_LOW_WATERMARK_PACK 2
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e89f38c31176..f54bec54d677 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <net/iucv/af_iucv.h> 22#include <net/iucv/af_iucv.h>
23#include <net/dsfield.h>
23 24
24#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
25#include <asm/chpid.h> 26#include <asm/chpid.h>
@@ -1013,7 +1014,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
1013 1014
1014 card = CARD_FROM_CDEV(cdev); 1015 card = CARD_FROM_CDEV(cdev);
1015 1016
1016 if (!IS_ERR(irb)) 1017 if (!card || !IS_ERR(irb))
1017 return 0; 1018 return 0;
1018 1019
1019 switch (PTR_ERR(irb)) { 1020 switch (PTR_ERR(irb)) {
@@ -1029,7 +1030,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
1029 QETH_CARD_TEXT(card, 2, "ckirberr"); 1030 QETH_CARD_TEXT(card, 2, "ckirberr");
1030 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 1031 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
1031 if (intparm == QETH_RCD_PARM) { 1032 if (intparm == QETH_RCD_PARM) {
1032 if (card && (card->data.ccwdev == cdev)) { 1033 if (card->data.ccwdev == cdev) {
1033 card->data.state = CH_STATE_DOWN; 1034 card->data.state = CH_STATE_DOWN;
1034 wake_up(&card->wait_q); 1035 wake_up(&card->wait_q);
1035 } 1036 }
@@ -3662,42 +3663,56 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3662} 3663}
3663EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); 3664EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
3664 3665
3666/*
3667 * Note: Function assumes that we have 4 outbound queues.
3668 */
3665int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3669int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3666 int ipv, int cast_type) 3670 int ipv, int cast_type)
3667{ 3671{
3668 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD || 3672 __be16 *tci;
3669 card->info.type == QETH_CARD_TYPE_OSX)) 3673 u8 tos;
3670 return card->qdio.default_out_queue; 3674
3671 switch (card->qdio.no_out_queues) { 3675 if (cast_type && card->info.is_multicast_different)
3672 case 4: 3676 return card->info.is_multicast_different &
3673 if (cast_type && card->info.is_multicast_different) 3677 (card->qdio.no_out_queues - 1);
3674 return card->info.is_multicast_different & 3678
3675 (card->qdio.no_out_queues - 1); 3679 switch (card->qdio.do_prio_queueing) {
3676 if (card->qdio.do_prio_queueing && (ipv == 4)) { 3680 case QETH_PRIO_Q_ING_TOS:
3677 const u8 tos = ip_hdr(skb)->tos; 3681 case QETH_PRIO_Q_ING_PREC:
3678 3682 switch (ipv) {
3679 if (card->qdio.do_prio_queueing == 3683 case 4:
3680 QETH_PRIO_Q_ING_TOS) { 3684 tos = ipv4_get_dsfield(ip_hdr(skb));
3681 if (tos & IP_TOS_NOTIMPORTANT) 3685 break;
3682 return 3; 3686 case 6:
3683 if (tos & IP_TOS_HIGHRELIABILITY) 3687 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3684 return 2; 3688 break;
3685 if (tos & IP_TOS_HIGHTHROUGHPUT) 3689 default:
3686 return 1; 3690 return card->qdio.default_out_queue;
3687 if (tos & IP_TOS_LOWDELAY)
3688 return 0;
3689 }
3690 if (card->qdio.do_prio_queueing ==
3691 QETH_PRIO_Q_ING_PREC)
3692 return 3 - (tos >> 6);
3693 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3694 /* TODO: IPv6!!! */
3695 } 3691 }
3696 return card->qdio.default_out_queue; 3692 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3697 case 1: /* fallthrough for single-out-queue 1920-device */ 3693 return ~tos >> 6 & 3;
3694 if (tos & IPTOS_MINCOST)
3695 return 3;
3696 if (tos & IPTOS_RELIABILITY)
3697 return 2;
3698 if (tos & IPTOS_THROUGHPUT)
3699 return 1;
3700 if (tos & IPTOS_LOWDELAY)
3701 return 0;
3702 break;
3703 case QETH_PRIO_Q_ING_SKB:
3704 if (skb->priority > 5)
3705 return 0;
3706 return ~skb->priority >> 1 & 3;
3707 case QETH_PRIO_Q_ING_VLAN:
3708 tci = &((struct ethhdr *)skb->data)->h_proto;
3709 if (*tci == ETH_P_8021Q)
3710 return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
3711 break;
3698 default: 3712 default:
3699 return card->qdio.default_out_queue; 3713 break;
3700 } 3714 }
3715 return card->qdio.default_out_queue;
3701} 3716}
3702EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3717EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
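/*
 * (A sketch of the queue mappings above, derived from the bit arithmetic:
 * "~tos >> 6 & 3" equals 3 - (tos >> 6), so the top two TOS (precedence)
 * bits 0..3 select queues 3..0; "~skb->priority >> 1 & 3" maps priorities
 * 0,1 -> queue 3, 2,3 -> queue 2, 4,5 -> queue 1, and > 5 -> queue 0; the
 * VLAN case likewise maps PCP 0..7 to queues 3,3,2,2,1,1,0,0. Higher
 * priority always means a lower queue number.)
 */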
3703 3718
@@ -5703,6 +5718,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
 	struct qeth_card *card = netdev->ml_priv;
 	enum qeth_link_types link_type;
 	struct carrier_info carrier_info;
+	u32 speed;
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
 		link_type = QETH_LINK_TYPE_10GBIT_ETH;
@@ -5717,28 +5733,29 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
 	case QETH_LINK_TYPE_FAST_ETH:
 	case QETH_LINK_TYPE_LANE_ETH100:
 		qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
-		ecmd->speed = SPEED_100;
+		speed = SPEED_100;
 		ecmd->port = PORT_TP;
 		break;
 
 	case QETH_LINK_TYPE_GBIT_ETH:
 	case QETH_LINK_TYPE_LANE_ETH1000:
 		qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
-		ecmd->speed = SPEED_1000;
+		speed = SPEED_1000;
 		ecmd->port = PORT_FIBRE;
 		break;
 
 	case QETH_LINK_TYPE_10GBIT_ETH:
 		qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
-		ecmd->speed = SPEED_10000;
+		speed = SPEED_10000;
 		ecmd->port = PORT_FIBRE;
 		break;
 
 	default:
 		qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
-		ecmd->speed = SPEED_10;
+		speed = SPEED_10;
 		ecmd->port = PORT_TP;
 	}
+	ethtool_cmd_speed_set(ecmd, speed);
 
 	/* Check if we can obtain more accurate information. */
 	/* If QUERY_CARD_INFO command is not supported or fails, */
@@ -5783,18 +5800,19 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
 
 	switch (carrier_info.port_speed) {
 	case CARD_INFO_PORTS_10M:
-		ecmd->speed = SPEED_10;
+		speed = SPEED_10;
 		break;
 	case CARD_INFO_PORTS_100M:
-		ecmd->speed = SPEED_100;
+		speed = SPEED_100;
 		break;
 	case CARD_INFO_PORTS_1G:
-		ecmd->speed = SPEED_1000;
+		speed = SPEED_1000;
 		break;
 	case CARD_INFO_PORTS_10G:
-		ecmd->speed = SPEED_10000;
+		speed = SPEED_10000;
 		break;
 	}
+	ethtool_cmd_speed_set(ecmd, speed);
 
 	return 0;
 }
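
The switch from assigning ecmd->speed directly to calling
ethtool_cmd_speed_set() matters because struct ethtool_cmd keeps the
speed split across two 16-bit fields; the uapi helper is essentially:

    static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
                                             __u32 speed)
    {
            ep->speed = (__u16)speed;               /* low 16 bits */
            ep->speed_hi = (__u16)(speed >> 16);    /* high 16 bits */
    }

so link speeds above 65535 Mb/s are reported correctly as well.
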
@@ -5816,7 +5834,7 @@ static int __init qeth_core_init(void)
 	if (rc)
 		goto out_err;
 	qeth_core_root_dev = root_device_register("qeth");
-	rc = PTR_RET(qeth_core_root_dev);
+	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
 	if (rc)
 		goto register_err;
 	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
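
PTR_ERR_OR_ZERO() is the clearer name for what PTR_RET() did; both
collapse an ERR_PTR-style return into an int error code, roughly
(modulo __must_check/__force annotations):

    static inline int PTR_ERR_OR_ZERO(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);    /* negative errno */
            return 0;                       /* valid pointer */
    }
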
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 425c0ecf1f3b..8a25a2be9890 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -217,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
 		return sprintf(buf, "%s\n", "by precedence");
 	case QETH_PRIO_Q_ING_TOS:
 		return sprintf(buf, "%s\n", "by type of service");
+	case QETH_PRIO_Q_ING_SKB:
+		return sprintf(buf, "%s\n", "by skb-priority");
+	case QETH_PRIO_Q_ING_VLAN:
+		return sprintf(buf, "%s\n", "by VLAN headers");
 	default:
 		return sprintf(buf, "always queue %i\n",
 			       card->qdio.default_out_queue);
@@ -250,11 +254,23 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 	}
 
 	tmp = strsep((char **) &buf, "\n");
-	if (!strcmp(tmp, "prio_queueing_prec"))
+	if (!strcmp(tmp, "prio_queueing_prec")) {
 		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
-	else if (!strcmp(tmp, "prio_queueing_tos"))
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (!strcmp(tmp, "prio_queueing_skb")) {
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (!strcmp(tmp, "prio_queueing_tos")) {
 		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
-	else if (!strcmp(tmp, "no_prio_queueing:0")) {
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (!strcmp(tmp, "prio_queueing_vlan")) {
+		if (!card->options.layer2) {
+			rc = -ENOTSUPP;
+			goto out;
+		}
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (!strcmp(tmp, "no_prio_queueing:0")) {
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
 		card->qdio.default_out_queue = 0;
 	} else if (!strcmp(tmp, "no_prio_queueing:1")) {
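
From user space the new policies are selected through the same sysfs
attribute as before; note that prio_queueing_vlan is rejected with
-ENOTSUPP unless the device runs in layer-2 mode. A minimal sketch
(the bus ID 0.0.f5f0 and the attribute path are illustrative
assumptions, not taken from the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *attr =
                    "/sys/bus/ccwgroup/devices/0.0.f5f0/priority_queueing";
            const char *val = "prio_queueing_skb\n";
            int fd = open(attr, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, val, strlen(val)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
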
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8dea3f12ccc1..5ef5b4f45758 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -725,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int elements = 0;
 	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = skb;
-	int ipv = qeth_get_ip_version(skb);
 	int cast_type = qeth_l2_get_cast_type(card, skb);
-	struct qeth_qdio_out_q *queue = card->qdio.out_qs
-		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
+	struct qeth_qdio_out_q *queue;
 	int tx_bytes = skb->len;
 	int data_offset = -1;
 	int elements_needed = 0;
 	int hd_len = 0;
 
+	if (card->qdio.do_prio_queueing || (cast_type &&
+					card->info.is_multicast_different))
+		queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+					qeth_get_ip_version(skb), cast_type)];
+	else
+		queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
 	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
 		card->stats.tx_carrier_errors++;
 		goto tx_drop;
@@ -964,10 +969,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	card->dev->mtu = card->info.initial_mtu;
 	card->dev->netdev_ops = &qeth_l2_netdev_ops;
-	if (card->info.type != QETH_CARD_TYPE_OSN)
-		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
-	else
-		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
+	card->dev->ethtool_ops =
+		(card->info.type != QETH_CARD_TYPE_OSN) ?
+		&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3524d34ff694..14e0b5810e8c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -63,7 +63,7 @@ void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
 {
 	int count = 0, rc = 0;
-	int in[4];
+	unsigned int in[4];
 	char c;
 
 	rc = sscanf(buf, "%u.%u.%u.%u%c",
@@ -1659,7 +1659,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
 	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
 		struct net_device *netdev;
 
-		netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
 					      vid);
 		if (netdev == NULL ||
 		    !(netdev->flags & IFF_UP))
@@ -1721,7 +1721,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
 	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
 		struct net_device *netdev;
 
-		netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
 					      vid);
 		if (netdev == NULL ||
 		    !(netdev->flags & IFF_UP))
@@ -1766,7 +1766,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 
 	QETH_CARD_TEXT(card, 4, "frvaddr4");
 
-	netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+	netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
 	if (!netdev)
 		return;
 	in_dev = in_dev_get(netdev);
@@ -1796,7 +1796,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 
 	QETH_CARD_TEXT(card, 4, "frvaddr6");
 
-	netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+	netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
 	if (!netdev)
 		return;
 	in6_dev = in6_dev_get(netdev);
@@ -2089,7 +2089,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
 	struct net_device *netdev;
 
 	rcu_read_lock();
-	netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+	netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
 				      vid);
 	rcu_read_unlock();
 	if (netdev == dev) {
@@ -2926,8 +2926,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct sk_buff *new_skb = NULL;
 	int ipv = qeth_get_ip_version(skb);
 	int cast_type = qeth_l3_get_cast_type(card, skb);
-	struct qeth_qdio_out_q *queue = card->qdio.out_qs
-		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
+	struct qeth_qdio_out_q *queue =
+		card->qdio.out_qs[card->qdio.do_prio_queueing
+			|| (cast_type && card->info.is_multicast_different) ?
+			qeth_get_priority_queue(card, skb, ipv, cast_type) :
+			card->qdio.default_out_queue];
 	int tx_bytes = skb->len;
 	bool large_send;
 	int data_offset = -1;
@@ -3298,7 +3301,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 	card->dev->ml_priv = card;
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	card->dev->mtu = card->info.initial_mtu;
-	SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
+	card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 			       NETIF_F_HW_VLAN_CTAG_RX |
 			       NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 11854845393b..a669f2d11c31 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
 	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
 	sk->sk_state_change = tcp_sw_conn->old_state_change;
 	sk->sk_write_space  = tcp_sw_conn->old_write_space;
-	sk->sk_no_check = 0;
+	sk->sk_no_check_tx = 0;
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 0901ef5d6e8a..08356b6955a4 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4605,7 +4605,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
 	netdev->netdev_ops = &et131x_netdev_ops;
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
-	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
+	netdev->ethtool_ops = &et131x_ethtool_ops;
 
 	adapter = et131x_adapter_init(netdev, pdev);
 
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index d6421b9b5981..a6158bef58e5 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -2249,7 +2249,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
 
 	ft1000InitProc(dev);
 	ft1000_card_present = 1;
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 	printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
 	       dev->name, dev->base_addr, dev->irq, dev->dev_addr);
 	return dev;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 75d7c63cb413..e320d6bae913 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -1067,7 +1067,7 @@ static int xlr_net_probe(struct platform_device *pdev)
 	xlr_set_rx_mode(ndev);
 
 	priv->num_rx_desc += MAX_NUM_DESC_SPILL;
-	SET_ETHTOOL_OPS(ndev, &xlr_ethtool_ops);
+	ndev->ethtool_ops = &xlr_ethtool_ops;
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	/* Common registers, do one time initialization */
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ff7214aac9dd..da9dd6bc5660 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -469,7 +469,7 @@ int cvm_oct_common_init(struct net_device *dev)
 
 	/* We do our own locking, Linux doesn't need to */
 	dev->features |= NETIF_F_LLTX;
-	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+	dev->ethtool_ops = &cvm_oct_ethtool_ops;
 
 	cvm_oct_phy_setup_device(dev);
 	cvm_oct_set_mac_filter(dev);
diff --git a/drivers/staging/rtl8192ee/core.c b/drivers/staging/rtl8192ee/core.c
index 76ea356163b6..7f6accd59986 100644
--- a/drivers/staging/rtl8192ee/core.c
+++ b/drivers/staging/rtl8192ee/core.c
@@ -322,7 +322,7 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
 	struct rtl_mac *mac = &(rtlpriv->mac80211);
 	struct cfg80211_pkt_pattern *patterns = wow->patterns;
 	struct rtl_wow_pattern rtl_pattern;
-	u8 *pattern_os, *mask_os;
+	const u8 *pattern_os, *mask_os;
 	u8 mask[MAX_WOL_BIT_MASK_SIZE] = {0};
 	u8 content[MAX_WOL_PATTERN_SIZE] = {0};
 	u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -1561,7 +1561,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channle or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw,
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 u32 queues, bool drop)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0c9f5cebfb42..f0839f6a9345 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1227,7 +1227,7 @@ static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
 
 static int cfg80211_rtw_get_station(struct wiphy *wiphy,
 				    struct net_device *ndev,
-				    u8 *mac, struct station_info *sinfo)
+				    const u8 *mac, struct station_info *sinfo)
 {
 	int ret = 0;
 	struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
@@ -2903,7 +2903,7 @@ static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 }
 
 static int cfg80211_rtw_add_station(struct wiphy *wiphy,
-				    struct net_device *ndev, u8 *mac,
+				    struct net_device *ndev, const u8 *mac,
 				    struct station_parameters *params)
 {
 	DBG_8723A("%s(%s)\n", __func__, ndev->name);
@@ -2912,7 +2912,7 @@ static int cfg80211_rtw_add_station(struct wiphy *wiphy,
 }
 
 static int cfg80211_rtw_del_station(struct wiphy *wiphy,
-				    struct net_device *ndev, u8 *mac)
+				    struct net_device *ndev, const u8 *mac)
 {
 	int ret = 0;
 	struct list_head *phead, *plist, *ptmp;
@@ -2988,7 +2988,7 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy,
 }
 
 static int cfg80211_rtw_change_station(struct wiphy *wiphy,
-				       struct net_device *ndev, u8 *mac,
+				       struct net_device *ndev, const u8 *mac,
 				       struct station_parameters *params)
 {
 	DBG_8723A("%s(%s)\n", __func__, ndev->name);
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
index 9a37408708f4..046be2ce9c1a 100644
--- a/drivers/staging/rtl8821ae/core.c
+++ b/drivers/staging/rtl8821ae/core.c
@@ -1278,7 +1278,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channel or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif,
+			 u32 queues, bool drop)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index f76f95c29617..723319ee08f3 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -84,7 +84,7 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
84} 84}
85 85
86static int prism2_domibset_pstr32(wlandevice_t *wlandev, 86static int prism2_domibset_pstr32(wlandevice_t *wlandev,
87 u32 did, u8 len, u8 *data) 87 u32 did, u8 len, const u8 *data)
88{ 88{
89 struct p80211msg_dot11req_mibset msg; 89 struct p80211msg_dot11req_mibset msg;
90 p80211item_pstr32_t *mibitem = 90 p80211item_pstr32_t *mibitem =
@@ -298,7 +298,7 @@ static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
 
 
 static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
-			      u8 *mac, struct station_info *sinfo)
+			      const u8 *mac, struct station_info *sinfo)
 {
 	wlandevice_t *wlandev = dev->ml_priv;
 	struct p80211msg_lnxreq_commsquality quality;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index fe0880d0873e..3d78a8844e43 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -793,7 +793,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 
 	net->netdev_ops = &eth_netdev_ops;
 
-	SET_ETHTOOL_OPS(net, &ops);
+	net->ethtool_ops = &ops;
 
 	dev->gadget = g;
 	SET_NETDEV_DEV(net, &g->dev);
@@ -850,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 
 	net->netdev_ops = &eth_netdev_ops;
 
-	SET_ETHTOOL_OPS(net, &ops);
+	net->ethtool_ops = &ops;
 	SET_NETDEV_DEVTYPE(net, &gadget_type);
 
 	return net;
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index 8598f8eacb20..a495a959e8a7 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -36,6 +36,8 @@ struct ath9k_platform_data {
36 36
37 int (*get_mac_revision)(void); 37 int (*get_mac_revision)(void);
38 int (*external_reset)(void); 38 int (*external_reset)(void);
39
40 bool use_eeprom;
39}; 41};
40 42
41#endif /* _LINUX_ATH9K_PLATFORM_H */ 43#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 78c6c52073ad..a0875001b13c 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_CORE_H
-#define CAN_CORE_H
+#ifndef _CAN_CORE_H
+#define _CAN_CORE_H
 
 #include <linux/can.h>
 #include <linux/skbuff.h>
@@ -58,4 +58,4 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 extern int can_send(struct sk_buff *skb, int loop);
 extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 
-#endif /* CAN_CORE_H */
+#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 3ce5e526525f..6992afc6ba7f 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_DEV_H
-#define CAN_DEV_H
+#ifndef _CAN_DEV_H
+#define _CAN_DEV_H
 
 #include <linux/can.h>
 #include <linux/can/netlink.h>
@@ -132,4 +132,4 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 struct sk_buff *alloc_can_err_skb(struct net_device *dev,
 				  struct can_frame **cf);
 
-#endif /* CAN_DEV_H */
+#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
index 9c1167baf273..e0475c5cbb92 100644
--- a/include/linux/can/led.h
+++ b/include/linux/can/led.h
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef CAN_LED_H
-#define CAN_LED_H
+#ifndef _CAN_LED_H
+#define _CAN_LED_H
 
 #include <linux/if.h>
 #include <linux/leds.h>
@@ -48,4 +48,4 @@ static inline void can_led_notifier_exit(void)
 
 #endif
 
-#endif
+#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
index 7702641f87ee..78b2d44f04cf 100644
--- a/include/linux/can/platform/cc770.h
+++ b/include/linux/can/platform/cc770.h
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_CC770_H_
-#define _CAN_PLATFORM_CC770_H_
+#ifndef _CAN_PLATFORM_CC770_H
+#define _CAN_PLATFORM_CC770_H
 
 /* CPU Interface Register (0x02) */
 #define CPUIF_CEN	0x01	/* Clock Out Enable */
@@ -30,4 +30,4 @@ struct cc770_platform_data {
 	u8 bcr;		/* Bus Configuration Register */
 };
 
-#endif	/* !_CAN_PLATFORM_CC770_H_ */
+#endif	/* !_CAN_PLATFORM_CC770_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index dc029dba7a03..d44fcae274ff 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_MCP251X_H__
-#define __CAN_PLATFORM_MCP251X_H__
+#ifndef _CAN_PLATFORM_MCP251X_H
+#define _CAN_PLATFORM_MCP251X_H
 
 /*
  *
@@ -18,4 +18,4 @@ struct mcp251x_platform_data {
 	unsigned long oscillator_frequency;
 };
 
-#endif	/* __CAN_PLATFORM_MCP251X_H__ */
+#endif	/* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
new file mode 100644
index 000000000000..0f4a2f3df504
--- /dev/null
+++ b/include/linux/can/platform/rcar_can.h
@@ -0,0 +1,17 @@
+#ifndef _CAN_PLATFORM_RCAR_CAN_H_
+#define _CAN_PLATFORM_RCAR_CAN_H_
+
+#include <linux/types.h>
+
+/* Clock Select Register settings */
+enum CLKR {
+	CLKR_CLKP1 = 0,	/* Peripheral clock (clkp1) */
+	CLKR_CLKP2 = 1,	/* Peripheral clock (clkp2) */
+	CLKR_CLKEXT = 3	/* Externally input clock */
+};
+
+struct rcar_can_platform_data {
+	enum CLKR clock_select;	/* Clock source select */
+};
+
+#endif	/* !_CAN_PLATFORM_RCAR_CAN_H_ */
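
A hypothetical board file would hook the new platform header up
roughly like this (device registration details omitted; the variable
name rcar_can_pdata is a placeholder, not from the patch):

    #include <linux/can/platform/rcar_can.h>

    /* Board-support sketch: run the R-Car CAN controller from the
     * externally supplied clock instead of a peripheral clock.
     */
    static struct rcar_can_platform_data rcar_can_pdata = {
            .clock_select = CLKR_CLKEXT,
    };
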
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 96f8fcc78d78..93570b61ec6c 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_SJA1000_H_
-#define _CAN_PLATFORM_SJA1000_H_
+#ifndef _CAN_PLATFORM_SJA1000_H
+#define _CAN_PLATFORM_SJA1000_H
 
 /* clock divider register */
 #define CDR_CLKOUT_MASK 0x07
@@ -32,4 +32,4 @@ struct sja1000_platform_data {
 	u8 cdr;		/* clock divider register */
 };
 
-#endif	/* !_CAN_PLATFORM_SJA1000_H_ */
+#endif	/* !_CAN_PLATFORM_SJA1000_H */
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h
index af17cb3f7a84..a52f47ca6c8a 100644
--- a/include/linux/can/platform/ti_hecc.h
+++ b/include/linux/can/platform/ti_hecc.h
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_TI_HECC_H__
-#define __CAN_PLATFORM_TI_HECC_H__
+#ifndef _CAN_PLATFORM_TI_HECC_H
+#define _CAN_PLATFORM_TI_HECC_H
 
 /*
  * TI HECC (High End CAN Controller) driver platform header
@@ -41,4 +41,4 @@ struct ti_hecc_platform_data {
 	u32 version;
 	void (*transceiver_switch) (int);
 };
-#endif
+#endif	/* !_CAN_PLATFORM_TI_HECC_H */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index f9bbbb472663..cc00d15c6107 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -7,8 +7,8 @@
  *
  */
 
-#ifndef CAN_SKB_H
-#define CAN_SKB_H
+#ifndef _CAN_SKB_H
+#define _CAN_SKB_H
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
@@ -80,4 +80,4 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
 	return skb;
 }
 
-#endif /* CAN_SKB_H */
+#endif /* !_CAN_SKB_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 3557ea7b2049..2997af6d2ccd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -142,6 +142,13 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 	return 1;
 }
 
+static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+{
+	set_bit(0, cpumask_bits(dstp));
+
+	return 0;
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
@@ -192,6 +199,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
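
A sketch of the intended use, following the NIC-driver pattern this
helper was added for: pick a CPU for the i-th queue of a device,
preferring CPUs on the device's NUMA node first (irq, ring_idx and
numa_node are placeholder variables in this hedged example):

    cpumask_var_t mask;
    int err;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

    /* Prefer a CPU local to the device for the i-th ring. */
    err = cpumask_set_cpu_local_first(ring_idx, numa_node, mask);
    if (!err)
            irq_set_affinity_hint(irq, mask);

    free_cpumask_var(mask);
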
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
index 1786e772d5c6..d590765106f3 100644
--- a/include/linux/crc7.h
+++ b/include/linux/crc7.h
@@ -2,13 +2,13 @@
 #define _LINUX_CRC7_H
 #include <linux/types.h>
 
-extern const u8 crc7_syndrome_table[256];
+extern const u8 crc7_be_syndrome_table[256];
 
-static inline u8 crc7_byte(u8 crc, u8 data)
+static inline u8 crc7_be_byte(u8 crc, u8 data)
 {
-	return crc7_syndrome_table[(crc << 1) ^ data];
+	return crc7_be_syndrome_table[crc ^ data];
 }
 
-extern u8 crc7(u8 crc, const u8 *buffer, size_t len);
+extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
 
 #endif
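
The rename reflects a real behavioural change: the table now operates
on the shifted-left-by-one ("big-endian") representation, so the
7-bit result lands in the top bits of the returned byte and callers
no longer shift it themselves. For an SD/MMC command frame the
trailing end bit is then simply ORed in; a hedged sketch:

    u8 cmd0[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 }; /* CMD0, arg 0 */
    u8 crc_byte = crc7_be(0, cmd0, sizeof(cmd0)) | 0x01;
    /* crc_byte should be 0x95, the well-known CMD0 CRC byte. */
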
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 0a114d05f68d..e658229fee39 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -154,13 +154,20 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
  * @reset: Reset (part of) the device, as specified by a bitmask of
  *	flags from &enum ethtool_reset_flags. Returns a negative
  *	error code or zero.
+ * @get_rxfh_key_size: Get the size of the RX flow hash key.
+ *	Returns zero if not supported for this specific device.
  * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
  *	Returns zero if not supported for this specific device.
- * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
- *	Will not be called if @get_rxfh_indir_size returns zero.
+ * @get_rxfh: Get the contents of the RX flow hash indirection table and hash
+ *	key.
+ *	Will only be called if one or both of @get_rxfh_indir_size and
+ *	@get_rxfh_key_size are implemented and return non-zero.
  *	Returns a negative error code or zero.
- * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
- *	Will not be called if @get_rxfh_indir_size returns zero.
+ * @set_rxfh: Set the contents of the RX flow hash indirection table and/or
+ *	hash key. In case only the indirection table or hash key is to be
+ *	changed, the other argument will be %NULL.
+ *	Will only be called if one or both of @get_rxfh_indir_size and
+ *	@get_rxfh_key_size are implemented and return non-zero.
  *	Returns a negative error code or zero.
  * @get_channels: Get number of channels.
  * @set_channels: Set number of channels. Returns a negative error code or
@@ -232,9 +239,11 @@ struct ethtool_ops {
 	int	(*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
 	int	(*flash_device)(struct net_device *, struct ethtool_flash *);
 	int	(*reset)(struct net_device *, u32 *);
+	u32	(*get_rxfh_key_size)(struct net_device *);
 	u32	(*get_rxfh_indir_size)(struct net_device *);
-	int	(*get_rxfh_indir)(struct net_device *, u32 *);
-	int	(*set_rxfh_indir)(struct net_device *, const u32 *);
+	int	(*get_rxfh)(struct net_device *, u32 *indir, u8 *key);
+	int	(*set_rxfh)(struct net_device *, const u32 *indir,
+			    const u8 *key);
 	void	(*get_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*set_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*get_dump_flag)(struct net_device *, struct ethtool_dump *);
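
A hedged driver-side sketch of the reworked RSS ops: get_rxfh and
set_rxfh now handle the indirection table and the hash key in one
call, and either argument may be NULL when the core only wants the
other. Every name starting with "my_"/"MY_" is a placeholder:

    static u32 my_get_rxfh_key_size(struct net_device *dev)
    {
            return MY_RSS_KEY_SIZE;
    }

    static u32 my_get_rxfh_indir_size(struct net_device *dev)
    {
            return MY_RSS_INDIR_SIZE;
    }

    static int my_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
    {
            struct my_priv *priv = netdev_priv(dev);

            if (indir)
                    memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
            if (key)
                    memcpy(key, priv->rss_key, sizeof(priv->rss_key));
            return 0;
    }
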
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 024fd03e5d18..a7e3c48d73a7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -37,21 +37,270 @@
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
 
+/* Register numbers */
+enum {
+	BPF_REG_0 = 0,
+	BPF_REG_1,
+	BPF_REG_2,
+	BPF_REG_3,
+	BPF_REG_4,
+	BPF_REG_5,
+	BPF_REG_6,
+	BPF_REG_7,
+	BPF_REG_8,
+	BPF_REG_9,
+	BPF_REG_10,
+	__MAX_BPF_REG,
+};
+
 /* BPF has 10 general purpose 64-bit registers and stack frame. */
-#define MAX_BPF_REG	11
+#define MAX_BPF_REG	__MAX_BPF_REG
+
+/* ArgX, context and stack frame pointer register positions. Note,
+ * Arg1, Arg2, Arg3, etc are used as argument mappings of function
+ * calls in BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1	BPF_REG_1
+#define BPF_REG_ARG2	BPF_REG_2
+#define BPF_REG_ARG3	BPF_REG_3
+#define BPF_REG_ARG4	BPF_REG_4
+#define BPF_REG_ARG5	BPF_REG_5
+#define BPF_REG_CTX	BPF_REG_6
+#define BPF_REG_FP	BPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A	BPF_REG_0
+#define BPF_REG_X	BPF_REG_7
+#define BPF_REG_TMP	BPF_REG_8
 
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
-/* Arg1, context and stack frame pointer register positions. */
-#define ARG1_REG	1
-#define CTX_REG		6
-#define FP_REG		10
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define BPF_ALU64_REG(OP, DST, SRC) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_OP(OP) | BPF_X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = IMM })
+
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_OP(OP) | BPF_K, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = IMM })
+
+/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, DST, LEN) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = LEN })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU64 | BPF_MOV | BPF_X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = 0 })
+
+#define BPF_MOV32_REG(DST, SRC) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_MOV | BPF_X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV64_IMM(DST, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU64 | BPF_MOV | BPF_K, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = IMM })
+
+#define BPF_MOV32_IMM(DST, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_MOV | BPF_K, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = IMM })
+
+/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
+
+#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = IMM })
+
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = IMM })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
+
+#define BPF_LD_IND(SIZE, SRC, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
+		.dst_reg = 0, \
+		.src_reg = SRC, \
+		.off   = 0, \
+		.imm   = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
+	((struct sock_filter_int) { \
+		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = OFF, \
+		.imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
+	((struct sock_filter_int) { \
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = OFF, \
+		.imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
+	((struct sock_filter_int) { \
+		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = OFF, \
+		.imm   = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
+
+#define BPF_JMP_REG(OP, DST, SRC, OFF) \
+	((struct sock_filter_int) { \
+		.code  = BPF_JMP | BPF_OP(OP) | BPF_X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = OFF, \
+		.imm   = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
+	((struct sock_filter_int) { \
+		.code  = BPF_JMP | BPF_OP(OP) | BPF_K, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off   = OFF, \
+		.imm   = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC) \
+	((struct sock_filter_int) { \
+		.code  = BPF_JMP | BPF_CALL, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = ((FUNC) - __bpf_call_base) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
+	((struct sock_filter_int) { \
+		.code  = CODE, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
+		.off   = OFF, \
+		.imm   = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+	((struct sock_filter_int) { \
+		.code  = BPF_JMP | BPF_EXIT, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
+		.off   = 0, \
+		.imm   = 0 })
+
+#define bytes_to_bpf_size(bytes) \
+({ \
+	int bpf_size = -EINVAL; \
+ \
+	if (bytes == sizeof(u8)) \
+		bpf_size = BPF_B; \
+	else if (bytes == sizeof(u16)) \
+		bpf_size = BPF_H; \
+	else if (bytes == sizeof(u32)) \
+		bpf_size = BPF_W; \
+	else if (bytes == sizeof(u64)) \
+		bpf_size = BPF_DW; \
+ \
+	bpf_size; \
+})
+
+/* Macro to invoke filter function. */
+#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
 
 struct sock_filter_int {
 	__u8	code;		/* opcode */
-	__u8	a_reg:4;	/* dest register */
-	__u8	x_reg:4;	/* source register */
+	__u8	dst_reg:4;	/* dest register */
+	__u8	src_reg:4;	/* source register */
 	__s16	off;		/* signed offset */
 	__s32	imm;		/* signed immediate constant */
 };
@@ -97,21 +346,16 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
 #define sk_filter_proglen(fprog)	\
 		(fprog->len * sizeof(fprog->filter[0]))
 
-#define SK_RUN_FILTER(filter, ctx)	\
-	(*filter->bpf_func)(ctx, filter->insnsi)
-
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-			      const struct sock_filter_int *insni);
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-			  const struct sock_filter_int *insni);
+void sk_filter_select_runtime(struct sk_filter *fp);
+void sk_filter_free(struct sk_filter *fp);
 
 int sk_convert_filter(struct sock_filter *prog, int len,
 		      struct sock_filter_int *new_prog, int *new_len);
 
 int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog *fprog);
+				struct sock_fprog_kern *fprog);
 void sk_unattached_filter_destroy(struct sk_filter *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
@@ -120,11 +364,48 @@ int sk_detach_filter(struct sock *sk);
 int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
-void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 
 void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
+u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+void bpf_int_jit_compile(struct sk_filter *fp);
+
+#define BPF_ANC		BIT(15)
+
+static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
+{
+	BUG_ON(ftest->code & BPF_ANC);
+
+	switch (ftest->code) {
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
+#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
+				return BPF_ANC | SKF_AD_##CODE
+		switch (ftest->k) {
+		BPF_ANCILLARY(PROTOCOL);
+		BPF_ANCILLARY(PKTTYPE);
+		BPF_ANCILLARY(IFINDEX);
+		BPF_ANCILLARY(NLATTR);
+		BPF_ANCILLARY(NLATTR_NEST);
+		BPF_ANCILLARY(MARK);
+		BPF_ANCILLARY(QUEUE);
+		BPF_ANCILLARY(HATYPE);
+		BPF_ANCILLARY(RXHASH);
+		BPF_ANCILLARY(CPU);
+		BPF_ANCILLARY(ALU_XOR_X);
+		BPF_ANCILLARY(VLAN_TAG);
+		BPF_ANCILLARY(VLAN_TAG_PRESENT);
+		BPF_ANCILLARY(PAY_OFFSET);
+		BPF_ANCILLARY(RANDOM);
+		}
+		/* Fallthrough. */
+	default:
+		return ftest->code;
+	}
+}
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
@@ -144,85 +425,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 }
 #else
 #include <linux/slab.h>
+
 static inline void bpf_jit_compile(struct sk_filter *fp)
 {
 }
+
 static inline void bpf_jit_free(struct sk_filter *fp)
 {
 	kfree(fp);
 }
-#endif
+#endif /* CONFIG_BPF_JIT */
 
 static inline int bpf_tell_extensions(void)
 {
 	return SKF_AD_MAX;
 }
 
-enum {
-	BPF_S_RET_K = 1,
-	BPF_S_RET_A,
-	BPF_S_ALU_ADD_K,
-	BPF_S_ALU_ADD_X,
-	BPF_S_ALU_SUB_K,
-	BPF_S_ALU_SUB_X,
-	BPF_S_ALU_MUL_K,
-	BPF_S_ALU_MUL_X,
-	BPF_S_ALU_DIV_X,
-	BPF_S_ALU_MOD_K,
-	BPF_S_ALU_MOD_X,
-	BPF_S_ALU_AND_K,
-	BPF_S_ALU_AND_X,
-	BPF_S_ALU_OR_K,
-	BPF_S_ALU_OR_X,
-	BPF_S_ALU_XOR_K,
-	BPF_S_ALU_XOR_X,
-	BPF_S_ALU_LSH_K,
-	BPF_S_ALU_LSH_X,
-	BPF_S_ALU_RSH_K,
-	BPF_S_ALU_RSH_X,
-	BPF_S_ALU_NEG,
-	BPF_S_LD_W_ABS,
-	BPF_S_LD_H_ABS,
-	BPF_S_LD_B_ABS,
-	BPF_S_LD_W_LEN,
-	BPF_S_LD_W_IND,
-	BPF_S_LD_H_IND,
-	BPF_S_LD_B_IND,
-	BPF_S_LD_IMM,
-	BPF_S_LDX_W_LEN,
-	BPF_S_LDX_B_MSH,
-	BPF_S_LDX_IMM,
-	BPF_S_MISC_TAX,
-	BPF_S_MISC_TXA,
-	BPF_S_ALU_DIV_K,
-	BPF_S_LD_MEM,
-	BPF_S_LDX_MEM,
-	BPF_S_ST,
-	BPF_S_STX,
-	BPF_S_JMP_JA,
-	BPF_S_JMP_JEQ_K,
-	BPF_S_JMP_JEQ_X,
-	BPF_S_JMP_JGE_K,
-	BPF_S_JMP_JGE_X,
-	BPF_S_JMP_JGT_K,
-	BPF_S_JMP_JGT_X,
-	BPF_S_JMP_JSET_K,
-	BPF_S_JMP_JSET_X,
-	/* Ancillary data */
-	BPF_S_ANC_PROTOCOL,
-	BPF_S_ANC_PKTTYPE,
-	BPF_S_ANC_IFINDEX,
-	BPF_S_ANC_NLATTR,
-	BPF_S_ANC_NLATTR_NEST,
-	BPF_S_ANC_MARK,
-	BPF_S_ANC_QUEUE,
-	BPF_S_ANC_HATYPE,
-	BPF_S_ANC_RXHASH,
-	BPF_S_ANC_CPU,
-	BPF_S_ANC_ALU_XOR_X,
-	BPF_S_ANC_VLAN_TAG,
-	BPF_S_ANC_VLAN_TAG_PRESENT,
-	BPF_S_ANC_PAY_OFFSET,
-};
-
 #endif /* __LINUX_FILTER_H__ */
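
As a quick illustration of the new initializer macros, a minimal
internal-BPF program that just returns 1 (accept) can be written as a
plain array literal; this hedged sketch mirrors how converted
programs are laid out, but is not code from the patch:

    struct sock_filter_int prog[] = {
            /* R0 = 1 */
            BPF_MOV64_IMM(BPF_REG_0, 1),
            /* return R0 */
            BPF_EXIT_INSN(),
    };
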
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index f194ccb8539c..6bff13f74050 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1711,6 +1711,7 @@ enum ieee80211_eid {
 	WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
 	WLAN_EID_MULTIPLE_BSSID = 71,
 	WLAN_EID_BSS_COEX_2040 = 72,
+	WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73,
 	WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74,
 	WLAN_EID_RIC_DESCRIPTOR = 75,
 	WLAN_EID_MMIE = 76,
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 1085ffeef956..fd22789d7b2e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -16,9 +16,28 @@
 #include <linux/netdevice.h>
 #include <uapi/linux/if_bridge.h>
 
+struct br_ip {
+	union {
+		__be32	ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+		struct in6_addr ip6;
+#endif
+	} u;
+	__be16		proto;
+	__u16		vid;
+};
+
+struct br_ip_list {
+	struct list_head list;
+	struct br_ip addr;
+};
+
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
+int br_multicast_list_adjacent(struct net_device *dev,
+			       struct list_head *br_ip_list);
+bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
 
 #endif
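
A hedged sketch of how a caller might consume the new adjacency
helper; the exact locking rules and the ownership of the allocated
br_ip_list entries are defined by the bridge-side implementation, so
treat this strictly as illustrative:

    LIST_HEAD(mc_list);
    struct br_ip_list *entry, *tmp;
    int n;

    n = br_multicast_list_adjacent(dev, &mc_list);
    list_for_each_entry_safe(entry, tmp, &mc_list, list) {
            /* entry->addr.proto distinguishes IPv4/IPv6 groups */
            list_del(&entry->list);
            kfree(entry);
    }
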
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index a86784dec3d3..119130e9298b 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -10,8 +10,9 @@ struct ifla_vf_info {
 	__u8 mac[32];
 	__u32 vlan;
 	__u32 qos;
-	__u32 tx_rate;
 	__u32 spoofchk;
 	__u32 linkstate;
+	__u32 min_tx_rate;
+	__u32 max_tx_rate;
 };
 #endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a9a53b12397b..6b2c7cf352a5 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -57,6 +57,9 @@ struct macvlan_dev {
 	netdev_features_t	tap_features;
 	int			minor;
 	int			nest_level;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll		*netpoll;
+#endif
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index b2acc4a1b13c..4967916fe4ac 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -106,7 +106,7 @@ struct vlan_pcpu_stats {
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
-extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
 					       __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
@@ -206,7 +206,7 @@ static inline int vlan_get_encap_level(struct net_device *dev)
 }
 #else
 static inline struct net_device *
-__vlan_find_dev_deep(struct net_device *real_dev,
+__vlan_find_dev_deep_rcu(struct net_device *real_dev,
 		     __be16 vlan_proto, u16 vlan_id)
 {
 	return NULL;
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 5a52f2c94f3f..44bd6046e6e2 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -164,11 +164,6 @@ unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
 			  __u8 _Command, __u8 _Subcommand,
 			  __u16 _Messagenumber, __u32 _Controller);
 
-/*
- * capi_info2str generated a readable string for Capi2.0 reasons.
- */
-char *capi_info2str(__u16 reason);
-
 /*-----------------------------------------------------------------------*/
 
 /*
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 31c0cd1c941a..de9e46e6bcc9 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -304,6 +304,30 @@ static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
 	return 0;
 }
 
+/**
+ * ktime_after - Compare if a ktime_t value is bigger than another one.
+ * @cmp1:	comparable1
+ * @cmp2:	comparable2
+ *
+ * Return: true if cmp1 happened after cmp2.
+ */
+static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
+{
+	return ktime_compare(cmp1, cmp2) > 0;
+}
+
+/**
+ * ktime_before - Compare if a ktime_t value is smaller than another one.
+ * @cmp1:	comparable1
+ * @cmp2:	comparable2
+ *
+ * Return: true if cmp1 happened before cmp2.
+ */
+static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
+{
+	return ktime_compare(cmp1, cmp2) < 0;
+}
+
 static inline s64 ktime_to_us(const ktime_t kt)
 {
 	struct timeval tv = ktime_to_timeval(kt);
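
The new comparison helpers read better than open-coded
ktime_compare() calls; for example, a polling loop with a 100 ms
deadline (do_poll_once() is a placeholder for this sketch):

    ktime_t deadline = ktime_add_ns(ktime_get(), 100 * NSEC_PER_MSEC);

    while (ktime_before(ktime_get(), deadline))
            do_poll_once();
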
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3447bead9620..b12f4bbd064c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -450,7 +450,6 @@ struct mlx4_caps {
 	int			reserved_qps_base[MLX4_NUM_QP_REGION];
 	int			log_num_macs;
 	int			log_num_vlans;
-	int			log_num_prios;
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	u8			supported_type[MLX4_MAX_PORTS + 1];
 	u8			suggested_type[MLX4_MAX_PORTS + 1];
@@ -578,6 +577,9 @@ struct mlx4_cq {
 
 	u32			cons_index;
 
+	u16			irq;
+	bool			irq_affinity_change;
+
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
 	int			arm_sn;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index c26d0ec2ef3a..e5a589435e2b 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,9 +42,11 @@ enum {
 	NETIF_F_TSO6_BIT,		/* ... TCPv6 segmentation */
 	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
 	NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
+	NETIF_F_GSO_GRE_CSUM_BIT,	/* ... GRE with csum with TSO */
 	NETIF_F_GSO_IPIP_BIT,		/* ... IPIP tunnel with TSO */
 	NETIF_F_GSO_SIT_BIT,		/* ... SIT tunnel with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
+	NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
 	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
 		NETIF_F_GSO_MPLS_BIT,
@@ -111,6 +113,7 @@ enum {
111#define NETIF_F_RXFCS __NETIF_F(RXFCS) 113#define NETIF_F_RXFCS __NETIF_F(RXFCS)
112#define NETIF_F_RXALL __NETIF_F(RXALL) 114#define NETIF_F_RXALL __NETIF_F(RXALL)
113#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) 115#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
116#define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM)
114#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) 117#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
115#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) 118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
116#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) 119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
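The two new feature bits let a device advertise segmentation offload for tunnels whose outer encapsulation carries a checksum. A hypothetical driver would expose them at probe time next to the existing tunnel GSO bits; a sketch, assuming dev is the net_device being registered:

    /* Sketch: advertise checksummed tunnel segmentation offload. */
    dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
                        NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
    dev->features |= dev->hw_features;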
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6c1ae9fd9505..abe3de1db932 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -56,9 +56,6 @@ struct device;
56struct phy_device; 56struct phy_device;
57/* 802.11 specific */ 57/* 802.11 specific */
58struct wireless_dev; 58struct wireless_dev;
59 /* source back-compat hooks */
60#define SET_ETHTOOL_OPS(netdev,ops) \
61 ( (netdev)->ethtool_ops = (ops) )
62 59
63void netdev_set_default_ethtool_ops(struct net_device *dev, 60void netdev_set_default_ethtool_ops(struct net_device *dev,
64 const struct ethtool_ops *ops); 61 const struct ethtool_ops *ops);
@@ -853,7 +850,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
853 * SR-IOV management functions. 850 * SR-IOV management functions.
854 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 851 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
855 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 852 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
856 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); 853 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
854 * int max_tx_rate);
857 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 855 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
858 * int (*ndo_get_vf_config)(struct net_device *dev, 856 * int (*ndo_get_vf_config)(struct net_device *dev,
859 * int vf, struct ifla_vf_info *ivf); 857 * int vf, struct ifla_vf_info *ivf);
@@ -1047,8 +1045,9 @@ struct net_device_ops {
1047 int queue, u8 *mac); 1045 int queue, u8 *mac);
1048 int (*ndo_set_vf_vlan)(struct net_device *dev, 1046 int (*ndo_set_vf_vlan)(struct net_device *dev,
1049 int queue, u16 vlan, u8 qos); 1047 int queue, u16 vlan, u8 qos);
1050 int (*ndo_set_vf_tx_rate)(struct net_device *dev, 1048 int (*ndo_set_vf_rate)(struct net_device *dev,
1051 int vf, int rate); 1049 int vf, int min_tx_rate,
1050 int max_tx_rate);
1052 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1051 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1053 int vf, bool setting); 1052 int vf, bool setting);
1054 int (*ndo_get_vf_config)(struct net_device *dev, 1053 int (*ndo_get_vf_config)(struct net_device *dev,
@@ -2634,6 +2633,7 @@ int dev_get_phys_port_id(struct net_device *dev,
2634 struct netdev_phys_port_id *ppid); 2633 struct netdev_phys_port_id *ppid);
2635int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2634int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2636 struct netdev_queue *txq); 2635 struct netdev_queue *txq);
2636int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2637int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2637int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2638bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); 2638bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
2639 2639
@@ -3003,6 +3003,15 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3003 struct netdev_hw_addr_list *from_list, int addr_len); 3003 struct netdev_hw_addr_list *from_list, int addr_len);
3004void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 3004void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3005 struct netdev_hw_addr_list *from_list, int addr_len); 3005 struct netdev_hw_addr_list *from_list, int addr_len);
3006int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3007 struct net_device *dev,
3008 int (*sync)(struct net_device *, const unsigned char *),
3009 int (*unsync)(struct net_device *,
3010 const unsigned char *));
3011void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3012 struct net_device *dev,
3013 int (*unsync)(struct net_device *,
3014 const unsigned char *));
3006void __hw_addr_init(struct netdev_hw_addr_list *list); 3015void __hw_addr_init(struct netdev_hw_addr_list *list);
3007 3016
3008/* Functions used for device addresses handling */ 3017/* Functions used for device addresses handling */
@@ -3023,6 +3032,38 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from);
3023void dev_uc_flush(struct net_device *dev); 3032void dev_uc_flush(struct net_device *dev);
3024void dev_uc_init(struct net_device *dev); 3033void dev_uc_init(struct net_device *dev);
3025 3034
3035/**
3036 * __dev_uc_sync - Synchronize device's unicast list
3037 * @dev: device to sync
3038 * @sync: function to call if address should be added
3039 * @unsync: function to call if address should be removed
3040 *
3041 * Add newly added addresses to the interface, and release
3042 * addresses that have been deleted.
3043 **/
3044static inline int __dev_uc_sync(struct net_device *dev,
3045 int (*sync)(struct net_device *,
3046 const unsigned char *),
3047 int (*unsync)(struct net_device *,
3048 const unsigned char *))
3049{
3050 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3051}
3052
3053/**
3054 * __dev_uc_unsync - Remove synchronized addresses from device
3055 * @dev: device to sync
3056 * @unsync: function to call if address should be removed
3057 *
3058 * Remove all addresses that were added to the device by dev_uc_sync().
3059 **/
3060static inline void __dev_uc_unsync(struct net_device *dev,
3061 int (*unsync)(struct net_device *,
3062 const unsigned char *))
3063{
3064 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3065}
3066
3026/* Functions used for multicast addresses handling */ 3067/* Functions used for multicast addresses handling */
3027int dev_mc_add(struct net_device *dev, const unsigned char *addr); 3068int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3028int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 3069int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
@@ -3035,6 +3076,38 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from);
3035void dev_mc_flush(struct net_device *dev); 3076void dev_mc_flush(struct net_device *dev);
3036void dev_mc_init(struct net_device *dev); 3077void dev_mc_init(struct net_device *dev);
3037 3078
3079/**
3080 * __dev_mc_sync - Synchronize device's multicast list
3081 * @dev: device to sync
3082 * @sync: function to call if address should be added
3083 * @unsync: function to call if address should be removed
3084 *
3085 * Add newly added addresses to the interface, and release
3086 * addresses that have been deleted.
3087 **/
3088static inline int __dev_mc_sync(struct net_device *dev,
3089 int (*sync)(struct net_device *,
3090 const unsigned char *),
3091 int (*unsync)(struct net_device *,
3092 const unsigned char *))
3093{
3094 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3095}
3096
3097/**
3098 * __dev_mc_unsync - Remove synchronized addresses from device
3099 * @dev: device to sync
3100 * @unsync: function to call if address should be removed
3101 *
3102 * Remove all addresses that were added to the device by dev_mc_sync().
3103 **/
3104static inline void __dev_mc_unsync(struct net_device *dev,
3105 int (*unsync)(struct net_device *,
3106 const unsigned char *))
3107{
3108 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3109}
3110
3038/* Functions used for secondary unicast and multicast support */ 3111/* Functions used for secondary unicast and multicast support */
3039void dev_set_rx_mode(struct net_device *dev); 3112void dev_set_rx_mode(struct net_device *dev);
3040void __dev_set_rx_mode(struct net_device *dev); 3113void __dev_set_rx_mode(struct net_device *dev);
@@ -3180,6 +3253,20 @@ const char *netdev_drivername(const struct net_device *dev);
3180 3253
3181void linkwatch_run_queue(void); 3254void linkwatch_run_queue(void);
3182 3255
3256static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3257 netdev_features_t f2)
3258{
3259 if (f1 & NETIF_F_GEN_CSUM)
3260 f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3261 if (f2 & NETIF_F_GEN_CSUM)
3262 f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3263 f1 &= f2;
3264 if (f1 & NETIF_F_GEN_CSUM)
3265 f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3266
3267 return f1;
3268}
3269
3183static inline netdev_features_t netdev_get_wanted_features( 3270static inline netdev_features_t netdev_get_wanted_features(
3184 struct net_device *dev) 3271 struct net_device *dev)
3185{ 3272{
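Taken together, the netdevice.h changes remove the SET_ETHTOOL_OPS() back-compat macro (drivers now assign dev->ethtool_ops directly) and add __dev_uc_sync()/__dev_mc_sync(), which mirror the kernel's address lists into hardware filters through per-address callbacks instead of hand-rolled list walks in ndo_set_rx_mode. A sketch for a hypothetical driver (the foo_* names are assumptions):

    /* Sketch: per-address callbacks invoked by the sync helpers. */
    static int foo_addr_sync(struct net_device *dev, const unsigned char *addr)
    {
            /* program one hardware filter entry for addr (device specific) */
            return 0;
    }

    static int foo_addr_unsync(struct net_device *dev, const unsigned char *addr)
    {
            /* remove the hardware filter entry for addr (device specific) */
            return 0;
    }

    static void foo_set_rx_mode(struct net_device *dev)
    {
            __dev_uc_sync(dev, foo_addr_sync, foo_addr_unsync);
            __dev_mc_sync(dev, foo_addr_sync, foo_addr_unsync);
    }

On teardown, __dev_uc_unsync()/__dev_mc_unsync() replay the unsync callback for everything previously added.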
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index b2e85e59f760..6ec975748742 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -3,11 +3,17 @@
3 3
4#include <uapi/linux/netfilter/nfnetlink_acct.h> 4#include <uapi/linux/netfilter/nfnetlink_acct.h>
5 5
6enum {
7 NFACCT_NO_QUOTA = -1,
8 NFACCT_UNDERQUOTA,
9 NFACCT_OVERQUOTA,
10};
6 11
7struct nf_acct; 12struct nf_acct;
8 13
9struct nf_acct *nfnl_acct_find_get(const char *filter_name); 14struct nf_acct *nfnl_acct_find_get(const char *filter_name);
10void nfnl_acct_put(struct nf_acct *acct); 15void nfnl_acct_put(struct nf_acct *acct);
11void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); 16void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
12 17extern int nfnl_acct_overquota(const struct sk_buff *skb,
18 struct nf_acct *nfacct);
13#endif /* _NFNL_ACCT_H */ 19#endif /* _NFNL_ACCT_H */
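nfnl_acct_overquota() pairs with the new NFACCT_* result codes so a match can first account a packet and then test the object's quota. A hedged sketch of the expected call pattern (foo_acct_match is hypothetical):

    /* Sketch: count the packet, then match only while under quota. */
    static bool foo_acct_match(const struct sk_buff *skb, struct nf_acct *nfacct)
    {
            nfnl_acct_update(skb, nfacct);
            return nfnl_acct_overquota(skb, nfacct) != NFACCT_OVERQUOTA;
    }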
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 034cda789a15..9e572daa15d5 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,7 +46,8 @@ struct netlink_kernel_cfg {
46 unsigned int flags; 46 unsigned int flags;
47 void (*input)(struct sk_buff *skb); 47 void (*input)(struct sk_buff *skb);
48 struct mutex *cb_mutex; 48 struct mutex *cb_mutex;
49 void (*bind)(int group); 49 int (*bind)(int group);
50 void (*unbind)(int group);
50 bool (*compare)(struct net *net, struct sock *sk); 51 bool (*compare)(struct net *net, struct sock *sk);
51}; 52};
52 53
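With bind() now returning int, a subsystem can refuse a group subscription instead of silently accepting it, and the new unbind() is called when the subscription is dropped. A sketch of a netlink_kernel_cfg using both (FOO_GROUP_MAX and the callbacks are hypothetical):

    static int foo_netlink_bind(int group)
    {
            /* veto groups this subsystem does not implement */
            return group <= FOO_GROUP_MAX ? 0 : -ENOENT;
    }

    static void foo_netlink_unbind(int group)
    {
            /* release any per-group state taken in foo_netlink_bind() */
    }

    static struct netlink_kernel_cfg foo_cfg = {
            .groups = FOO_GROUP_MAX,
            .bind   = foo_netlink_bind,
            .unbind = foo_netlink_unbind,
    };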
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index c8d7f3965fff..20163b9a0eae 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -80,6 +80,22 @@ enum {
80 80
81 IEEE802154_ATTR_FRAME_RETRIES, 81 IEEE802154_ATTR_FRAME_RETRIES,
82 82
83 IEEE802154_ATTR_LLSEC_ENABLED,
84 IEEE802154_ATTR_LLSEC_SECLEVEL,
85 IEEE802154_ATTR_LLSEC_KEY_MODE,
86 IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
87 IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
88 IEEE802154_ATTR_LLSEC_KEY_ID,
89 IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
90 IEEE802154_ATTR_LLSEC_KEY_BYTES,
91 IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
92 IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
93 IEEE802154_ATTR_LLSEC_FRAME_TYPE,
94 IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
95 IEEE802154_ATTR_LLSEC_SECLEVELS,
96 IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
97 IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
98
83 __IEEE802154_ATTR_MAX, 99 __IEEE802154_ATTR_MAX,
84}; 100};
85 101
@@ -134,6 +150,21 @@ enum {
134 150
135 IEEE802154_SET_MACPARAMS, 151 IEEE802154_SET_MACPARAMS,
136 152
153 IEEE802154_LLSEC_GETPARAMS,
154 IEEE802154_LLSEC_SETPARAMS,
155 IEEE802154_LLSEC_LIST_KEY,
156 IEEE802154_LLSEC_ADD_KEY,
157 IEEE802154_LLSEC_DEL_KEY,
158 IEEE802154_LLSEC_LIST_DEV,
159 IEEE802154_LLSEC_ADD_DEV,
160 IEEE802154_LLSEC_DEL_DEV,
161 IEEE802154_LLSEC_LIST_DEVKEY,
162 IEEE802154_LLSEC_ADD_DEVKEY,
163 IEEE802154_LLSEC_DEL_DEVKEY,
164 IEEE802154_LLSEC_LIST_SECLEVEL,
165 IEEE802154_LLSEC_ADD_SECLEVEL,
166 IEEE802154_LLSEC_DEL_SECLEVEL,
167
137 __IEEE802154_CMD_MAX, 168 __IEEE802154_CMD_MAX,
138}; 169};
139 170
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 881a7c3571f4..a70c9493d55a 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -22,12 +22,12 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
22struct phy_device *of_phy_attach(struct net_device *dev, 22struct phy_device *of_phy_attach(struct net_device *dev,
23 struct device_node *phy_np, u32 flags, 23 struct device_node *phy_np, u32 flags,
24 phy_interface_t iface); 24 phy_interface_t iface);
25extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
26 void (*hndlr)(struct net_device *),
27 phy_interface_t iface);
28 25
29extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
30 27
28extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
29 struct phy_device *phydev);
30
31#else /* CONFIG_OF */ 31#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 33{
@@ -59,17 +59,30 @@ static inline struct phy_device *of_phy_attach(struct net_device *dev,
59 return NULL; 59 return NULL;
60} 60}
61 61
62static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, 62static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
63 void (*hndlr)(struct net_device *),
64 phy_interface_t iface)
65{ 63{
66 return NULL; 64 return NULL;
67} 65}
68 66
69static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) 67static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
68 struct phy_device *phydev)
70{ 69{
71 return NULL;
72} 70}
73#endif /* CONFIG_OF */ 71#endif /* CONFIG_OF */
74 72
73#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
74extern int of_phy_register_fixed_link(struct device_node *np);
75extern bool of_phy_is_fixed_link(struct device_node *np);
76#else
77static inline int of_phy_register_fixed_link(struct device_node *np)
78{
79 return -ENOSYS;
80}
81static inline bool of_phy_is_fixed_link(struct device_node *np)
82{
83 return false;
84}
85#endif
86
87
75#endif /* __LINUX_OF_MDIO_H */ 88#endif /* __LINUX_OF_MDIO_H */
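of_phy_connect_fixed_link() gives way to a two-step model: a MAC driver asks whether its node carries a fixed-link description and, if so, registers the emulated PHY before connecting to it through the node itself. A sketch of the probe-time pattern, assuming np is the MAC's device_node and with error handling trimmed:

    struct device_node *phy_node = of_parse_phandle(np, "phy-handle", 0);

    if (!phy_node && of_phy_is_fixed_link(np)) {
            /* register the PHY emulated from the fixed-link property */
            if (of_phy_register_fixed_link(np) < 0)
                    return -ENODEV;
            phy_node = np;  /* the MAC node itself now describes the PHY */
    }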
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 4d0221fd0688..864ddafad8cc 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -198,6 +198,13 @@ static inline struct mii_bus *mdiobus_alloc(void)
198int mdiobus_register(struct mii_bus *bus); 198int mdiobus_register(struct mii_bus *bus);
199void mdiobus_unregister(struct mii_bus *bus); 199void mdiobus_unregister(struct mii_bus *bus);
200void mdiobus_free(struct mii_bus *bus); 200void mdiobus_free(struct mii_bus *bus);
201struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
202static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
203{
204 return devm_mdiobus_alloc_size(dev, 0);
205}
206
207void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
201struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); 208struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
202int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); 209int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
203int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); 210int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
@@ -666,6 +673,7 @@ static inline int phy_read_status(struct phy_device *phydev)
666 return phydev->drv->read_status(phydev); 673 return phydev->drv->read_status(phydev);
667} 674}
668 675
676int genphy_config_init(struct phy_device *phydev);
669int genphy_setup_forced(struct phy_device *phydev); 677int genphy_setup_forced(struct phy_device *phydev);
670int genphy_restart_aneg(struct phy_device *phydev); 678int genphy_restart_aneg(struct phy_device *phydev);
671int genphy_config_aneg(struct phy_device *phydev); 679int genphy_config_aneg(struct phy_device *phydev);
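devm_mdiobus_alloc() and devm_mdiobus_alloc_size() tie the bus allocation to a struct device lifetime, removing the explicit mdiobus_free() from driver error and remove paths. A sketch of a probe using the managed variant (foo names hypothetical):

    static int foo_mdio_probe(struct platform_device *pdev)
    {
            struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

            if (!bus)
                    return -ENOMEM;

            bus->name = "foo-mdio";
            bus->parent = &pdev->dev;
            /* ...fill in read/write accessors and bus->id... */

            /* mdiobus_unregister() is still required on remove */
            return mdiobus_register(bus);
    }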
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 509d8f5f984e..ae612acebb53 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -9,15 +9,31 @@ struct fixed_phy_status {
9 int asym_pause; 9 int asym_pause;
10}; 10};
11 11
12struct device_node;
13
12#ifdef CONFIG_FIXED_PHY 14#ifdef CONFIG_FIXED_PHY
13extern int fixed_phy_add(unsigned int irq, int phy_id, 15extern int fixed_phy_add(unsigned int irq, int phy_id,
14 struct fixed_phy_status *status); 16 struct fixed_phy_status *status);
17extern int fixed_phy_register(unsigned int irq,
18 struct fixed_phy_status *status,
19 struct device_node *np);
20extern void fixed_phy_del(int phy_addr);
15#else 21#else
16static inline int fixed_phy_add(unsigned int irq, int phy_id, 22static inline int fixed_phy_add(unsigned int irq, int phy_id,
17 struct fixed_phy_status *status) 23 struct fixed_phy_status *status)
18{ 24{
19 return -ENODEV; 25 return -ENODEV;
20} 26}
27static inline int fixed_phy_register(unsigned int irq,
28 struct fixed_phy_status *status,
29 struct device_node *np)
30{
31 return -ENODEV;
32}
33static inline void fixed_phy_del(int phy_addr)
34{
35 /* nothing to remove without CONFIG_FIXED_PHY */
36}
21#endif /* CONFIG_FIXED_PHY */ 37#endif /* CONFIG_FIXED_PHY */
22 38
23/* 39/*
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
new file mode 100644
index 000000000000..1730312398ff
--- /dev/null
+++ b/include/linux/platform_data/st21nfca.h
@@ -0,0 +1,32 @@
1/*
2 * Driver include for the ST21NFCA NFC chip.
3 *
4 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ST21NFCA_HCI_H_
20#define _ST21NFCA_HCI_H_
21
22#include <linux/i2c.h>
23
24#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
25
26struct st21nfca_nfc_platform_data {
27 unsigned int gpio_irq;
28 unsigned int gpio_ena;
29 unsigned int irq_polarity;
30};
31
32#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
index 4d09f6eab359..20bcb55498cd 100644
--- a/include/linux/rfkill-gpio.h
+++ b/include/linux/rfkill-gpio.h
@@ -27,21 +27,11 @@
27 * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. 27 * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
28 * for unused gpio's, the expected value is -1. 28 * for unused gpio's, the expected value is -1.
29 * @name: name for the gpio rf kill instance 29 * @name: name for the gpio rf kill instance
30 * @reset_gpio: GPIO which is used for resetting the rfkill switch
31 * @shutdown_gpio: GPIO which is used for shutdown of rfkill switch
32 * @power_clk_name: [optional] name of clk to turn off while blocked
33 * @gpio_runtime_close: clean up platform specific gpio configuration
34 * @gpio_runtime_setup: set up platform specific gpio configuration
35 */ 30 */
36 31
37struct rfkill_gpio_platform_data { 32struct rfkill_gpio_platform_data {
38 char *name; 33 char *name;
39 int reset_gpio;
40 int shutdown_gpio;
41 const char *power_clk_name;
42 enum rfkill_type type; 34 enum rfkill_type type;
43 void (*gpio_runtime_close)(struct platform_device *);
44 int (*gpio_runtime_setup)(struct platform_device *);
45}; 35};
46 36
47#endif /* __RFKILL_GPIO_H */ 37#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 08074a810164..5b5cd3189c98 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -345,6 +345,10 @@ enum {
345 SKB_GSO_UDP_TUNNEL = 1 << 9, 345 SKB_GSO_UDP_TUNNEL = 1 << 9,
346 346
347 SKB_GSO_MPLS = 1 << 10, 347 SKB_GSO_MPLS = 1 << 10,
348
349 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
350
351 SKB_GSO_GRE_CSUM = 1 << 12,
348}; 352};
349 353
350#if BITS_PER_LONG > 32 354#if BITS_PER_LONG > 32
@@ -426,7 +430,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
426 * @csum_start: Offset from skb->head where checksumming should start 430 * @csum_start: Offset from skb->head where checksumming should start
427 * @csum_offset: Offset from csum_start where checksum should be stored 431 * @csum_offset: Offset from csum_start where checksum should be stored
428 * @priority: Packet queueing priority 432 * @priority: Packet queueing priority
429 * @local_df: allow local fragmentation 433 * @ignore_df: allow local fragmentation
430 * @cloned: Head may be cloned (check refcnt to be sure) 434 * @cloned: Head may be cloned (check refcnt to be sure)
431 * @ip_summed: Driver fed us an IP checksum 435 * @ip_summed: Driver fed us an IP checksum
432 * @nohdr: Payload reference only, must not modify header 436 * @nohdr: Payload reference only, must not modify header
@@ -514,7 +518,7 @@ struct sk_buff {
514 }; 518 };
515 __u32 priority; 519 __u32 priority;
516 kmemcheck_bitfield_begin(flags1); 520 kmemcheck_bitfield_begin(flags1);
517 __u8 local_df:1, 521 __u8 ignore_df:1,
518 cloned:1, 522 cloned:1,
519 ip_summed:2, 523 ip_summed:2,
520 nohdr:1, 524 nohdr:1,
@@ -567,7 +571,10 @@ struct sk_buff {
567 * headers if needed 571 * headers if needed
568 */ 572 */
569 __u8 encapsulation:1; 573 __u8 encapsulation:1;
570 /* 6/8 bit hole (depending on ndisc_nodetype presence) */ 574 __u8 encap_hdr_csum:1;
575 __u8 csum_valid:1;
576 __u8 csum_complete_sw:1;
577 /* 3/5 bit hole (depending on ndisc_nodetype presence) */
571 kmemcheck_bitfield_end(flags2); 578 kmemcheck_bitfield_end(flags2);
572 579
573#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL 580#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -739,7 +746,13 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
739int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 746int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
740struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); 747struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
741struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); 748struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
742struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); 749struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
750 gfp_t gfp_mask, bool fclone);
751static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
752 gfp_t gfp_mask)
753{
754 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
755}
743 756
744int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); 757int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
745struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 758struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
@@ -2233,6 +2246,14 @@ static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2233 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); 2246 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2234} 2247}
2235 2248
2249
2250static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2251 gfp_t gfp_mask)
2252{
2253 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2254}
2255
2256
2236/** 2257/**
2237 * skb_clone_writable - is the header of a clone writable 2258 * skb_clone_writable - is the header of a clone writable
2238 * @skb: buffer to check 2259 * @skb: buffer to check
@@ -2716,7 +2737,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb);
2716 2737
2717static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2738static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2718{ 2739{
2719 return skb->ip_summed & CHECKSUM_UNNECESSARY; 2740 return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid);
2720} 2741}
2721 2742
2722/** 2743/**
@@ -2741,6 +2762,103 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2741 0 : __skb_checksum_complete(skb); 2762 0 : __skb_checksum_complete(skb);
2742} 2763}
2743 2764
2765/* Check if we need to perform checksum complete validation.
2766 *
2767 * Returns true if checksum complete is needed, false otherwise
2768 * (either checksum is unnecessary or zero checksum is allowed).
2769 */
2770static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
2771 bool zero_okay,
2772 __sum16 check)
2773{
2774 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
2775 skb->csum_valid = 1;
2776 return false;
2777 }
2778
2779 return true;
2780}
2781
2782/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
2783 * in checksum_init.
2784 */
2785#define CHECKSUM_BREAK 76
2786
2787/* Validate (init) checksum based on checksum complete.
2788 *
2789 * Return values:
2790 * 0: checksum is validated, or validation is deferred to skb_checksum_complete.
2791 * In that case ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
2792 * checksum is stored in skb->csum for use in __skb_checksum_complete
2793 * non-zero: value of invalid checksum
2794 *
2795 */
2796static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
2797 bool complete,
2798 __wsum psum)
2799{
2800 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2801 if (!csum_fold(csum_add(psum, skb->csum))) {
2802 skb->csum_valid = 1;
2803 return 0;
2804 }
2805 }
2806
2807 skb->csum = psum;
2808
2809 if (complete || skb->len <= CHECKSUM_BREAK) {
2810 __sum16 csum;
2811
2812 csum = __skb_checksum_complete(skb);
2813 skb->csum_valid = !csum;
2814 return csum;
2815 }
2816
2817 return 0;
2818}
2819
2820static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
2821{
2822 return 0;
2823}
2824
2825/* Perform checksum validate (init). Note that this is a macro so that the
2826 * pseudo header, supplied by an input function, is only computed if necessary.
2827 * First we try to validate without any computation (checksum unnecessary) and
2828 * then validate based on checksum complete, calling the function to compute
2829 * the pseudo header.
2830 *
2831 * Return values:
2832 * 0: checksum is validated or try to in skb_checksum_complete
2833 * non-zero: value of invalid checksum
2834 */
2835#define __skb_checksum_validate(skb, proto, complete, \
2836 zero_okay, check, compute_pseudo) \
2837({ \
2838 __sum16 __ret = 0; \
2839 skb->csum_valid = 0; \
2840 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
2841 __ret = __skb_checksum_validate_complete(skb, \
2842 complete, compute_pseudo(skb, proto)); \
2843 __ret; \
2844})
2845
2846#define skb_checksum_init(skb, proto, compute_pseudo) \
2847 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
2848
2849#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
2850 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
2851
2852#define skb_checksum_validate(skb, proto, compute_pseudo) \
2853 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
2854
2855#define skb_checksum_validate_zero_check(skb, proto, check, \
2856 compute_pseudo) \
2857 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
2858
2859#define skb_checksum_simple_validate(skb) \
2860 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
2861
2744#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2862#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2745void nf_conntrack_destroy(struct nf_conntrack *nfct); 2863void nf_conntrack_destroy(struct nf_conntrack *nfct);
2746static inline void nf_conntrack_put(struct nf_conntrack *nfct) 2864static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -2895,6 +3013,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2895struct skb_gso_cb { 3013struct skb_gso_cb {
2896 int mac_offset; 3014 int mac_offset;
2897 int encap_level; 3015 int encap_level;
3016 __u16 csum_start;
2898}; 3017};
2899#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) 3018#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2900 3019
@@ -2919,6 +3038,28 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
2919 return 0; 3038 return 0;
2920} 3039}
2921 3040
3041/* Compute the checksum for a gso segment. First compute the checksum value
3042 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3043 * then add in skb->csum (checksum from csum_start to end of packet).
3044 * skb->csum and csum_start are then updated to reflect the checksum of the
3045 * resultant packet starting from the transport header; the resultant
3046 * skb->csum is seeded from the res argument (normally zero or the ~ of the
3047 * checksum of a pseudo header).
3048 */
3049static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3050{
3051 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
3052 skb_transport_offset(skb);
3053 __u16 csum;
3054
3055 csum = csum_fold(csum_partial(skb_transport_header(skb),
3056 plen, skb->csum));
3057 skb->csum = res;
3058 SKB_GSO_CB(skb)->csum_start -= plen;
3059
3060 return csum;
3061}
3062
2922static inline bool skb_is_gso(const struct sk_buff *skb) 3063static inline bool skb_is_gso(const struct sk_buff *skb)
2923{ 3064{
2924 return skb_shinfo(skb)->gso_size; 3065 return skb_shinfo(skb)->gso_size;
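The validation helpers fold the "checksum unnecessary vs. checksum complete" decision into one place: a receive path supplies only the protocol's pseudo-header computation, and small packets (<= CHECKSUM_BREAK) are checksummed immediately. A hedged sketch of an IPv4 transport receive check (foo_compute_pseudo is an assumption standing in for the protocol's helper):

    #include <linux/skbuff.h>
    #include <net/ip.h>

    /* Sketch: pseudo-header seed for an IPv4 transport protocol. */
    static __wsum foo_compute_pseudo(struct sk_buff *skb, int proto)
    {
            return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                      skb->len, proto, 0);
    }

    /* In the receive path: a non-zero return means the checksum is bad. */
    if (skb_checksum_init(skb, IPPROTO_TCP, foo_compute_pseudo))
            goto csum_error;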
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index aa327a8105ad..b2b1afbb3202 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -26,20 +26,6 @@ struct at86rf230_platform_data {
26 int rstn; 26 int rstn;
27 int slp_tr; 27 int slp_tr;
28 int dig2; 28 int dig2;
29
30 /* Setting the irq_type will configure the driver to request
31 * the platform irq trigger type according to the given value
32 * and configure the interrupt polarity of the device to the
33 * corresponding polarity.
34 *
35 * Allowed values are: IRQF_TRIGGER_RISING, IRQF_TRIGGER_FALLING,
36 * IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW
37 *
38 * Setting it to 0, the driver does not touch the trigger type
39 * configuration of the interrupt and sets the interrupt polarity
40 * of the device to high active (the default value).
41 */
42 int irq_type;
43}; 29};
44 30
45#endif 31#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 07ef9b82b66d..4568a5cc9ab8 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -33,6 +33,7 @@ struct ssb_sprom {
33 u8 et1phyaddr; /* MII address for enet1 */ 33 u8 et1phyaddr; /* MII address for enet1 */
34 u8 et0mdcport; /* MDIO for enet0 */ 34 u8 et0mdcport; /* MDIO for enet0 */
35 u8 et1mdcport; /* MDIO for enet1 */ 35 u8 et1mdcport; /* MDIO for enet1 */
36 u16 dev_id; /* Device ID overriding e.g. PCI ID */
36 u16 board_rev; /* Board revision number from SPROM. */ 37 u16 board_rev; /* Board revision number from SPROM. */
37 u16 board_num; /* Board number from SPROM. */ 38 u16 board_num; /* Board number from SPROM. */
38 u16 board_type; /* Board type from SPROM. */ 39 u16 board_type; /* Board type from SPROM. */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 239946868142..a0513210798f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -197,7 +197,8 @@ struct tcp_sock {
197 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ 197 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
198 syn_data:1, /* SYN includes data */ 198 syn_data:1, /* SYN includes data */
199 syn_fastopen:1, /* SYN includes Fast Open option */ 199 syn_fastopen:1, /* SYN includes Fast Open option */
200 syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ 200 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
201 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
201 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 202 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
202 203
203/* RTT measurement */ 204/* RTT measurement */
@@ -209,6 +210,8 @@ struct tcp_sock {
209 210
210 u32 packets_out; /* Packets which are "in flight" */ 211 u32 packets_out; /* Packets which are "in flight" */
211 u32 retrans_out; /* Retransmitted packets out */ 212 u32 retrans_out; /* Retransmitted packets out */
213 u32 max_packets_out; /* max packets_out in last window */
214 u32 max_packets_seq; /* right edge of max_packets_out flight */
212 215
213 u16 urg_data; /* Saved octet of OOB data and control flags */ 216 u16 urg_data; /* Saved octet of OOB data and control flags */
214 u8 ecn_flags; /* ECN status bits. */ 217 u8 ecn_flags; /* ECN status bits. */
@@ -365,11 +368,6 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
365 tcp_sk(sk)->fastopen_rsk != NULL); 368 tcp_sk(sk)->fastopen_rsk != NULL);
366} 369}
367 370
368static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
369{
370 return foc->len != -1;
371}
372
373extern void tcp_sock_destruct(struct sock *sk); 371extern void tcp_sock_destruct(struct sock *sk);
374 372
375static inline int fastopen_init_queue(struct sock *sk, int backlog) 373static inline int fastopen_init_queue(struct sock *sk, int backlog)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 42278bbf7a88..247cfdcc4b08 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -47,7 +47,9 @@ struct udp_sock {
47#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node 47#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
48 int pending; /* Any pending frames ? */ 48 int pending; /* Any pending frames ? */
49 unsigned int corkflag; /* Cork is required */ 49 unsigned int corkflag; /* Cork is required */
50 __u16 encap_type; /* Is this an Encapsulation socket? */ 50 __u8 encap_type; /* Is this an Encapsulation socket? */
51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
52 no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
51 /* 53 /*
52 * Following member retains the information to create a UDP header 54 * Following member retains the information to create a UDP header
53 * when the socket is uncorked. 55 * when the socket is uncorked.
@@ -76,6 +78,26 @@ static inline struct udp_sock *udp_sk(const struct sock *sk)
76 return (struct udp_sock *)sk; 78 return (struct udp_sock *)sk;
77} 79}
78 80
81static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
82{
83 udp_sk(sk)->no_check6_tx = val;
84}
85
86static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
87{
88 udp_sk(sk)->no_check6_rx = val;
89}
90
91static inline bool udp_get_no_check6_tx(struct sock *sk)
92{
93 return udp_sk(sk)->no_check6_tx;
94}
95
96static inline bool udp_get_no_check6_rx(struct sock *sk)
97{
98 return udp_sk(sk)->no_check6_rx;
99}
100
79#define udp_portaddr_for_each_entry(__sk, node, list) \ 101#define udp_portaddr_for_each_entry(__sk, node, list) \
80 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) 102 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
81 103
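The no_check6_tx/no_check6_rx bits give UDP-over-IPv6 encapsulations a per-socket opt-out from the otherwise mandatory UDPv6 checksum, and the accessors keep callers out of the bitfield layout. A sketch of tunnel socket setup, assuming sk is the tunnel's UDP socket:

    /* Sketch: send zero UDPv6 checksums and tolerate them on receive. */
    udp_set_no_check6_tx(sk, true);
    udp_set_no_check6_rx(sk, true);

    /* later, on the transmit path */
    if (udp_get_no_check6_tx(sk))
            uh->check = 0;  /* uh: the outgoing udphdr (assumed in scope) */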
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 44b38b92236a..7c9b484735c5 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -52,6 +52,10 @@
52#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ 52#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
53#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ 53#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
54 54
55/* Initial NTB length */
56#define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */
57#define CDC_NCM_NTB_DEF_SIZE_RX 16384 /* bytes */
58
55/* Minimum value for MaxDatagramSize, ch. 6.2.9 */ 59/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
56#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */ 60#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
57 61
@@ -72,16 +76,9 @@
72/* Restart the timer, if amount of datagrams is less than given value */ 76/* Restart the timer, if amount of datagrams is less than given value */
73#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 77#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
74#define CDC_NCM_TIMER_PENDING_CNT 2 78#define CDC_NCM_TIMER_PENDING_CNT 2
75#define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC) 79#define CDC_NCM_TIMER_INTERVAL_USEC 400UL
76 80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
77/* The following macro defines the minimum header space */ 81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
78#define CDC_NCM_MIN_HDR_SIZE \
79 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
80 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
81
82#define CDC_NCM_NDP_SIZE \
83 (sizeof(struct usb_cdc_ncm_ndp16) + \
84 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
85 82
86#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ 83#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
87 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) 84 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -107,6 +104,9 @@ struct cdc_ncm_ctx {
107 spinlock_t mtx; 104 spinlock_t mtx;
108 atomic_t stop; 105 atomic_t stop;
109 106
107 u32 timer_interval;
108 u32 max_ndp_size;
109
110 u32 tx_timer_pending; 110 u32 tx_timer_pending;
111 u32 tx_curr_frame_num; 111 u32 tx_curr_frame_num;
112 u32 rx_max; 112 u32 rx_max;
@@ -118,10 +118,21 @@ struct cdc_ncm_ctx {
118 u16 tx_ndp_modulus; 118 u16 tx_ndp_modulus;
119 u16 tx_seq; 119 u16 tx_seq;
120 u16 rx_seq; 120 u16 rx_seq;
121 u16 connected; 121 u16 min_tx_pkt;
122
123 /* statistics */
124 u32 tx_curr_frame_payload;
125 u32 tx_reason_ntb_full;
126 u32 tx_reason_ndp_full;
127 u32 tx_reason_timeout;
128 u32 tx_reason_max_datagram;
129 u64 tx_overhead;
130 u64 tx_ntbs;
131 u64 rx_overhead;
132 u64 rx_ntbs;
122}; 133};
123 134
124u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); 135u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
125int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 136int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
126void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 137void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
127struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); 138struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
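Replacing the fixed CDC_NCM_TIMER_INTERVAL with ctx->timer_interval makes the aggregation timer tunable; the MIN/MAX macros bound the value so the microsecond-to-nanosecond conversion cannot overflow the u32. A sketch of how a driver might store a user-supplied interval (usec is the assumed input):

    /* Sketch: clamp to the allowed window, store in nanoseconds. */
    ctx->timer_interval = clamp_t(u32, usec,
                                  CDC_NCM_TIMER_INTERVAL_MIN,
                                  CDC_NCM_TIMER_INTERVAL_MAX) * NSEC_PER_USEC;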
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index f7d372b7d4ff..79b530fb2c4d 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -54,6 +54,7 @@
54#define __6LOWPAN_H__ 54#define __6LOWPAN_H__
55 55
56#include <net/ipv6.h> 56#include <net/ipv6.h>
57#include <net/net_namespace.h>
57 58
58#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ 59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
59#define UIP_IPH_LEN 40 /* ipv6 fixed header size */ 60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 933a9f22a05f..f679877bb601 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -306,11 +306,6 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
306 htonl(0xFF000000) | addr->s6_addr32[3]); 306 htonl(0xFF000000) | addr->s6_addr32[3]);
307} 307}
308 308
309static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
310{
311 return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
312}
313
314static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) 309static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
315{ 310{
316#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 311#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index f79ae2aa76d6..085940f7eeec 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -57,6 +57,14 @@ struct sockaddr_ieee802154 {
57/* get/setsockopt */ 57/* get/setsockopt */
58#define SOL_IEEE802154 0 58#define SOL_IEEE802154 0
59 59
60#define WPAN_WANTACK 0 60#define WPAN_WANTACK 0
61#define WPAN_SECURITY 1
62#define WPAN_SECURITY_LEVEL 2
63
64#define WPAN_SECURITY_DEFAULT 0
65#define WPAN_SECURITY_OFF 1
66#define WPAN_SECURITY_ON 2
67
68#define WPAN_SECURITY_LEVEL_DEFAULT (-1)
61 69
62#endif 70#endif
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index be150cf8cd43..16587dcd6a91 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -367,6 +367,7 @@ enum {
367#define HCI_ERROR_REMOTE_POWER_OFF 0x15 367#define HCI_ERROR_REMOTE_POWER_OFF 0x15
368#define HCI_ERROR_LOCAL_HOST_TERM 0x16 368#define HCI_ERROR_LOCAL_HOST_TERM 0x16
369#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 369#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
370#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
370 371
371/* Flow control modes */ 372/* Flow control modes */
372#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 373#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
@@ -1053,6 +1054,17 @@ struct hci_cp_write_page_scan_activity {
1053 __le16 window; 1054 __le16 window;
1054} __packed; 1055} __packed;
1055 1056
1057#define HCI_OP_READ_TX_POWER 0x0c2d
1058struct hci_cp_read_tx_power {
1059 __le16 handle;
1060 __u8 type;
1061} __packed;
1062struct hci_rp_read_tx_power {
1063 __u8 status;
1064 __le16 handle;
1065 __s8 tx_power;
1066} __packed;
1067
1056#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46 1068#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
1057struct hci_rp_read_page_scan_type { 1069struct hci_rp_read_page_scan_type {
1058 __u8 status; 1070 __u8 status;
@@ -1063,6 +1075,16 @@ struct hci_rp_read_page_scan_type {
1063 #define PAGE_SCAN_TYPE_STANDARD 0x00 1075 #define PAGE_SCAN_TYPE_STANDARD 0x00
1064 #define PAGE_SCAN_TYPE_INTERLACED 0x01 1076 #define PAGE_SCAN_TYPE_INTERLACED 0x01
1065 1077
1078#define HCI_OP_READ_RSSI 0x1405
1079struct hci_cp_read_rssi {
1080 __le16 handle;
1081} __packed;
1082struct hci_rp_read_rssi {
1083 __u8 status;
1084 __le16 handle;
1085 __s8 rssi;
1086} __packed;
1087
1066#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409 1088#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
1067struct hci_rp_read_local_amp_info { 1089struct hci_rp_read_local_amp_info {
1068 __u8 status; 1090 __u8 status;
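HCI_OP_READ_RSSI and HCI_OP_READ_TX_POWER are the commands behind the new connection-information queries; issuing one follows the usual build-a-parameter-block-and-send pattern. A hedged sketch using hci_send_cmd() as the Bluetooth core does elsewhere (conn and hdev assumed in scope):

    struct hci_cp_read_rssi cp;

    cp.handle = cpu_to_le16(conn->handle);
    hci_send_cmd(hdev, HCI_OP_READ_RSSI, sizeof(cp), &cp);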
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5f8bc05694ac..b386bf17e6c2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -68,6 +68,11 @@ struct discovery_state {
68 struct list_head unknown; /* Name state not known */ 68 struct list_head unknown; /* Name state not known */
69 struct list_head resolve; /* Name needs to be resolved */ 69 struct list_head resolve; /* Name needs to be resolved */
70 __u32 timestamp; 70 __u32 timestamp;
71 bdaddr_t last_adv_addr;
72 u8 last_adv_addr_type;
73 s8 last_adv_rssi;
74 u8 last_adv_data[HCI_MAX_AD_LENGTH];
75 u8 last_adv_data_len;
71}; 76};
72 77
73struct hci_conn_hash { 78struct hci_conn_hash {
@@ -140,6 +145,10 @@ struct oob_data {
140/* Default LE RPA expiry time, 15 minutes */ 145/* Default LE RPA expiry time, 15 minutes */
141#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) 146#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
142 147
148/* Default min/max age of connection information (1s/3s) */
149#define DEFAULT_CONN_INFO_MIN_AGE 1000
150#define DEFAULT_CONN_INFO_MAX_AGE 3000
151
143struct amp_assoc { 152struct amp_assoc {
144 __u16 len; 153 __u16 len;
145 __u16 offset; 154 __u16 offset;
@@ -194,6 +203,9 @@ struct hci_dev {
194 __u16 le_scan_window; 203 __u16 le_scan_window;
195 __u16 le_conn_min_interval; 204 __u16 le_conn_min_interval;
196 __u16 le_conn_max_interval; 205 __u16 le_conn_max_interval;
206 __u16 discov_interleaved_timeout;
207 __u16 conn_info_min_age;
208 __u16 conn_info_max_age;
197 __u8 ssp_debug_mode; 209 __u8 ssp_debug_mode;
198 210
199 __u16 devid_source; 211 __u16 devid_source;
@@ -368,8 +380,13 @@ struct hci_conn {
368 __u16 setting; 380 __u16 setting;
369 __u16 le_conn_min_interval; 381 __u16 le_conn_min_interval;
370 __u16 le_conn_max_interval; 382 __u16 le_conn_max_interval;
383 __s8 rssi;
384 __s8 tx_power;
385 __s8 max_tx_power;
371 unsigned long flags; 386 unsigned long flags;
372 387
388 unsigned long conn_info_timestamp;
389
373 __u8 remote_cap; 390 __u8 remote_cap;
374 __u8 remote_auth; 391 __u8 remote_auth;
375 __u8 remote_id; 392 __u8 remote_id;
@@ -1204,8 +1221,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
1204 */ 1221 */
1205#define DISCOV_LE_SCAN_WIN 0x12 1222#define DISCOV_LE_SCAN_WIN 0x12
1206#define DISCOV_LE_SCAN_INT 0x12 1223#define DISCOV_LE_SCAN_INT 0x12
1207#define DISCOV_LE_TIMEOUT msecs_to_jiffies(10240) 1224#define DISCOV_LE_TIMEOUT 10240 /* msec */
1208#define DISCOV_INTERLEAVED_TIMEOUT msecs_to_jiffies(5120) 1225#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
1209#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 1226#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
1210#define DISCOV_BREDR_INQUIRY_LEN 0x08 1227#define DISCOV_BREDR_INQUIRY_LEN 0x08
1211 1228
@@ -1265,7 +1282,8 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
1265 u8 *randomizer256, u8 status); 1282 u8 *randomizer256, u8 status);
1266void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1283void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1267 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, 1284 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
1268 u8 ssp, u8 *eir, u16 eir_len); 1285 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
1286 u8 scan_rsp_len);
1269void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1287void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1270 u8 addr_type, s8 rssi, u8 *name, u8 name_len); 1288 u8 addr_type, s8 rssi, u8 *name, u8 name_len);
1271void mgmt_discovering(struct hci_dev *hdev, u8 discovering); 1289void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d4b571c2f9fd..bcffc9ae0c89 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -181,6 +181,9 @@ struct mgmt_cp_load_link_keys {
181} __packed; 181} __packed;
182#define MGMT_LOAD_LINK_KEYS_SIZE 3 182#define MGMT_LOAD_LINK_KEYS_SIZE 3
183 183
184#define MGMT_LTK_UNAUTHENTICATED 0x00
185#define MGMT_LTK_AUTHENTICATED 0x01
186
184struct mgmt_ltk_info { 187struct mgmt_ltk_info {
185 struct mgmt_addr_info addr; 188 struct mgmt_addr_info addr;
186 __u8 type; 189 __u8 type;
@@ -409,6 +412,18 @@ struct mgmt_cp_load_irks {
409} __packed; 412} __packed;
410#define MGMT_LOAD_IRKS_SIZE 2 413#define MGMT_LOAD_IRKS_SIZE 2
411 414
415#define MGMT_OP_GET_CONN_INFO 0x0031
416struct mgmt_cp_get_conn_info {
417 struct mgmt_addr_info addr;
418} __packed;
419#define MGMT_GET_CONN_INFO_SIZE MGMT_ADDR_INFO_SIZE
420struct mgmt_rp_get_conn_info {
421 struct mgmt_addr_info addr;
422 __s8 rssi;
423 __s8 tx_power;
424 __s8 max_tx_power;
425} __packed;
426
412#define MGMT_EV_CMD_COMPLETE 0x0001 427#define MGMT_EV_CMD_COMPLETE 0x0001
413struct mgmt_ev_cmd_complete { 428struct mgmt_ev_cmd_complete {
414 __le16 opcode; 429 __le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 2611cc389d7d..578b83127af1 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -173,7 +173,7 @@ struct rfcomm_dlc {
173 struct sk_buff_head tx_queue; 173 struct sk_buff_head tx_queue;
174 struct timer_list timer; 174 struct timer_list timer;
175 175
176 spinlock_t lock; 176 struct mutex lock;
177 unsigned long state; 177 unsigned long state;
178 unsigned long flags; 178 unsigned long flags;
179 atomic_t refcnt; 179 atomic_t refcnt;
@@ -244,8 +244,8 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
244void rfcomm_dlc_accept(struct rfcomm_dlc *d); 244void rfcomm_dlc_accept(struct rfcomm_dlc *d);
245struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel); 245struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
246 246
247#define rfcomm_dlc_lock(d) spin_lock(&d->lock) 247#define rfcomm_dlc_lock(d) mutex_lock(&d->lock)
248#define rfcomm_dlc_unlock(d) spin_unlock(&d->lock) 248#define rfcomm_dlc_unlock(d) mutex_unlock(&d->lock)
249 249
250static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d) 250static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
251{ 251{
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f856e5a746fa..e46c437944f7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -109,6 +109,13 @@ enum ieee80211_band {
109 * channel as the control or any of the secondary channels. 109 * channel as the control or any of the secondary channels.
110 * This may be due to the driver or due to regulatory bandwidth 110 * This may be due to the driver or due to regulatory bandwidth
111 * restrictions. 111 * restrictions.
112 * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
113 * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
114 * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
115 * on this channel.
116 * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
117 * on this channel.
118 *
112 */ 119 */
113enum ieee80211_channel_flags { 120enum ieee80211_channel_flags {
114 IEEE80211_CHAN_DISABLED = 1<<0, 121 IEEE80211_CHAN_DISABLED = 1<<0,
@@ -120,6 +127,10 @@ enum ieee80211_channel_flags {
120 IEEE80211_CHAN_NO_OFDM = 1<<6, 127 IEEE80211_CHAN_NO_OFDM = 1<<6,
121 IEEE80211_CHAN_NO_80MHZ = 1<<7, 128 IEEE80211_CHAN_NO_80MHZ = 1<<7,
122 IEEE80211_CHAN_NO_160MHZ = 1<<8, 129 IEEE80211_CHAN_NO_160MHZ = 1<<8,
130 IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
131 IEEE80211_CHAN_GO_CONCURRENT = 1<<10,
132 IEEE80211_CHAN_NO_20MHZ = 1<<11,
133 IEEE80211_CHAN_NO_10MHZ = 1<<12,
123}; 134};
124 135
125#define IEEE80211_CHAN_NO_HT40 \ 136#define IEEE80211_CHAN_NO_HT40 \
@@ -330,8 +341,8 @@ struct vif_params {
330 * @seq_len: length of @seq. 341 * @seq_len: length of @seq.
331 */ 342 */
332struct key_params { 343struct key_params {
333 u8 *key; 344 const u8 *key;
334 u8 *seq; 345 const u8 *seq;
335 int key_len; 346 int key_len;
336 int seq_len; 347 int seq_len;
337 u32 cipher; 348 u32 cipher;
@@ -441,10 +452,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
441 * cfg80211_chandef_dfs_required - checks if radar detection is required 452 * cfg80211_chandef_dfs_required - checks if radar detection is required
442 * @wiphy: the wiphy to validate against 453 * @wiphy: the wiphy to validate against
443 * @chandef: the channel definition to check 454 * @chandef: the channel definition to check
444 * Return: 1 if radar detection is required, 0 if it is not, < 0 on error 455 * @iftype: the interface type as specified in &enum nl80211_iftype
456 * Returns:
457 * 1 if radar detection is required, 0 if it is not, < 0 on error
445 */ 458 */
446int cfg80211_chandef_dfs_required(struct wiphy *wiphy, 459int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
447 const struct cfg80211_chan_def *chandef); 460 const struct cfg80211_chan_def *chandef,
461 enum nl80211_iftype iftype);
448 462
449/** 463/**
450 * ieee80211_chandef_rate_flags - returns rate flags for a channel 464 * ieee80211_chandef_rate_flags - returns rate flags for a channel
@@ -654,7 +668,6 @@ struct cfg80211_acl_data {
654 * @p2p_opp_ps: P2P opportunistic PS 668 * @p2p_opp_ps: P2P opportunistic PS
655 * @acl: ACL configuration used by the drivers which has support for 669 * @acl: ACL configuration used by the drivers which has support for
656 * MAC address based access control 670 * MAC address based access control
657 * @radar_required: set if radar detection is required
658 */ 671 */
659struct cfg80211_ap_settings { 672struct cfg80211_ap_settings {
660 struct cfg80211_chan_def chandef; 673 struct cfg80211_chan_def chandef;
@@ -672,7 +685,6 @@ struct cfg80211_ap_settings {
672 u8 p2p_ctwindow; 685 u8 p2p_ctwindow;
673 bool p2p_opp_ps; 686 bool p2p_opp_ps;
674 const struct cfg80211_acl_data *acl; 687 const struct cfg80211_acl_data *acl;
675 bool radar_required;
676}; 688};
677 689
678/** 690/**
@@ -682,8 +694,10 @@ struct cfg80211_ap_settings {
682 * 694 *
683 * @chandef: defines the channel to use after the switch 695 * @chandef: defines the channel to use after the switch
684 * @beacon_csa: beacon data while performing the switch 696 * @beacon_csa: beacon data while performing the switch
685 * @counter_offset_beacon: offset for the counter within the beacon (tail) 697 * @counter_offsets_beacon: offsets of the counters within the beacon (tail)
686 * @counter_offset_presp: offset for the counter within the probe response 698 * @counter_offsets_presp: offsets of the counters within the probe response
699 * @n_counter_offsets_beacon: number of csa counters in the beacon (tail)
700 * @n_counter_offsets_presp: number of csa counters in the probe response
687 * @beacon_after: beacon data to be used on the new channel 701 * @beacon_after: beacon data to be used on the new channel
688 * @radar_required: whether radar detection is required on the new channel 702 * @radar_required: whether radar detection is required on the new channel
689 * @block_tx: whether transmissions should be blocked while changing 703 * @block_tx: whether transmissions should be blocked while changing
@@ -692,7 +706,10 @@ struct cfg80211_ap_settings {
692struct cfg80211_csa_settings { 706struct cfg80211_csa_settings {
693 struct cfg80211_chan_def chandef; 707 struct cfg80211_chan_def chandef;
694 struct cfg80211_beacon_data beacon_csa; 708 struct cfg80211_beacon_data beacon_csa;
695 u16 counter_offset_beacon, counter_offset_presp; 709 const u16 *counter_offsets_beacon;
710 const u16 *counter_offsets_presp;
711 unsigned int n_counter_offsets_beacon;
712 unsigned int n_counter_offsets_presp;
696 struct cfg80211_beacon_data beacon_after; 713 struct cfg80211_beacon_data beacon_after;
697 bool radar_required; 714 bool radar_required;
698 bool block_tx; 715 bool block_tx;
@@ -856,36 +873,38 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
856 * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled 873 * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
857 * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled 874 * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
858 * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled 875 * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
876 * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled
859 */ 877 */
860enum station_info_flags { 878enum station_info_flags {
861 STATION_INFO_INACTIVE_TIME = 1<<0, 879 STATION_INFO_INACTIVE_TIME = BIT(0),
862 STATION_INFO_RX_BYTES = 1<<1, 880 STATION_INFO_RX_BYTES = BIT(1),
863 STATION_INFO_TX_BYTES = 1<<2, 881 STATION_INFO_TX_BYTES = BIT(2),
864 STATION_INFO_LLID = 1<<3, 882 STATION_INFO_LLID = BIT(3),
865 STATION_INFO_PLID = 1<<4, 883 STATION_INFO_PLID = BIT(4),
866 STATION_INFO_PLINK_STATE = 1<<5, 884 STATION_INFO_PLINK_STATE = BIT(5),
867 STATION_INFO_SIGNAL = 1<<6, 885 STATION_INFO_SIGNAL = BIT(6),
868 STATION_INFO_TX_BITRATE = 1<<7, 886 STATION_INFO_TX_BITRATE = BIT(7),
869 STATION_INFO_RX_PACKETS = 1<<8, 887 STATION_INFO_RX_PACKETS = BIT(8),
870 STATION_INFO_TX_PACKETS = 1<<9, 888 STATION_INFO_TX_PACKETS = BIT(9),
871 STATION_INFO_TX_RETRIES = 1<<10, 889 STATION_INFO_TX_RETRIES = BIT(10),
872 STATION_INFO_TX_FAILED = 1<<11, 890 STATION_INFO_TX_FAILED = BIT(11),
873 STATION_INFO_RX_DROP_MISC = 1<<12, 891 STATION_INFO_RX_DROP_MISC = BIT(12),
874 STATION_INFO_SIGNAL_AVG = 1<<13, 892 STATION_INFO_SIGNAL_AVG = BIT(13),
875 STATION_INFO_RX_BITRATE = 1<<14, 893 STATION_INFO_RX_BITRATE = BIT(14),
876 STATION_INFO_BSS_PARAM = 1<<15, 894 STATION_INFO_BSS_PARAM = BIT(15),
877 STATION_INFO_CONNECTED_TIME = 1<<16, 895 STATION_INFO_CONNECTED_TIME = BIT(16),
878 STATION_INFO_ASSOC_REQ_IES = 1<<17, 896 STATION_INFO_ASSOC_REQ_IES = BIT(17),
879 STATION_INFO_STA_FLAGS = 1<<18, 897 STATION_INFO_STA_FLAGS = BIT(18),
880 STATION_INFO_BEACON_LOSS_COUNT = 1<<19, 898 STATION_INFO_BEACON_LOSS_COUNT = BIT(19),
881 STATION_INFO_T_OFFSET = 1<<20, 899 STATION_INFO_T_OFFSET = BIT(20),
882 STATION_INFO_LOCAL_PM = 1<<21, 900 STATION_INFO_LOCAL_PM = BIT(21),
883 STATION_INFO_PEER_PM = 1<<22, 901 STATION_INFO_PEER_PM = BIT(22),
884 STATION_INFO_NONPEER_PM = 1<<23, 902 STATION_INFO_NONPEER_PM = BIT(23),
885 STATION_INFO_RX_BYTES64 = 1<<24, 903 STATION_INFO_RX_BYTES64 = BIT(24),
886 STATION_INFO_TX_BYTES64 = 1<<25, 904 STATION_INFO_TX_BYTES64 = BIT(25),
887 STATION_INFO_CHAIN_SIGNAL = 1<<26, 905 STATION_INFO_CHAIN_SIGNAL = BIT(26),
888 STATION_INFO_CHAIN_SIGNAL_AVG = 1<<27, 906 STATION_INFO_CHAIN_SIGNAL_AVG = BIT(27),
907 STATION_INFO_EXPECTED_THROUGHPUT = BIT(28),
889}; 908};
890 909
891/** 910/**
@@ -1007,6 +1026,8 @@ struct sta_bss_parameters {
1007 * @local_pm: local mesh STA power save mode 1026 * @local_pm: local mesh STA power save mode
1008 * @peer_pm: peer mesh STA power save mode 1027 * @peer_pm: peer mesh STA power save mode
1009 * @nonpeer_pm: non-peer mesh STA power save mode 1028 * @nonpeer_pm: non-peer mesh STA power save mode
1029 * @expected_throughput: expected throughput in kbps (including 802.11 headers)
1030 * towards this station.
1010 */ 1031 */
1011struct station_info { 1032struct station_info {
1012 u32 filled; 1033 u32 filled;
@@ -1045,6 +1066,8 @@ struct station_info {
1045 enum nl80211_mesh_power_mode peer_pm; 1066 enum nl80211_mesh_power_mode peer_pm;
1046 enum nl80211_mesh_power_mode nonpeer_pm; 1067 enum nl80211_mesh_power_mode nonpeer_pm;
1047 1068
1069 u32 expected_throughput;
1070
1048 /* 1071 /*
1049 * Note: Add a new enum station_info_flags value for each new field and 1072 * Note: Add a new enum station_info_flags value for each new field and
1050 * use it to check which fields are initialized. 1073 * use it to check which fields are initialized.
@@ -1052,6 +1075,19 @@ struct station_info {
1052}; 1075};
1053 1076
1054/** 1077/**
1078 * cfg80211_get_station - retrieve information about a given station
1079 * @dev: the device where the station is supposed to be connected to
1080 * @mac_addr: the mac address of the station of interest
1081 * @sinfo: pointer to the structure to fill with the information
1082 *
1083 * Returns 0 on success and sinfo is filled with the available information;
1084 * otherwise returns a negative error code and the content of sinfo has to be
1085 * considered undefined.
1086 */
1087int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
1088 struct station_info *sinfo);
1089
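The new helper and the EXPECTED_THROUGHPUT flag are meant to be used together; a hedged sketch (the example_ name is invented):

static u32 example_peer_throughput(struct net_device *dev, const u8 *peer)
{
        struct station_info sinfo;

        if (cfg80211_get_station(dev, peer, &sinfo) < 0)
                return 0;
        /* only trust the field if the driver set the filled bit */
        if (!(sinfo.filled & STATION_INFO_EXPECTED_THROUGHPUT))
                return 0;
        return sinfo.expected_throughput;       /* kbps, incl. 802.11 headers */
}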
1090/**
1055 * enum monitor_flags - monitor flags 1091 * enum monitor_flags - monitor flags
1056 * 1092 *
1057 * Monitor interface configuration flags. Note that these must be the bits 1093 * Monitor interface configuration flags. Note that these must be the bits
@@ -1152,7 +1188,7 @@ struct bss_parameters {
1152 int use_cts_prot; 1188 int use_cts_prot;
1153 int use_short_preamble; 1189 int use_short_preamble;
1154 int use_short_slot_time; 1190 int use_short_slot_time;
1155 u8 *basic_rates; 1191 const u8 *basic_rates;
1156 u8 basic_rates_len; 1192 u8 basic_rates_len;
1157 int ap_isolate; 1193 int ap_isolate;
1158 int ht_opmode; 1194 int ht_opmode;
@@ -1682,10 +1718,10 @@ struct cfg80211_disassoc_request {
1682 * @ht_capa_mask: The bits of ht_capa which are to be used. 1718 * @ht_capa_mask: The bits of ht_capa which are to be used.
1683 */ 1719 */
1684struct cfg80211_ibss_params { 1720struct cfg80211_ibss_params {
1685 u8 *ssid; 1721 const u8 *ssid;
1686 u8 *bssid; 1722 const u8 *bssid;
1687 struct cfg80211_chan_def chandef; 1723 struct cfg80211_chan_def chandef;
1688 u8 *ie; 1724 const u8 *ie;
1689 u8 ssid_len, ie_len; 1725 u8 ssid_len, ie_len;
1690 u16 beacon_interval; 1726 u16 beacon_interval;
1691 u32 basic_rates; 1727 u32 basic_rates;
@@ -1794,8 +1830,8 @@ struct cfg80211_bitrate_mask {
1794 * @pmkid: The PMK material itself. 1830 * @pmkid: The PMK material itself.
1795 */ 1831 */
1796struct cfg80211_pmksa { 1832struct cfg80211_pmksa {
1797 u8 *bssid; 1833 const u8 *bssid;
1798 u8 *pmkid; 1834 const u8 *pmkid;
1799}; 1835};
1800 1836
1801/** 1837/**
@@ -1810,7 +1846,7 @@ struct cfg80211_pmksa {
1810 * memory, free @mask only! 1846 * memory, free @mask only!
1811 */ 1847 */
1812struct cfg80211_pkt_pattern { 1848struct cfg80211_pkt_pattern {
1813 u8 *mask, *pattern; 1849 const u8 *mask, *pattern;
1814 int pattern_len; 1850 int pattern_len;
1815 int pkt_offset; 1851 int pkt_offset;
1816}; 1852};
@@ -1974,6 +2010,8 @@ struct cfg80211_update_ft_ies_params {
1974 * @len: buffer length 2010 * @len: buffer length
1975 * @no_cck: don't use cck rates for this frame 2011 * @no_cck: don't use cck rates for this frame
1976 * @dont_wait_for_ack: tells the low level not to wait for an ack 2012 * @dont_wait_for_ack: tells the low level not to wait for an ack
2013 * @n_csa_offsets: length of csa_offsets array
2014 * @csa_offsets: array of all the csa offsets in the frame
1977 */ 2015 */
1978struct cfg80211_mgmt_tx_params { 2016struct cfg80211_mgmt_tx_params {
1979 struct ieee80211_channel *chan; 2017 struct ieee80211_channel *chan;
@@ -1983,6 +2021,8 @@ struct cfg80211_mgmt_tx_params {
1983 size_t len; 2021 size_t len;
1984 bool no_cck; 2022 bool no_cck;
1985 bool dont_wait_for_ack; 2023 bool dont_wait_for_ack;
2024 int n_csa_offsets;
2025 const u16 *csa_offsets;
1986}; 2026};
1987 2027
1988/** 2028/**
@@ -2278,6 +2318,10 @@ struct cfg80211_qos_map {
2278 * @channel_switch: initiate channel-switch procedure (with CSA) 2318 * @channel_switch: initiate channel-switch procedure (with CSA)
2279 * 2319 *
2280 * @set_qos_map: Set QoS mapping information to the driver 2320 * @set_qos_map: Set QoS mapping information to the driver
2321 *
2322 * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
2323 * given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
2324 * changes during the lifetime of the BSS.
2281 */ 2325 */
2282struct cfg80211_ops { 2326struct cfg80211_ops {
2283 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 2327 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2320,28 +2364,29 @@ struct cfg80211_ops {
2320 2364
2321 2365
2322 int (*add_station)(struct wiphy *wiphy, struct net_device *dev, 2366 int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
2323 u8 *mac, struct station_parameters *params); 2367 const u8 *mac,
2368 struct station_parameters *params);
2324 int (*del_station)(struct wiphy *wiphy, struct net_device *dev, 2369 int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
2325 u8 *mac); 2370 const u8 *mac);
2326 int (*change_station)(struct wiphy *wiphy, struct net_device *dev, 2371 int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
2327 u8 *mac, struct station_parameters *params); 2372 const u8 *mac,
2373 struct station_parameters *params);
2328 int (*get_station)(struct wiphy *wiphy, struct net_device *dev, 2374 int (*get_station)(struct wiphy *wiphy, struct net_device *dev,
2329 u8 *mac, struct station_info *sinfo); 2375 const u8 *mac, struct station_info *sinfo);
2330 int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, 2376 int (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
2331 int idx, u8 *mac, struct station_info *sinfo); 2377 int idx, u8 *mac, struct station_info *sinfo);
2332 2378
2333 int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, 2379 int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
2334 u8 *dst, u8 *next_hop); 2380 const u8 *dst, const u8 *next_hop);
2335 int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, 2381 int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev,
2336 u8 *dst); 2382 const u8 *dst);
2337 int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, 2383 int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev,
2338 u8 *dst, u8 *next_hop); 2384 const u8 *dst, const u8 *next_hop);
2339 int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, 2385 int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev,
2340 u8 *dst, u8 *next_hop, 2386 u8 *dst, u8 *next_hop, struct mpath_info *pinfo);
2341 struct mpath_info *pinfo);
2342 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, 2387 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
2343 int idx, u8 *dst, u8 *next_hop, 2388 int idx, u8 *dst, u8 *next_hop,
2344 struct mpath_info *pinfo); 2389 struct mpath_info *pinfo);
2345 int (*get_mesh_config)(struct wiphy *wiphy, 2390 int (*get_mesh_config)(struct wiphy *wiphy,
2346 struct net_device *dev, 2391 struct net_device *dev,
2347 struct mesh_config *conf); 2392 struct mesh_config *conf);
@@ -2471,11 +2516,11 @@ struct cfg80211_ops {
2471 struct cfg80211_gtk_rekey_data *data); 2516 struct cfg80211_gtk_rekey_data *data);
2472 2517
2473 int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev, 2518 int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
2474 u8 *peer, u8 action_code, u8 dialog_token, 2519 const u8 *peer, u8 action_code, u8 dialog_token,
2475 u16 status_code, u32 peer_capability, 2520 u16 status_code, u32 peer_capability,
2476 const u8 *buf, size_t len); 2521 const u8 *buf, size_t len);
2477 int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, 2522 int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
2478 u8 *peer, enum nl80211_tdls_operation oper); 2523 const u8 *peer, enum nl80211_tdls_operation oper);
2479 2524
2480 int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, 2525 int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
2481 const u8 *peer, u64 *cookie); 2526 const u8 *peer, u64 *cookie);
@@ -2521,9 +2566,13 @@ struct cfg80211_ops {
2521 int (*channel_switch)(struct wiphy *wiphy, 2566 int (*channel_switch)(struct wiphy *wiphy,
2522 struct net_device *dev, 2567 struct net_device *dev,
2523 struct cfg80211_csa_settings *params); 2568 struct cfg80211_csa_settings *params);
2569
2524 int (*set_qos_map)(struct wiphy *wiphy, 2570 int (*set_qos_map)(struct wiphy *wiphy,
2525 struct net_device *dev, 2571 struct net_device *dev,
2526 struct cfg80211_qos_map *qos_map); 2572 struct cfg80211_qos_map *qos_map);
2573
2574 int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
2575 struct cfg80211_chan_def *chandef);
2527}; 2576};
2528 2577
2529/* 2578/*
@@ -2618,6 +2667,7 @@ struct ieee80211_iface_limit {
2618 * between infrastructure and AP types must match. This is required 2667 * between infrastructure and AP types must match. This is required
2619 * only in special cases. 2668 * only in special cases.
2620 * @radar_detect_widths: bitmap of channel widths supported for radar detection 2669 * @radar_detect_widths: bitmap of channel widths supported for radar detection
2670 * @radar_detect_regions: bitmap of regions supported for radar detection
2621 * 2671 *
2622 * With this structure the driver can describe which interface 2672 * With this structure the driver can describe which interface
2623 * combinations it supports concurrently. 2673 * combinations it supports concurrently.
@@ -2675,6 +2725,7 @@ struct ieee80211_iface_combination {
2675 u8 n_limits; 2725 u8 n_limits;
2676 bool beacon_int_infra_match; 2726 bool beacon_int_infra_match;
2677 u8 radar_detect_widths; 2727 u8 radar_detect_widths;
2728 u8 radar_detect_regions;
2678}; 2729};
2679 2730
2680struct ieee80211_txrx_stypes { 2731struct ieee80211_txrx_stypes {
@@ -2905,6 +2956,17 @@ struct wiphy_vendor_command {
2905 * (including P2P GO) or 0 to indicate no such limit is advertised. The 2956 * (including P2P GO) or 0 to indicate no such limit is advertised. The
2906 * driver is allowed to advertise a theoretical limit that it can reach in 2957 * driver is allowed to advertise a theoretical limit that it can reach in
2907 * some cases, but may not always reach. 2958 * some cases, but may not always reach.
2959 *
2960 * @max_num_csa_counters: Number of supported csa_counters in beacons
2961 * and probe responses. This value should be set if the driver
2962 * wishes to limit the number of csa counters. Default (0) means
2963 * infinite.
2964 * @max_adj_channel_rssi_comp: max offset between the channel on which the
2965 * frame was sent and the channel on which the frame was heard for which
2966 * the reported rssi is still valid. If a driver is able to compensate for
2967 * the low rssi when a frame is heard on a different channel, then it should
2968 * set this variable to the maximal offset for which it can compensate.
2969 * This value should be set in MHz.
2908 */ 2970 */
2909struct wiphy { 2971struct wiphy {
2910 /* assign these fields before you register the wiphy */ 2972 /* assign these fields before you register the wiphy */
@@ -3022,6 +3084,9 @@ struct wiphy {
3022 3084
3023 u16 max_ap_assoc_sta; 3085 u16 max_ap_assoc_sta;
3024 3086
3087 u8 max_num_csa_counters;
3088 u8 max_adj_channel_rssi_comp;
3089
3025 char priv[0] __aligned(NETDEV_ALIGN); 3090 char priv[0] __aligned(NETDEV_ALIGN);
3026}; 3091};
3027 3092
@@ -3194,6 +3259,7 @@ struct cfg80211_cached_keys;
3194 * @ibss_dfs_possible: (private) IBSS may change to a DFS channel 3259 * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
3195 * @event_list: (private) list for internal event processing 3260 * @event_list: (private) list for internal event processing
3196 * @event_lock: (private) lock for event list 3261 * @event_lock: (private) lock for event list
3262 * @owner_nlportid: (private) owner socket port ID
3197 */ 3263 */
3198struct wireless_dev { 3264struct wireless_dev {
3199 struct wiphy *wiphy; 3265 struct wiphy *wiphy;
@@ -3241,13 +3307,15 @@ struct wireless_dev {
3241 unsigned long cac_start_time; 3307 unsigned long cac_start_time;
3242 unsigned int cac_time_ms; 3308 unsigned int cac_time_ms;
3243 3309
3310 u32 owner_nlportid;
3311
3244#ifdef CONFIG_CFG80211_WEXT 3312#ifdef CONFIG_CFG80211_WEXT
3245 /* wext data */ 3313 /* wext data */
3246 struct { 3314 struct {
3247 struct cfg80211_ibss_params ibss; 3315 struct cfg80211_ibss_params ibss;
3248 struct cfg80211_connect_params connect; 3316 struct cfg80211_connect_params connect;
3249 struct cfg80211_cached_keys *keys; 3317 struct cfg80211_cached_keys *keys;
3250 u8 *ie; 3318 const u8 *ie;
3251 size_t ie_len; 3319 size_t ie_len;
3252 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 3320 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
3253 u8 ssid[IEEE80211_MAX_SSID_LEN]; 3321 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -3488,7 +3556,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
3488 * Return: 0 on success, or a negative error code. 3556 * Return: 0 on success, or a negative error code.
3489 */ 3557 */
3490int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, 3558int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
3491 enum nl80211_iftype iftype, u8 *bssid, bool qos); 3559 enum nl80211_iftype iftype, const u8 *bssid,
3560 bool qos);
3492 3561
3493/** 3562/**
3494 * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame 3563 * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
@@ -3600,7 +3669,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
3600 * default channel settings will be disregarded. If no rule is found for a 3669 * default channel settings will be disregarded. If no rule is found for a
3601 * channel on the regulatory domain the channel will be disabled. 3670 * channel on the regulatory domain the channel will be disabled.
3602 * Drivers using this for a wiphy should also set the wiphy flag 3671 * Drivers using this for a wiphy should also set the wiphy flag
3603 * WIPHY_FLAG_CUSTOM_REGULATORY or cfg80211 will set it for the wiphy 3672 * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
3604 * that called this helper. 3673 * that called this helper.
3605 */ 3674 */
3606void wiphy_apply_custom_regulatory(struct wiphy *wiphy, 3675void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
@@ -4289,7 +4358,7 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
4289 * and not try to connect to any AP any more. 4358 * and not try to connect to any AP any more.
4290 */ 4359 */
4291void cfg80211_disconnected(struct net_device *dev, u16 reason, 4360void cfg80211_disconnected(struct net_device *dev, u16 reason,
4292 u8 *ie, size_t ie_len, gfp_t gfp); 4361 const u8 *ie, size_t ie_len, gfp_t gfp);
4293 4362
4294/** 4363/**
4295 * cfg80211_ready_on_channel - notification of remain_on_channel start 4364 * cfg80211_ready_on_channel - notification of remain_on_channel start
@@ -4543,12 +4612,14 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
4543 * cfg80211_reg_can_beacon - check if beaconing is allowed 4612 * cfg80211_reg_can_beacon - check if beaconing is allowed
4544 * @wiphy: the wiphy 4613 * @wiphy: the wiphy
4545 * @chandef: the channel definition 4614 * @chandef: the channel definition
4615 * @iftype: interface type
4546 * 4616 *
4547 * Return: %true if there is no secondary channel or the secondary channel(s) 4617 * Return: %true if there is no secondary channel or the secondary channel(s)
4548 * can be used for beaconing (i.e. is not a radar channel etc.) 4618 * can be used for beaconing (i.e. is not a radar channel etc.)
4549 */ 4619 */
4550bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 4620bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4551 struct cfg80211_chan_def *chandef); 4621 struct cfg80211_chan_def *chandef,
4622 enum nl80211_iftype iftype);
4552 4623
4553/* 4624/*
4554 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4625 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
@@ -4694,6 +4765,84 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
4694 */ 4765 */
4695unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); 4766unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
4696 4767
4768/**
4769 * cfg80211_check_combinations - check interface combinations
4770 *
4771 * @wiphy: the wiphy
4772 * @num_different_channels: the number of different channels we want
4773 * to use for verification
4774 * @radar_detect: a bitmap where each bit corresponds to a channel
4775 * width where radar detection is needed, as in the definition of
4776 * &struct ieee80211_iface_combination.@radar_detect_widths
4777 * @iftype_num: array with the numbers of interfaces of each interface
4778 * type. The index is the interface type as specified in &enum
4779 * nl80211_iftype.
4780 *
4781 * This function can be called by the driver to check whether a
4782 * combination of interfaces and their types is allowed according to
4783 * the interface combinations.
4784 */
4785int cfg80211_check_combinations(struct wiphy *wiphy,
4786 const int num_different_channels,
4787 const u8 radar_detect,
4788 const int iftype_num[NUM_NL80211_IFTYPES]);
4789
4790/**
4791 * cfg80211_iter_combinations - iterate over matching combinations
4792 *
4793 * @wiphy: the wiphy
4794 * @num_different_channels: the number of different channels we want
4795 * to use for verification
4796 * @radar_detect: a bitmap where each bit corresponds to a channel
4797 * width where radar detection is needed, as in the definition of
4798 * &struct ieee80211_iface_combination.@radar_detect_widths
4799 * @iftype_num: array with the numbers of interfaces of each interface
4800 * type. The index is the interface type as specified in &enum
4801 * nl80211_iftype.
4802 * @iter: function to call for each matching combination
4803 * @data: pointer to pass to iter function
4804 *
4805 * This function can be called by the driver to check which possible
4806 * combinations it fits in at a given moment, e.g. for channel switching
4807 * purposes.
4808 */
4809int cfg80211_iter_combinations(struct wiphy *wiphy,
4810 const int num_different_channels,
4811 const u8 radar_detect,
4812 const int iftype_num[NUM_NL80211_IFTYPES],
4813 void (*iter)(const struct ieee80211_iface_combination *c,
4814 void *data),
4815 void *data);
4816
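To show how the iterator is driven, here is a minimal sketch that counts the combinations matching a given interface mix; example_ names are invented and error handling is reduced to the essentials:

static void example_iter(const struct ieee80211_iface_combination *c,
                         void *data)
{
        (*(int *)data)++;
}

static int example_count_matches(struct wiphy *wiphy,
                                 const int iftype_num[NUM_NL80211_IFTYPES])
{
        int matches = 0;
        int err;

        err = cfg80211_iter_combinations(wiphy, 1 /* channels */,
                                         0 /* no radar detection */,
                                         iftype_num, example_iter, &matches);
        return err ? err : matches;
}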
4817/**
4818 * cfg80211_stop_iface - trigger interface disconnection
4819 *
4820 * @wiphy: the wiphy
4821 * @wdev: wireless device
4822 * @gfp: context flags
4823 *
4824 * Trigger the interface to be stopped as if the AP was stopped, the IBSS/mesh
4825 * left, or the STA disconnected.
4826 *
4827 * Note: This doesn't need any locks and is asynchronous.
4828 */
4829void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
4830 gfp_t gfp);
4831
4832/**
4833 * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
4834 * @wiphy: the wiphy to shut down
4835 *
4836 * This function shuts down all interfaces belonging to this wiphy by
4837 * calling dev_close() (and treating non-netdev interfaces as needed).
4838 * It shouldn't really be used unless there are some fatal device errors
4839 * that really can't be recovered in any other way.
4840 *
4841 * Callers must hold the RTNL and be able to deal with callbacks into
4842 * the driver while the function is running.
4843 */
4844void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
4845
4697/* Logging, debugging and troubleshooting/diagnostic helpers. */ 4846/* Logging, debugging and troubleshooting/diagnostic helpers. */
4698 4847
4699/* wiphy_printk helpers, similar to dev_printk */ 4848/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index a28f4e0f6251..87cb1903640d 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -57,12 +57,14 @@ static __inline__ __wsum csum_and_copy_to_user
57} 57}
58#endif 58#endif
59 59
60#ifndef HAVE_ARCH_CSUM_ADD
60static inline __wsum csum_add(__wsum csum, __wsum addend) 61static inline __wsum csum_add(__wsum csum, __wsum addend)
61{ 62{
62 u32 res = (__force u32)csum; 63 u32 res = (__force u32)csum;
63 res += (__force u32)addend; 64 res += (__force u32)addend;
64 return (__force __wsum)(res + (res < (__force u32)addend)); 65 return (__force __wsum)(res + (res < (__force u32)addend));
65} 66}
67#endif
66 68
67static inline __wsum csum_sub(__wsum csum, __wsum addend) 69static inline __wsum csum_sub(__wsum csum, __wsum addend)
68{ 70{
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 7828ebf99ee1..6efce384451e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -181,6 +181,11 @@ struct dsa_switch_driver {
181void register_switch_driver(struct dsa_switch_driver *type); 181void register_switch_driver(struct dsa_switch_driver *type);
182void unregister_switch_driver(struct dsa_switch_driver *type); 182void unregister_switch_driver(struct dsa_switch_driver *type);
183 183
184static inline void *ds_to_priv(struct dsa_switch *ds)
185{
186 return (void *)(ds + 1);
187}
188
184/* 189/*
185 * The original DSA tag format and some other tag formats have no 190 * The original DSA tag format and some other tag formats have no
186 * ethertype, which means that we need to add a little hack to the 191 * ethertype, which means that we need to add a little hack to the
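ds_to_priv() assumes the DSA core allocated the driver's private area directly behind struct dsa_switch. A minimal usage sketch with an invented example_priv type:

struct example_priv {
        int reg_cache;
};

static void example_touch(struct dsa_switch *ds)
{
        /* valid only if the core allocated sizeof(*ds) + sizeof(*priv) */
        struct example_priv *priv = ds_to_priv(ds);

        priv->reg_cache = 0;
}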
diff --git a/include/net/gre.h b/include/net/gre.h
index 70046a0b0b89..b53182018743 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -37,9 +37,10 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
37 int hdr_len); 37 int hdr_len);
38 38
39static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb, 39static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
40 bool gre_csum) 40 bool csum)
41{ 41{
42 return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE); 42 return iptunnel_handle_offloads(skb, csum,
43 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
43} 44}
44 45
45 46
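Callers see no interface change, only the GSO type selection; a hedged transmit-path sketch, assuming the helper still returns the skb or an ERR_PTR as iptunnel_handle_offloads does:

static int example_gre_xmit(struct sk_buff *skb, bool want_csum)
{
        skb = gre_handle_offloads(skb, want_csum);
        if (IS_ERR(skb))
                return PTR_ERR(skb);    /* skb already consumed on error */
        /* ... push the GRE header and hand off to the IP layer ... */
        return 0;
}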
diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h
index c7ae0ac528dc..0aa7122e8f15 100644
--- a/include/net/ieee802154.h
+++ b/include/net/ieee802154.h
@@ -79,6 +79,15 @@
79#define IEEE802154_SCF_KEY_SHORT_INDEX 2 79#define IEEE802154_SCF_KEY_SHORT_INDEX 2
80#define IEEE802154_SCF_KEY_HW_INDEX 3 80#define IEEE802154_SCF_KEY_HW_INDEX 3
81 81
82#define IEEE802154_SCF_SECLEVEL_NONE 0
83#define IEEE802154_SCF_SECLEVEL_MIC32 1
84#define IEEE802154_SCF_SECLEVEL_MIC64 2
85#define IEEE802154_SCF_SECLEVEL_MIC128 3
86#define IEEE802154_SCF_SECLEVEL_ENC 4
87#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
88#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
89#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
90
82/* MAC footer size */ 91/* MAC footer size */
83#define IEEE802154_MFR_SIZE 2 /* 2 octets */ 92#define IEEE802154_MFR_SIZE 2 /* 2 octets */
84 93
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 5a719ca892f4..3b53c8e405e4 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -27,6 +27,7 @@
27#ifndef IEEE802154_NETDEVICE_H 27#ifndef IEEE802154_NETDEVICE_H
28#define IEEE802154_NETDEVICE_H 28#define IEEE802154_NETDEVICE_H
29 29
30#include <net/ieee802154.h>
30#include <net/af_ieee802154.h> 31#include <net/af_ieee802154.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
@@ -114,6 +115,34 @@ int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
114int ieee802154_hdr_peek_addrs(const struct sk_buff *skb, 115int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
115 struct ieee802154_hdr *hdr); 116 struct ieee802154_hdr *hdr);
116 117
118/* parses the full 802.15.4 header of a given skb and stores it into hdr,
119 * performing pan id decompression and length checks to be suitable for use in
120 * header_ops.parse
121 */
122int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
123
124int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
125
126static inline int
127ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec)
128{
129 switch (sec->level) {
130 case IEEE802154_SCF_SECLEVEL_MIC32:
131 case IEEE802154_SCF_SECLEVEL_ENC_MIC32:
132 return 4;
133 case IEEE802154_SCF_SECLEVEL_MIC64:
134 case IEEE802154_SCF_SECLEVEL_ENC_MIC64:
135 return 8;
136 case IEEE802154_SCF_SECLEVEL_MIC128:
137 case IEEE802154_SCF_SECLEVEL_ENC_MIC128:
138 return 16;
139 case IEEE802154_SCF_SECLEVEL_NONE:
140 case IEEE802154_SCF_SECLEVEL_ENC:
141 default:
142 return 0;
143 }
144}
145
117static inline int ieee802154_hdr_length(struct sk_buff *skb) 146static inline int ieee802154_hdr_length(struct sk_buff *skb)
118{ 147{
119 struct ieee802154_hdr hdr; 148 struct ieee802154_hdr hdr;
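As a rough illustration (assuming struct ieee802154_hdr carries its security header in a .sec member, which is not shown in this hunk), the authtag helper sizes the MIC portion of a secured frame's trailer:

static int example_secured_tailroom(const struct ieee802154_hdr *hdr)
{
        /* MFR (FCS) plus the MIC implied by the security level */
        return IEEE802154_MFR_SIZE + ieee802154_sechdr_authtag_len(&hdr->sec);
}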
@@ -193,8 +222,12 @@ static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
193 */ 222 */
194struct ieee802154_mac_cb { 223struct ieee802154_mac_cb {
195 u8 lqi; 224 u8 lqi;
196 u8 flags; 225 u8 type;
197 u8 seq; 226 bool ackreq;
227 bool secen;
228 bool secen_override;
229 u8 seclevel;
230 bool seclevel_override;
198 struct ieee802154_addr source; 231 struct ieee802154_addr source;
199 struct ieee802154_addr dest; 232 struct ieee802154_addr dest;
200}; 233};
@@ -204,25 +237,96 @@ static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
204 return (struct ieee802154_mac_cb *)skb->cb; 237 return (struct ieee802154_mac_cb *)skb->cb;
205} 238}
206 239
207#define MAC_CB_FLAG_TYPEMASK ((1 << 3) - 1) 240static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
208
209#define MAC_CB_FLAG_ACKREQ (1 << 3)
210#define MAC_CB_FLAG_SECEN (1 << 4)
211
212static inline bool mac_cb_is_ackreq(struct sk_buff *skb)
213{ 241{
214 return mac_cb(skb)->flags & MAC_CB_FLAG_ACKREQ; 242 BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
215}
216 243
217static inline bool mac_cb_is_secen(struct sk_buff *skb) 244 memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
218{ 245 return mac_cb(skb);
219 return mac_cb(skb)->flags & MAC_CB_FLAG_SECEN;
220} 246}
221 247
222static inline int mac_cb_type(struct sk_buff *skb) 248#define IEEE802154_LLSEC_KEY_SIZE 16
223{ 249
224 return mac_cb(skb)->flags & MAC_CB_FLAG_TYPEMASK; 250struct ieee802154_llsec_key_id {
225} 251 u8 mode;
252 u8 id;
253 union {
254 struct ieee802154_addr device_addr;
255 __le32 short_source;
256 __le64 extended_source;
257 };
258};
259
260struct ieee802154_llsec_key {
261 u8 frame_types;
262 u32 cmd_frame_ids;
263 u8 key[IEEE802154_LLSEC_KEY_SIZE];
264};
265
266struct ieee802154_llsec_key_entry {
267 struct list_head list;
268
269 struct ieee802154_llsec_key_id id;
270 struct ieee802154_llsec_key *key;
271};
272
273struct ieee802154_llsec_device_key {
274 struct list_head list;
275
276 struct ieee802154_llsec_key_id key_id;
277 u32 frame_counter;
278};
279
280enum {
281 IEEE802154_LLSEC_DEVKEY_IGNORE,
282 IEEE802154_LLSEC_DEVKEY_RESTRICT,
283 IEEE802154_LLSEC_DEVKEY_RECORD,
284
285 __IEEE802154_LLSEC_DEVKEY_MAX,
286};
287
288struct ieee802154_llsec_device {
289 struct list_head list;
290
291 __le16 pan_id;
292 __le16 short_addr;
293 __le64 hwaddr;
294 u32 frame_counter;
295 bool seclevel_exempt;
296
297 u8 key_mode;
298 struct list_head keys;
299};
300
301struct ieee802154_llsec_seclevel {
302 struct list_head list;
303
304 u8 frame_type;
305 u8 cmd_frame_id;
306 bool device_override;
307 u32 sec_levels;
308};
309
310struct ieee802154_llsec_params {
311 bool enabled;
312
313 __be32 frame_counter;
314 u8 out_level;
315 struct ieee802154_llsec_key_id out_key;
316
317 __le64 default_key_source;
318
319 __le16 pan_id;
320 __le64 hwaddr;
321 __le64 coord_hwaddr;
322 __le16 coord_shortaddr;
323};
324
325struct ieee802154_llsec_table {
326 struct list_head keys;
327 struct list_head devices;
328 struct list_head security_levels;
329};
226 330
227#define IEEE802154_MAC_SCAN_ED 0 331#define IEEE802154_MAC_SCAN_ED 0
228#define IEEE802154_MAC_SCAN_ACTIVE 1 332#define IEEE802154_MAC_SCAN_ACTIVE 1
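On the receive side the control block is now zeroed explicitly and filled field by field rather than packed into a flags byte. A minimal sketch (example_ names invented):

static void example_rx_setup_cb(struct sk_buff *skb, u8 frame_type, bool ackreq)
{
        struct ieee802154_mac_cb *cb = mac_cb_init(skb); /* zeroes skb->cb */

        cb->type = frame_type;  /* e.g. a data frame */
        cb->ackreq = ackreq;
}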
@@ -242,6 +346,53 @@ struct ieee802154_mac_params {
242}; 346};
243 347
244struct wpan_phy; 348struct wpan_phy;
349
350enum {
351 IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
352 IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
353 IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
354 IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
355 IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
356 IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
357 IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
358 IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
359 IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
360};
361
362struct ieee802154_llsec_ops {
363 int (*get_params)(struct net_device *dev,
364 struct ieee802154_llsec_params *params);
365 int (*set_params)(struct net_device *dev,
366 const struct ieee802154_llsec_params *params,
367 int changed);
368
369 int (*add_key)(struct net_device *dev,
370 const struct ieee802154_llsec_key_id *id,
371 const struct ieee802154_llsec_key *key);
372 int (*del_key)(struct net_device *dev,
373 const struct ieee802154_llsec_key_id *id);
374
375 int (*add_dev)(struct net_device *dev,
376 const struct ieee802154_llsec_device *llsec_dev);
377 int (*del_dev)(struct net_device *dev, __le64 dev_addr);
378
379 int (*add_devkey)(struct net_device *dev,
380 __le64 device_addr,
381 const struct ieee802154_llsec_device_key *key);
382 int (*del_devkey)(struct net_device *dev,
383 __le64 device_addr,
384 const struct ieee802154_llsec_device_key *key);
385
386 int (*add_seclevel)(struct net_device *dev,
387 const struct ieee802154_llsec_seclevel *sl);
388 int (*del_seclevel)(struct net_device *dev,
389 const struct ieee802154_llsec_seclevel *sl);
390
391 void (*lock_table)(struct net_device *dev);
392 void (*get_table)(struct net_device *dev,
393 struct ieee802154_llsec_table **t);
394 void (*unlock_table)(struct net_device *dev);
395};
245/* 396/*
246 * This should be located at net_device->ml_priv 397 * This should be located at net_device->ml_priv
247 * 398 *
@@ -272,6 +423,8 @@ struct ieee802154_mlme_ops {
272 void (*get_mac_params)(struct net_device *dev, 423 void (*get_mac_params)(struct net_device *dev,
273 struct ieee802154_mac_params *params); 424 struct ieee802154_mac_params *params);
274 425
426 struct ieee802154_llsec_ops *llsec;
427
275 /* The fields below are required. */ 428 /* The fields below are required. */
276 429
277 struct wpan_phy *(*get_phy)(const struct net_device *dev); 430 struct wpan_phy *(*get_phy)(const struct net_device *dev);
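The table accessors are designed to be used as a lock/get/unlock triplet around iteration; a hedged sketch of a reader (example_ name invented):

static void example_dump_keys(struct net_device *dev,
                              const struct ieee802154_llsec_ops *ops)
{
        struct ieee802154_llsec_table *table;
        struct ieee802154_llsec_key_entry *pos;

        ops->lock_table(dev);
        ops->get_table(dev, &table);
        list_for_each_entry(pos, &table->keys, list)
                ;       /* inspect pos->id / pos->key here */
        ops->unlock_table(dev);
}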
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 3bd22795c3e2..84b20835b736 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -150,7 +150,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
150} 150}
151 151
152/* 152/*
153 * RFC 6080 4.2 153 * RFC 6040 4.2
154 * To decapsulate the inner header at the tunnel egress, a compliant 154 * To decapsulate the inner header at the tunnel egress, a compliant
155 * tunnel egress MUST set the outgoing ECN field to the codepoint at the 155 * tunnel egress MUST set the outgoing ECN field to the codepoint at the
156 * intersection of the appropriate arriving inner header (row) and outer 156 * intersection of the appropriate arriving inner header (row) and outer
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 1bdb47715def..dd1950a7e273 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -292,12 +292,12 @@ static inline struct sock *inet_lookup_listener(struct net *net,
292#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ 292#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
293 const __addrpair __name = (__force __addrpair) ( \ 293 const __addrpair __name = (__force __addrpair) ( \
294 (((__force __u64)(__be32)(__saddr)) << 32) | \ 294 (((__force __u64)(__be32)(__saddr)) << 32) | \
295 ((__force __u64)(__be32)(__daddr))); 295 ((__force __u64)(__be32)(__daddr)))
296#else /* __LITTLE_ENDIAN */ 296#else /* __LITTLE_ENDIAN */
297#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ 297#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
298 const __addrpair __name = (__force __addrpair) ( \ 298 const __addrpair __name = (__force __addrpair) ( \
299 (((__force __u64)(__be32)(__daddr)) << 32) | \ 299 (((__force __u64)(__be32)(__daddr)) << 32) | \
300 ((__force __u64)(__be32)(__saddr))); 300 ((__force __u64)(__be32)(__saddr)))
301#endif /* __BIG_ENDIAN */ 301#endif /* __BIG_ENDIAN */
302#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ 302#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
303 (((__sk)->sk_portpair == (__ports)) && \ 303 (((__sk)->sk_portpair == (__ports)) && \
@@ -306,7 +306,9 @@ static inline struct sock *inet_lookup_listener(struct net *net,
306 ((__sk)->sk_bound_dev_if == (__dif))) && \ 306 ((__sk)->sk_bound_dev_if == (__dif))) && \
307 net_eq(sock_net(__sk), (__net))) 307 net_eq(sock_net(__sk), (__net)))
308#else /* 32-bit arch */ 308#else /* 32-bit arch */
309#define INET_ADDR_COOKIE(__name, __saddr, __daddr) 309#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
310 const int __name __deprecated __attribute__((unused))
311
310#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ 312#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
311 (((__sk)->sk_portpair == (__ports)) && \ 313 (((__sk)->sk_portpair == (__ports)) && \
312 ((__sk)->sk_daddr == (__saddr)) && \ 314 ((__sk)->sk_daddr == (__saddr)) && \
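Dropping the macro's trailing semicolon matters because call sites already supply their own, which previously expanded to a stray empty statement after the declaration. After this change a call site reads as plain C:

static bool example_match(struct net *net, struct sock *sk,
                          __be32 saddr, __be32 daddr,
                          __portpair ports, int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr); /* declaration; caller adds ';' */

        return INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif);
}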
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1833c3f389ee..b1edf17bec01 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -90,6 +90,7 @@ struct inet_request_sock {
90 kmemcheck_bitfield_end(flags); 90 kmemcheck_bitfield_end(flags);
91 struct ip_options_rcu *opt; 91 struct ip_options_rcu *opt;
92 struct sk_buff *pktopts; 92 struct sk_buff *pktopts;
93 u32 ir_mark;
93}; 94};
94 95
95static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) 96static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -97,6 +98,15 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
97 return (struct inet_request_sock *)sk; 98 return (struct inet_request_sock *)sk;
98} 99}
99 100
101static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
102{
103 if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
104 return skb->mark;
105 } else {
106 return sk->sk_mark;
107 }
108}
109
100struct inet_cork { 110struct inet_cork {
101 unsigned int flags; 111 unsigned int flags;
102 __be32 addr; 112 __be32 addr;
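A typical consumer stores the result in the new ir_mark field when a request socket is created; a minimal sketch:

static void example_init_req_mark(struct sock *sk, struct request_sock *req,
                                  struct sk_buff *skb)
{
        /* skb->mark is used only if the listener is unmarked and
         * sysctl_tcp_fwmark_accept is enabled for this netns
         */
        inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
}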
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 058271bde27a..01d590ee5e7e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -41,14 +41,13 @@ struct inet_peer {
41 struct rcu_head gc_rcu; 41 struct rcu_head gc_rcu;
42 }; 42 };
43 /* 43 /*
44 * Once inet_peer is queued for deletion (refcnt == -1), following fields 44 * Once inet_peer is queued for deletion (refcnt == -1), following field
45 * are not available: rid, ip_id_count 45 * is not available: rid
46 * We can share memory with rcu_head to help keep inet_peer small. 46 * We can share memory with rcu_head to help keep inet_peer small.
47 */ 47 */
48 union { 48 union {
49 struct { 49 struct {
50 atomic_t rid; /* Frag reception counter */ 50 atomic_t rid; /* Frag reception counter */
51 atomic_t ip_id_count; /* IP ID for the next packet */
52 }; 51 };
53 struct rcu_head rcu; 52 struct rcu_head rcu;
54 struct inet_peer *gc_next; 53 struct inet_peer *gc_next;
@@ -165,21 +164,11 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
165void inetpeer_invalidate_tree(struct inet_peer_base *); 164void inetpeer_invalidate_tree(struct inet_peer_base *);
166 165
167/* 166/*
168 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts, 167 * temporary check to make sure we dont access rid, tcp_ts,
169 * tcp_ts_stamp if no refcount is taken on inet_peer 168 * tcp_ts_stamp if no refcount is taken on inet_peer
170 */ 169 */
171static inline void inet_peer_refcheck(const struct inet_peer *p) 170static inline void inet_peer_refcheck(const struct inet_peer *p)
172{ 171{
173 WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0); 172 WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
174} 173}
175
176
177/* can be called with or without local BH being disabled */
178static inline int inet_getid(struct inet_peer *p, int more)
179{
180 more++;
181 inet_peer_refcheck(p);
182 return atomic_add_return(more, &p->ip_id_count) - more;
183}
184
185#endif /* _NET_INETPEER_H */ 174#endif /* _NET_INETPEER_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 3ec2b0fb9d83..0e795df05ec9 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -196,35 +196,31 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
196#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) 196#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
197#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) 197#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
198 198
199unsigned long snmp_fold_field(void __percpu *mib[], int offt); 199unsigned long snmp_fold_field(void __percpu *mib, int offt);
200#if BITS_PER_LONG==32 200#if BITS_PER_LONG==32
201u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off); 201u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
202#else 202#else
203static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off) 203static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
204{ 204{
205 return snmp_fold_field(mib, offt); 205 return snmp_fold_field(mib, offt);
206} 206}
207#endif 207#endif
208int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
209
210static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
211{
212 int i;
213
214 BUG_ON(ptr == NULL);
215 for (i = 0; i < SNMP_ARRAY_SZ; i++) {
216 free_percpu(ptr[i]);
217 ptr[i] = NULL;
218 }
219}
220 208
221void inet_get_local_port_range(struct net *net, int *low, int *high); 209void inet_get_local_port_range(struct net *net, int *low, int *high);
222 210
223extern unsigned long *sysctl_local_reserved_ports; 211#ifdef CONFIG_SYSCTL
224static inline int inet_is_reserved_local_port(int port) 212static inline int inet_is_local_reserved_port(struct net *net, int port)
225{ 213{
226 return test_bit(port, sysctl_local_reserved_ports); 214 if (!net->ipv4.sysctl_local_reserved_ports)
215 return 0;
216 return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
227} 217}
218#else
219static inline int inet_is_local_reserved_port(struct net *net, int port)
220{
221 return 0;
222}
223#endif
228 224
229extern int sysctl_ip_nonlocal_bind; 225extern int sysctl_ip_nonlocal_bind;
230 226
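With the reserved-ports bitmap now per network namespace, a port-search loop passes its netns explicitly; a reduced sketch:

static int example_pick_port(struct net *net, int low, int high)
{
        int port;

        for (port = low; port <= high; port++) {
                if (inet_is_local_reserved_port(net, port))
                        continue;
                return port;    /* availability checks omitted for brevity */
        }
        return -1;
}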
@@ -243,6 +239,9 @@ void ipfrag_init(void);
243 239
244void ip_static_sysctl_init(void); 240void ip_static_sysctl_init(void);
245 241
242#define IP4_REPLY_MARK(net, mark) \
243 ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
244
246static inline bool ip_is_fragment(const struct iphdr *iph) 245static inline bool ip_is_fragment(const struct iphdr *iph)
247{ 246{
248 return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; 247 return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
@@ -281,7 +280,7 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk)
281 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; 280 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
282} 281}
283 282
284static inline bool ip_sk_local_df(const struct sock *sk) 283static inline bool ip_sk_ignore_df(const struct sock *sk)
285{ 284{
286 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || 285 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
287 inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; 286 inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
@@ -310,36 +309,48 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
310 } 309 }
311} 310}
312 311
313void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); 312#define IP_IDENTS_SZ 2048u
313extern atomic_t *ip_idents;
314
315static inline u32 ip_idents_reserve(u32 hash, int segs)
316{
317 atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
318
319 return atomic_add_return(segs, id_ptr) - segs;
320}
321
322void __ip_select_ident(struct iphdr *iph, int segs);
314 323
315static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) 324static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
316{ 325{
317 struct iphdr *iph = ip_hdr(skb); 326 struct iphdr *iph = ip_hdr(skb);
318 327
319 if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { 328 if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
320 /* This is only to work around buggy Windows95/2000 329 /* This is only to work around buggy Windows95/2000
321 * VJ compression implementations. If the ID field 330 * VJ compression implementations. If the ID field
322 * does not change, they drop every other packet in 331 * does not change, they drop every other packet in
323 * a TCP stream using header compression. 332 * a TCP stream using header compression.
324 */ 333 */
325 iph->id = (sk && inet_sk(sk)->inet_daddr) ? 334 if (sk && inet_sk(sk)->inet_daddr) {
326 htons(inet_sk(sk)->inet_id++) : 0; 335 iph->id = htons(inet_sk(sk)->inet_id);
327 } else 336 inet_sk(sk)->inet_id += segs;
328 __ip_select_ident(iph, dst, 0); 337 } else {
338 iph->id = 0;
339 }
340 } else {
341 __ip_select_ident(iph, segs);
342 }
329} 343}
330 344
331static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) 345static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
332{ 346{
333 struct iphdr *iph = ip_hdr(skb); 347 ip_select_ident_segs(skb, sk, 1);
348}
334 349
335 if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { 350static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
336 if (sk && inet_sk(sk)->inet_daddr) { 351{
337 iph->id = htons(inet_sk(sk)->inet_id); 352 return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
338 inet_sk(sk)->inet_id += 1 + more; 353 skb->len, proto, 0);
339 } else
340 iph->id = 0;
341 } else
342 __ip_select_ident(iph, dst, more);
343} 354}
344 355
345/* 356/*
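For GSO, a sender now reserves one IP ID per segment in a single atomic step; a sketch of the call a segmentation path would make:

static void example_fill_ip_id(struct sk_buff *skb, struct sock *sk)
{
        /* one ID per segment; plain packets pass segs == 1 */
        ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
}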
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 9e3c540c1b11..55236cb71174 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -41,6 +41,13 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
41 __wsum csum); 41 __wsum csum);
42#endif 42#endif
43 43
44static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
45{
46 return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
47 &ipv6_hdr(skb)->daddr,
48 skb->len, proto, 0));
49}
50
44static __inline__ __sum16 tcp_v6_check(int len, 51static __inline__ __sum16 tcp_v6_check(int len,
45 const struct in6_addr *saddr, 52 const struct in6_addr *saddr,
46 const struct in6_addr *daddr, 53 const struct in6_addr *daddr,
@@ -75,5 +82,17 @@ static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
75} 82}
76#endif 83#endif
77 84
85static inline __sum16 udp_v6_check(int len,
86 const struct in6_addr *saddr,
87 const struct in6_addr *daddr,
88 __wsum base)
89{
90 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base);
91}
92
93void udp6_set_csum(bool nocheck, struct sk_buff *skb,
94 const struct in6_addr *saddr,
95 const struct in6_addr *daddr, int len);
96
78int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto); 97int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
79#endif 98#endif
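A transmit path can now delegate the UDPv6 checksum entirely to the new helper; a minimal sketch:

static void example_udp6_finish(struct sk_buff *skb,
                                const struct in6_addr *saddr,
                                const struct in6_addr *daddr,
                                bool nocheck)
{
        /* fills uh->check, or flags the skb for no checksum when allowed */
        udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
}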
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 216cecce65e9..1d09b46c1e48 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -186,7 +186,7 @@ static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
186 inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; 186 inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
187} 187}
188 188
189static inline bool ip6_sk_local_df(const struct sock *sk) 189static inline bool ip6_sk_ignore_df(const struct sock *sk)
190{ 190{
191 return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || 191 return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
192 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; 192 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d640925bc454..574337fe72dd 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -113,6 +113,9 @@ struct frag_hdr {
113#define IP6_MF 0x0001 113#define IP6_MF 0x0001
114#define IP6_OFFSET 0xFFF8 114#define IP6_OFFSET 0xFFF8
115 115
116#define IP6_REPLY_MARK(net, mark) \
117 ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
118
116#include <net/sock.h> 119#include <net/sock.h>
117 120
118/* sysctls */ 121/* sysctls */
@@ -583,6 +586,11 @@ static inline bool ipv6_addr_orchid(const struct in6_addr *a)
583 return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); 586 return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
584} 587}
585 588
589static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
590{
591 return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
592}
593
586static inline void ipv6_addr_set_v4mapped(const __be32 addr, 594static inline void ipv6_addr_set_v4mapped(const __be32 addr,
587 struct in6_addr *v4mapped) 595 struct in6_addr *v4mapped)
588{ 596{
@@ -660,10 +668,22 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
660 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 668 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
661} 669}
662 670
663void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
664
665int ip6_dst_hoplimit(struct dst_entry *dst); 671int ip6_dst_hoplimit(struct dst_entry *dst);
666 672
673static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
674 struct dst_entry *dst)
675{
676 int hlimit;
677
678 if (ipv6_addr_is_multicast(&fl6->daddr))
679 hlimit = np->mcast_hops;
680 else
681 hlimit = np->hop_limit;
682 if (hlimit < 0)
683 hlimit = ip6_dst_hoplimit(dst);
684 return hlimit;
685}
686
667/* 687/*
668 * Header manipulation 688 * Header manipulation
669 */ 689 */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 8248e3909fdf..421b6ecb4b2c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -189,6 +189,43 @@ struct ieee80211_chanctx_conf {
189}; 189};
190 190
191/** 191/**
192 * enum ieee80211_chanctx_switch_mode - channel context switch mode
193 * @CHANCTX_SWMODE_REASSIGN_VIF: Both old and new contexts already
194 * exist (and will continue to exist), but the virtual interface
195 * needs to be switched from one to the other.
196 * @CHANCTX_SWMODE_SWAP_CONTEXTS: The old context exists but will stop
197 * to exist with this call, the new context doesn't exist but
198 * will be active after this call, the virtual interface switches
199 * from the old to the new (note that the driver may of course
200 * implement this as an on-the-fly chandef switch of the existing
201 * hardware context, but the mac80211 pointer for the old context
202 * will cease to exist and only the new one will later be used
203 * for changes/removal.)
204 */
205enum ieee80211_chanctx_switch_mode {
206 CHANCTX_SWMODE_REASSIGN_VIF,
207 CHANCTX_SWMODE_SWAP_CONTEXTS,
208};
209
210/**
211 * struct ieee80211_vif_chanctx_switch - vif chanctx switch information
212 *
213 * This structure is used to pass information about a vif that
214 * needs to switch from one chanctx to another. The
215 * &ieee80211_chanctx_switch_mode defines how the switch should be
216 * done.
217 *
218 * @vif: the vif that should be switched from old_ctx to new_ctx
219 * @old_ctx: the old context to which the vif was assigned
220 * @new_ctx: the new context to which the vif must be assigned
221 */
222struct ieee80211_vif_chanctx_switch {
223 struct ieee80211_vif *vif;
224 struct ieee80211_chanctx_conf *old_ctx;
225 struct ieee80211_chanctx_conf *new_ctx;
226};
227
228/**
192 * enum ieee80211_bss_change - BSS change notification flags 229 * enum ieee80211_bss_change - BSS change notification flags
193 * 230 *
194 * These flags are used with the bss_info_changed() callback 231 * These flags are used with the bss_info_changed() callback
@@ -1113,7 +1150,9 @@ enum ieee80211_vif_flags {
1113 * @addr: address of this interface 1150 * @addr: address of this interface
1114 * @p2p: indicates whether this AP or STA interface is a p2p 1151 * @p2p: indicates whether this AP or STA interface is a p2p
1115 * interface, i.e. a GO or p2p-sta respectively 1152 * interface, i.e. a GO or p2p-sta respectively
1116 * @csa_active: marks whether a channel switch is going on 1153 * @csa_active: marks whether a channel switch is going on. Internally it is
1154 * write-protected by sdata_lock and local->mtx so holding either is fine
1155 * for read access.
1117 * @driver_flags: flags/capabilities the driver has for this interface, 1156 * @driver_flags: flags/capabilities the driver has for this interface,
1118 * these need to be set (or cleared) when the interface is added 1157 * these need to be set (or cleared) when the interface is added
1119 * or, if supported by the driver, the interface type is changed 1158 * or, if supported by the driver, the interface type is changed
@@ -1202,14 +1241,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
1202 * fall back to software crypto. Note that this flag deals only with 1241 * fall back to software crypto. Note that this flag deals only with
1203 * RX, if your crypto engine can't deal with TX you can also set the 1242 * RX, if your crypto engine can't deal with TX you can also set the
1204 * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW. 1243 * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
1244 * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
1245 * driver for a CCMP key to indicate that it requires IV generation
1246 * only for management frames (MFP).
1205 */ 1247 */
1206enum ieee80211_key_flags { 1248enum ieee80211_key_flags {
1207 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, 1249 IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
1208 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 1250 IEEE80211_KEY_FLAG_GENERATE_IV = BIT(1),
1209 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, 1251 IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(2),
1210 IEEE80211_KEY_FLAG_SW_MGMT_TX = 1<<4, 1252 IEEE80211_KEY_FLAG_PAIRWISE = BIT(3),
1211 IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5, 1253 IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4),
1212 IEEE80211_KEY_FLAG_RX_MGMT = 1<<6, 1254 IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
1255 IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
1213}; 1256};
1214 1257
1215/** 1258/**
@@ -1370,6 +1413,7 @@ struct ieee80211_sta_rates {
1370 * the station moves to associated state. 1413 * the station moves to associated state.
1371 * @smps_mode: current SMPS mode (off, static or dynamic) 1414 * @smps_mode: current SMPS mode (off, static or dynamic)
1372 * @rates: rate control selection table 1415 * @rates: rate control selection table
1416 * @tdls: indicates whether the STA is a TDLS peer
1373 */ 1417 */
1374struct ieee80211_sta { 1418struct ieee80211_sta {
1375 u32 supp_rates[IEEE80211_NUM_BANDS]; 1419 u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1384,6 +1428,7 @@ struct ieee80211_sta {
1384 enum ieee80211_sta_rx_bandwidth bandwidth; 1428 enum ieee80211_sta_rx_bandwidth bandwidth;
1385 enum ieee80211_smps_mode smps_mode; 1429 enum ieee80211_smps_mode smps_mode;
1386 struct ieee80211_sta_rates __rcu *rates; 1430 struct ieee80211_sta_rates __rcu *rates;
1431 bool tdls;
1387 1432
1388 /* must be last */ 1433 /* must be last */
1389 u8 drv_priv[0] __aligned(sizeof(void *)); 1434 u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1555,6 +1600,12 @@ struct ieee80211_tx_control {
1555 * for a single active channel while using channel contexts. When support 1600 * for a single active channel while using channel contexts. When support
1556 * is not enabled the default action is to disconnect when getting the 1601 * is not enabled the default action is to disconnect when getting the
1557 * CSA frame. 1602 * CSA frame.
1603 *
1604 * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a
1605 * channel context on-the-fly. This is needed for channel switch
1606 * on single-channel hardware. It can also be used as an
1607 * optimization in certain channel switch cases with
1608 * multi-channel.
1558 */ 1609 */
1559enum ieee80211_hw_flags { 1610enum ieee80211_hw_flags {
1560 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 1611 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1586,6 +1637,7 @@ enum ieee80211_hw_flags {
1586 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, 1637 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
1587 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, 1638 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
1588 IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, 1639 IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
1640 IEEE80211_HW_CHANGE_RUNNING_CHANCTX = 1<<29,
1589}; 1641};
1590 1642
1591/** 1643/**
@@ -2609,6 +2661,7 @@ enum ieee80211_roc_type {
2609 * of queues to flush, which is useful if different virtual interfaces 2661 * of queues to flush, which is useful if different virtual interfaces
2610 * use different hardware queues; it may also indicate all queues. 2662 * use different hardware queues; it may also indicate all queues.
2611 * If the parameter @drop is set to %true, pending frames may be dropped. 2663 * If the parameter @drop is set to %true, pending frames may be dropped.
2664 * Note that vif can be NULL.
2612 * The callback can sleep. 2665 * The callback can sleep.
2613 * 2666 *
2614 * @channel_switch: Drivers that need (or want) to offload the channel 2667 * @channel_switch: Drivers that need (or want) to offload the channel
@@ -2720,6 +2773,11 @@ enum ieee80211_roc_type {
2720 * to vif. Possible use is for hw queue remapping. 2773 * to vif. Possible use is for hw queue remapping.
2721 * @unassign_vif_chanctx: Notifies device driver about channel context being 2774 * @unassign_vif_chanctx: Notifies device driver about channel context being
2722 * unbound from vif. 2775 * unbound from vif.
2776 * @switch_vif_chanctx: switch a number of vifs from one chanctx to
2777 * another, as specified in the list of
2778 * @ieee80211_vif_chanctx_switch passed to the driver, according
2779 * to the mode defined in &ieee80211_chanctx_switch_mode.
2780 *
2723 * @start_ap: Start operation on the AP interface, this is called after all the 2781 * @start_ap: Start operation on the AP interface, this is called after all the
2724 * information in bss_conf is set and beacon can be retrieved. A channel 2782 * information in bss_conf is set and beacon can be retrieved. A channel
2725 * context is bound before this is called. Note that if the driver uses 2783 * context is bound before this is called. Note that if the driver uses
@@ -2753,6 +2811,10 @@ enum ieee80211_roc_type {
2753 * information in bss_conf is set up and the beacon can be retrieved. A 2811 * information in bss_conf is set up and the beacon can be retrieved. A
2754 * channel context is bound before this is called. 2812 * channel context is bound before this is called.
2755 * @leave_ibss: Leave the IBSS again. 2813 * @leave_ibss: Leave the IBSS again.
2814 *
2815 * @get_expected_throughput: extract the expected throughput towards the
 2816 * specified station. The returned value is expressed in Kbps. It returns 0
 2817 * if the RC algorithm has no usable data to report.
2756 */ 2818 */
2757struct ieee80211_ops { 2819struct ieee80211_ops {
2758 void (*tx)(struct ieee80211_hw *hw, 2820 void (*tx)(struct ieee80211_hw *hw,
@@ -2871,7 +2933,8 @@ struct ieee80211_ops {
2871 struct netlink_callback *cb, 2933 struct netlink_callback *cb,
2872 void *data, int len); 2934 void *data, int len);
2873#endif 2935#endif
2874 void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop); 2936 void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2937 u32 queues, bool drop);
2875 void (*channel_switch)(struct ieee80211_hw *hw, 2938 void (*channel_switch)(struct ieee80211_hw *hw,
2876 struct ieee80211_channel_switch *ch_switch); 2939 struct ieee80211_channel_switch *ch_switch);
2877 int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); 2940 int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
@@ -2931,6 +2994,10 @@ struct ieee80211_ops {
2931 void (*unassign_vif_chanctx)(struct ieee80211_hw *hw, 2994 void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
2932 struct ieee80211_vif *vif, 2995 struct ieee80211_vif *vif,
2933 struct ieee80211_chanctx_conf *ctx); 2996 struct ieee80211_chanctx_conf *ctx);
2997 int (*switch_vif_chanctx)(struct ieee80211_hw *hw,
2998 struct ieee80211_vif_chanctx_switch *vifs,
2999 int n_vifs,
3000 enum ieee80211_chanctx_switch_mode mode);
2934 3001
2935 void (*restart_complete)(struct ieee80211_hw *hw); 3002 void (*restart_complete)(struct ieee80211_hw *hw);
2936 3003
@@ -2945,6 +3012,7 @@ struct ieee80211_ops {
2945 3012
2946 int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3013 int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
2947 void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3014 void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
3015 u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
2948}; 3016};
2949 3017
2950/** 3018/**
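
[Editor's note] Taken together, the vif argument added to @flush, the new @switch_vif_chanctx hook and @get_expected_throughput change how a driver wires up its ieee80211_ops. A minimal sketch follows; the mydrv_* names are hypothetical stand-ins, and the vif/old_ctx/new_ctx members of the switch entries are assumed from the kernel-doc above, not shown in this hunk:

/* Sketch: driver glue for the updated callbacks; only the members
 * touched by this patch are shown. */
static void mydrv_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u32 queues, bool drop)
{
	/* vif may be NULL; flush (or drop) the requested hardware queues */
}

static int mydrv_switch_vif_chanctx(struct ieee80211_hw *hw,
				    struct ieee80211_vif_chanctx_switch *vifs,
				    int n_vifs,
				    enum ieee80211_chanctx_switch_mode mode)
{
	/* move each vifs[i].vif from vifs[i].old_ctx to vifs[i].new_ctx,
	 * reusing or swapping contexts depending on mode */
	return 0;
}

static u32 mydrv_get_expected_throughput(struct ieee80211_sta *sta)
{
	return 0;	/* Kbps; 0 while rate control has no usable data */
}

static const struct ieee80211_ops mydrv_ops = {
	.flush			 = mydrv_flush,
	.switch_vif_chanctx	 = mydrv_switch_vif_chanctx,
	.get_expected_throughput = mydrv_get_expected_throughput,
	/* ...mandatory ops (tx, start, stop, ...) omitted... */
};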
@@ -3394,6 +3462,47 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
3394 */ 3462 */
3395void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets); 3463void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
3396 3464
3465#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
3466
3467/**
3468 * struct ieee80211_mutable_offsets - mutable beacon offsets
3469 * @tim_offset: position of TIM element
3470 * @tim_length: size of TIM element
3471 * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
 3472 * to CSA counters. This array can contain zero values, which
 3473 * should be ignored.
3474 */
3475struct ieee80211_mutable_offsets {
3476 u16 tim_offset;
3477 u16 tim_length;
3478
3479 u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
3480};
3481
3482/**
3483 * ieee80211_beacon_get_template - beacon template generation function
3484 * @hw: pointer obtained from ieee80211_alloc_hw().
3485 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
3486 * @offs: &struct ieee80211_mutable_offsets pointer to struct that will
3487 * receive the offsets that may be updated by the driver.
3488 *
3489 * If the driver implements beaconing modes, it must use this function to
3490 * obtain the beacon template.
3491 *
3492 * This function should be used if the beacon frames are generated by the
 3493 * device, and then the driver must use the returned beacon as the template.
 3494 * The driver or the device is responsible for updating the DTIM and, when
3495 * applicable, the CSA count.
3496 *
3497 * The driver is responsible for freeing the returned skb.
3498 *
3499 * Return: The beacon template. %NULL on error.
3500 */
3501struct sk_buff *
3502ieee80211_beacon_get_template(struct ieee80211_hw *hw,
3503 struct ieee80211_vif *vif,
3504 struct ieee80211_mutable_offsets *offs);
3505
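
[Editor's note] For offloaded beaconing the intended call flow is sketched below; mydrv_upload_beacon() is a hypothetical device hook and error handling is trimmed:

/* Sketch: fetch the template and program it into the device. */
static int mydrv_set_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *skb;

	skb = ieee80211_beacon_get_template(hw, vif, &offs);
	if (!skb)
		return -ENOMEM;

	/* The device/firmware must patch the TIM at offs.tim_offset and,
	 * during CSA, the counters at offs.csa_counter_offs[]. */
	mydrv_upload_beacon(hw, skb, &offs);

	dev_kfree_skb(skb);	/* the driver owns the returned skb */
	return 0;
}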
3397/** 3506/**
3398 * ieee80211_beacon_get_tim - beacon generation function 3507 * ieee80211_beacon_get_tim - beacon generation function
3399 * @hw: pointer obtained from ieee80211_alloc_hw(). 3508 * @hw: pointer obtained from ieee80211_alloc_hw().
@@ -3405,16 +3514,12 @@ void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
3405 * Set to 0 if invalid (in non-AP modes). 3514 * Set to 0 if invalid (in non-AP modes).
3406 * 3515 *
3407 * If the driver implements beaconing modes, it must use this function to 3516 * If the driver implements beaconing modes, it must use this function to
3408 * obtain the beacon frame/template. 3517 * obtain the beacon frame.
3409 * 3518 *
3410 * If the beacon frames are generated by the host system (i.e., not in 3519 * If the beacon frames are generated by the host system (i.e., not in
3411 * hardware/firmware), the driver uses this function to get each beacon 3520 * hardware/firmware), the driver uses this function to get each beacon
3412 * frame from mac80211 -- it is responsible for calling this function 3521 * frame from mac80211 -- it is responsible for calling this function exactly
3413 * before the beacon is needed (e.g. based on hardware interrupt). 3522 * once before the beacon is needed (e.g. based on hardware interrupt).
3414 *
3415 * If the beacon frames are generated by the device, then the driver
3416 * must use the returned beacon as the template and change the TIM IE
3417 * according to the current DTIM parameters/TIM bitmap.
3418 * 3523 *
3419 * The driver is responsible for freeing the returned skb. 3524 * The driver is responsible for freeing the returned skb.
3420 * 3525 *
@@ -3440,6 +3545,20 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
3440} 3545}
3441 3546
3442/** 3547/**
3548 * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
3549 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
3550 *
3551 * The csa counter should be updated after each beacon transmission.
3552 * This function is called implicitly when
 3553 * ieee80211_beacon_get/ieee80211_beacon_get_tim are called; however, if the
3554 * beacon frames are generated by the device, the driver should call this
3555 * function after each beacon transmission to sync mac80211's csa counters.
3556 *
3557 * Return: new csa counter value
3558 */
3559u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
3560
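
[Editor's note] A hedged sketch of the device-generated-beacon case: after each beacon the driver resyncs the counter. The point at which ieee80211_csa_finish() is called (here, the counter reaching 1) is an assumption, not mandated by this patch:

/* Sketch: called from a hypothetical per-beacon TX-complete path. */
static void mydrv_beacon_tx_complete(struct ieee80211_vif *vif)
{
	u8 count = ieee80211_csa_update_counter(vif);

	if (count == 1)		/* assumed completion point */
		ieee80211_csa_finish(vif);
}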
3561/**
3443 * ieee80211_csa_finish - notify mac80211 about channel switch 3562 * ieee80211_csa_finish - notify mac80211 about channel switch
3444 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 3563 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
3445 * 3564 *
@@ -4467,6 +4586,8 @@ struct rate_control_ops {
4467 void (*add_sta_debugfs)(void *priv, void *priv_sta, 4586 void (*add_sta_debugfs)(void *priv, void *priv_sta,
4468 struct dentry *dir); 4587 struct dentry *dir);
4469 void (*remove_sta_debugfs)(void *priv, void *priv_sta); 4588 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
4589
4590 u32 (*get_expected_throughput)(void *priv_sta);
4470}; 4591};
4471 4592
4472static inline int rate_supported(struct ieee80211_sta *sta, 4593static inline int rate_supported(struct ieee80211_sta *sta,
@@ -4576,7 +4697,9 @@ conf_is_ht40(struct ieee80211_conf *conf)
4576static inline bool 4697static inline bool
4577conf_is_ht(struct ieee80211_conf *conf) 4698conf_is_ht(struct ieee80211_conf *conf)
4578{ 4699{
4579 return conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; 4700 return (conf->chandef.width != NL80211_CHAN_WIDTH_5) &&
4701 (conf->chandef.width != NL80211_CHAN_WIDTH_10) &&
4702 (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT);
4580} 4703}
4581 4704
4582static inline enum nl80211_iftype 4705static inline enum nl80211_iftype
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 5f9eb260990f..361d26077196 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -373,6 +373,14 @@ static inline void rt_genid_bump_ipv6(struct net *net)
373} 373}
374#endif 374#endif
375 375
376#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
377static inline struct netns_ieee802154_lowpan *
378net_ieee802154_lowpan(struct net *net)
379{
380 return &net->ieee802154_lowpan;
381}
382#endif
383
376/* For callers who don't really care about whether it's IPv4 or IPv6 */ 384/* For callers who don't really care about whether it's IPv4 or IPv6 */
377static inline void rt_genid_bump_all(struct net *net) 385static inline void rt_genid_bump_all(struct net *net)
378{ 386{
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 07eaaf604092..a71dd333ac68 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -48,6 +48,8 @@ unsigned int nf_nat_setup_info(struct nf_conn *ct,
48extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct, 48extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
49 unsigned int hooknum); 49 unsigned int hooknum);
50 50
51struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
52
51/* Is this tuple already taken? (not by us)*/ 53/* Is this tuple already taken? (not by us)*/
52int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 54int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
53 const struct nf_conn *ignored_conntrack); 55 const struct nf_conn *ignored_conntrack);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e6bc14d8fa9a..7ee6ce6564ae 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -72,21 +72,23 @@ static inline void nft_data_debug(const struct nft_data *data)
72 * struct nft_ctx - nf_tables rule/set context 72 * struct nft_ctx - nf_tables rule/set context
73 * 73 *
74 * @net: net namespace 74 * @net: net namespace
75 * @skb: netlink skb
76 * @nlh: netlink message header
77 * @afi: address family info 75 * @afi: address family info
78 * @table: the table the chain is contained in 76 * @table: the table the chain is contained in
79 * @chain: the chain the rule is contained in 77 * @chain: the chain the rule is contained in
80 * @nla: netlink attributes 78 * @nla: netlink attributes
79 * @portid: netlink portID of the original message
80 * @seq: netlink sequence number
81 * @report: notify via unicast netlink message
81 */ 82 */
82struct nft_ctx { 83struct nft_ctx {
83 struct net *net; 84 struct net *net;
84 const struct sk_buff *skb; 85 struct nft_af_info *afi;
85 const struct nlmsghdr *nlh; 86 struct nft_table *table;
86 const struct nft_af_info *afi; 87 struct nft_chain *chain;
87 const struct nft_table *table;
88 const struct nft_chain *chain;
89 const struct nlattr * const *nla; 88 const struct nlattr * const *nla;
89 u32 portid;
90 u32 seq;
91 bool report;
90}; 92};
91 93
92struct nft_data_desc { 94struct nft_data_desc {
@@ -146,6 +148,44 @@ struct nft_set_iter {
146}; 148};
147 149
148/** 150/**
151 * struct nft_set_desc - description of set elements
152 *
153 * @klen: key length
154 * @dlen: data length
155 * @size: number of set elements
156 */
157struct nft_set_desc {
158 unsigned int klen;
159 unsigned int dlen;
160 unsigned int size;
161};
162
163/**
164 * enum nft_set_class - performance class
165 *
 166 * @NFT_SET_CLASS_O_1: constant, O(1)
 167 * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
 168 * @NFT_SET_CLASS_O_N: linear, O(N)
169 */
170enum nft_set_class {
171 NFT_SET_CLASS_O_1,
172 NFT_SET_CLASS_O_LOG_N,
173 NFT_SET_CLASS_O_N,
174};
175
176/**
177 * struct nft_set_estimate - estimation of memory and performance
178 * characteristics
179 *
180 * @size: required memory
181 * @class: lookup performance class
182 */
183struct nft_set_estimate {
184 unsigned int size;
185 enum nft_set_class class;
186};
187
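
[Editor's note] An estimate callback (see the nft_set_ops hunk below) fills both fields from the descriptor so the core can pick a backend when the user did not name one. A sketch for a hypothetical hash backend, with purely illustrative size arithmetic:

/* Sketch: report memory use and lookup class for a hash-based set. */
static bool myhash_estimate(const struct nft_set_desc *desc, u32 features,
			    struct nft_set_estimate *est)
{
	if (!desc->size)
		return false;	/* this backend wants an explicit element count */

	est->size  = desc->size * (desc->klen + desc->dlen + 16 /* illustrative overhead */);
	est->class = NFT_SET_CLASS_O_1;
	return true;
}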
188/**
149 * struct nft_set_ops - nf_tables set operations 189 * struct nft_set_ops - nf_tables set operations
150 * 190 *
151 * @lookup: look up an element within the set 191 * @lookup: look up an element within the set
@@ -174,7 +214,11 @@ struct nft_set_ops {
174 struct nft_set_iter *iter); 214 struct nft_set_iter *iter);
175 215
176 unsigned int (*privsize)(const struct nlattr * const nla[]); 216 unsigned int (*privsize)(const struct nlattr * const nla[]);
217 bool (*estimate)(const struct nft_set_desc *desc,
218 u32 features,
219 struct nft_set_estimate *est);
177 int (*init)(const struct nft_set *set, 220 int (*init)(const struct nft_set *set,
221 const struct nft_set_desc *desc,
178 const struct nlattr * const nla[]); 222 const struct nlattr * const nla[]);
179 void (*destroy)(const struct nft_set *set); 223 void (*destroy)(const struct nft_set *set);
180 224
@@ -194,6 +238,8 @@ void nft_unregister_set(struct nft_set_ops *ops);
194 * @name: name of the set 238 * @name: name of the set
195 * @ktype: key type (numeric type defined by userspace, not used in the kernel) 239 * @ktype: key type (numeric type defined by userspace, not used in the kernel)
196 * @dtype: data type (verdict or numeric type defined by userspace) 240 * @dtype: data type (verdict or numeric type defined by userspace)
241 * @size: maximum set size
242 * @nelems: number of elements
197 * @ops: set ops 243 * @ops: set ops
198 * @flags: set flags 244 * @flags: set flags
199 * @klen: key length 245 * @klen: key length
@@ -206,6 +252,8 @@ struct nft_set {
206 char name[IFNAMSIZ]; 252 char name[IFNAMSIZ];
207 u32 ktype; 253 u32 ktype;
208 u32 dtype; 254 u32 dtype;
255 u32 size;
256 u32 nelems;
209 /* runtime data below here */ 257 /* runtime data below here */
210 const struct nft_set_ops *ops ____cacheline_aligned; 258 const struct nft_set_ops *ops ____cacheline_aligned;
211 u16 flags; 259 u16 flags;
@@ -222,6 +270,8 @@ static inline void *nft_set_priv(const struct nft_set *set)
222 270
223struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 271struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
224 const struct nlattr *nla); 272 const struct nlattr *nla);
273struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
274 const struct nlattr *nla);
225 275
226/** 276/**
227 * struct nft_set_binding - nf_tables set binding 277 * struct nft_set_binding - nf_tables set binding
@@ -341,18 +391,75 @@ struct nft_rule {
341}; 391};
342 392
343/** 393/**
344 * struct nft_rule_trans - nf_tables rule update in transaction 394 * struct nft_trans - nf_tables object update in transaction
345 * 395 *
396 * @rcu_head: rcu head to defer release of transaction data
346 * @list: used internally 397 * @list: used internally
347 * @ctx: rule context 398 * @msg_type: message type
348 * @rule: rule that needs to be updated 399 * @ctx: transaction context
400 * @data: internal information related to the transaction
349 */ 401 */
350struct nft_rule_trans { 402struct nft_trans {
403 struct rcu_head rcu_head;
351 struct list_head list; 404 struct list_head list;
405 int msg_type;
352 struct nft_ctx ctx; 406 struct nft_ctx ctx;
407 char data[0];
408};
409
410struct nft_trans_rule {
353 struct nft_rule *rule; 411 struct nft_rule *rule;
354}; 412};
355 413
414#define nft_trans_rule(trans) \
415 (((struct nft_trans_rule *)trans->data)->rule)
416
417struct nft_trans_set {
418 struct nft_set *set;
419 u32 set_id;
420};
421
422#define nft_trans_set(trans) \
423 (((struct nft_trans_set *)trans->data)->set)
424#define nft_trans_set_id(trans) \
425 (((struct nft_trans_set *)trans->data)->set_id)
426
427struct nft_trans_chain {
428 bool update;
429 char name[NFT_CHAIN_MAXNAMELEN];
430 struct nft_stats __percpu *stats;
431 u8 policy;
432};
433
434#define nft_trans_chain_update(trans) \
435 (((struct nft_trans_chain *)trans->data)->update)
436#define nft_trans_chain_name(trans) \
437 (((struct nft_trans_chain *)trans->data)->name)
438#define nft_trans_chain_stats(trans) \
439 (((struct nft_trans_chain *)trans->data)->stats)
440#define nft_trans_chain_policy(trans) \
441 (((struct nft_trans_chain *)trans->data)->policy)
442
443struct nft_trans_table {
444 bool update;
445 bool enable;
446};
447
448#define nft_trans_table_update(trans) \
449 (((struct nft_trans_table *)trans->data)->update)
450#define nft_trans_table_enable(trans) \
451 (((struct nft_trans_table *)trans->data)->enable)
452
453struct nft_trans_elem {
454 struct nft_set *set;
455 struct nft_set_elem elem;
456};
457
458#define nft_trans_elem_set(trans) \
459 (((struct nft_trans_elem *)trans->data)->set)
460#define nft_trans_elem(trans) \
461 (((struct nft_trans_elem *)trans->data)->elem)
462
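
[Editor's note] The flexible data[] tail lets one allocation carry any of the per-type payloads above. A sketch of the expected allocation/use pattern; nft_trans_alloc() mirrors what such a helper would look like and is not itself part of this hunk:

/* Sketch: allocate a transaction sized for its type-specific payload. */
static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
					 int msg_type, u32 size)
{
	struct nft_trans *trans;

	trans = kzalloc(sizeof(*trans) + size, GFP_KERNEL);
	if (trans == NULL)
		return NULL;

	trans->msg_type = msg_type;
	trans->ctx = *ctx;
	return trans;
}

/* e.g. for a rule update:
 *	trans = nft_trans_alloc(ctx, NFT_MSG_NEWRULE, sizeof(struct nft_trans_rule));
 *	nft_trans_rule(trans) = rule;
 */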
356static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule) 463static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
357{ 464{
358 return (struct nft_expr *)&rule->data[0]; 465 return (struct nft_expr *)&rule->data[0];
@@ -385,6 +492,7 @@ static inline void *nft_userdata(const struct nft_rule *rule)
385 492
386enum nft_chain_flags { 493enum nft_chain_flags {
387 NFT_BASE_CHAIN = 0x1, 494 NFT_BASE_CHAIN = 0x1,
495 NFT_CHAIN_INACTIVE = 0x2,
388}; 496};
389 497
390/** 498/**
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
new file mode 100644
index 000000000000..0ee47c3e2e31
--- /dev/null
+++ b/include/net/netfilter/nft_meta.h
@@ -0,0 +1,36 @@
1#ifndef _NFT_META_H_
2#define _NFT_META_H_
3
4struct nft_meta {
5 enum nft_meta_keys key:8;
6 union {
7 enum nft_registers dreg:8;
8 enum nft_registers sreg:8;
9 };
10};
11
12extern const struct nla_policy nft_meta_policy[];
13
14int nft_meta_get_init(const struct nft_ctx *ctx,
15 const struct nft_expr *expr,
16 const struct nlattr * const tb[]);
17
18int nft_meta_set_init(const struct nft_ctx *ctx,
19 const struct nft_expr *expr,
20 const struct nlattr * const tb[]);
21
22int nft_meta_get_dump(struct sk_buff *skb,
23 const struct nft_expr *expr);
24
25int nft_meta_set_dump(struct sk_buff *skb,
26 const struct nft_expr *expr);
27
28void nft_meta_get_eval(const struct nft_expr *expr,
29 struct nft_data data[NFT_REG_MAX + 1],
30 const struct nft_pktinfo *pkt);
31
32void nft_meta_set_eval(const struct nft_expr *expr,
33 struct nft_data data[NFT_REG_MAX + 1],
34 const struct nft_pktinfo *pkt);
35
 36#endif /* _NFT_META_H_ */
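
[Editor's note] Presumably this header exists so other families can reuse the meta expression. A hedged sketch of such reuse, where nft_meta_bridge_type is a hypothetical expression type:

/* Sketch: reusing the shared meta handlers from another expression type. */
static const struct nft_expr_ops nft_meta_bridge_get_ops = {
	.type	= &nft_meta_bridge_type,	/* hypothetical type */
	.size	= NFT_EXPR_SIZE(sizeof(struct nft_meta)),
	.eval	= nft_meta_get_eval,
	.init	= nft_meta_get_init,
	.dump	= nft_meta_get_dump,
};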
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index b2704fd0ec80..aec5e12f9f19 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -77,10 +77,17 @@ struct netns_ipv4 {
77 int sysctl_ip_no_pmtu_disc; 77 int sysctl_ip_no_pmtu_disc;
78 int sysctl_ip_fwd_use_pmtu; 78 int sysctl_ip_fwd_use_pmtu;
79 79
80 int sysctl_fwmark_reflect;
81 int sysctl_tcp_fwmark_accept;
82
80 struct ping_group_range ping_group_range; 83 struct ping_group_range ping_group_range;
81 84
82 atomic_t dev_addr_genid; 85 atomic_t dev_addr_genid;
83 86
87#ifdef CONFIG_SYSCTL
88 unsigned long *sysctl_local_reserved_ports;
89#endif
90
84#ifdef CONFIG_IP_MROUTE 91#ifdef CONFIG_IP_MROUTE
85#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES 92#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
86 struct mr_table *mrt; 93 struct mr_table *mrt;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 21edaf1f7916..19d3446e59d2 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -30,6 +30,7 @@ struct netns_sysctl_ipv6 {
30 int flowlabel_consistency; 30 int flowlabel_consistency;
31 int icmpv6_time; 31 int icmpv6_time;
32 int anycast_src_echo_reply; 32 int anycast_src_echo_reply;
33 int fwmark_reflect;
33}; 34};
34 35
35struct netns_ipv6 { 36struct netns_ipv6 {
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index 7655cfe27c34..bdf55c3b7a19 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -36,6 +36,7 @@ enum {
36 NFC_DIGITAL_RF_TECH_212F, 36 NFC_DIGITAL_RF_TECH_212F,
37 NFC_DIGITAL_RF_TECH_424F, 37 NFC_DIGITAL_RF_TECH_424F,
38 NFC_DIGITAL_RF_TECH_ISO15693, 38 NFC_DIGITAL_RF_TECH_ISO15693,
39 NFC_DIGITAL_RF_TECH_106B,
39 40
40 NFC_DIGITAL_RF_TECH_LAST, 41 NFC_DIGITAL_RF_TECH_LAST,
41}; 42};
@@ -62,6 +63,9 @@ enum {
62 NFC_DIGITAL_FRAMING_ISO15693_INVENTORY, 63 NFC_DIGITAL_FRAMING_ISO15693_INVENTORY,
63 NFC_DIGITAL_FRAMING_ISO15693_T5T, 64 NFC_DIGITAL_FRAMING_ISO15693_T5T,
64 65
66 NFC_DIGITAL_FRAMING_NFCB,
67 NFC_DIGITAL_FRAMING_NFCB_T4T,
68
65 NFC_DIGITAL_FRAMING_LAST, 69 NFC_DIGITAL_FRAMING_LAST,
66}; 70};
67 71
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 03c4650b548c..61286db54388 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -27,6 +27,7 @@ struct nfc_hci_dev;
27struct nfc_hci_ops { 27struct nfc_hci_ops {
28 int (*open) (struct nfc_hci_dev *hdev); 28 int (*open) (struct nfc_hci_dev *hdev);
29 void (*close) (struct nfc_hci_dev *hdev); 29 void (*close) (struct nfc_hci_dev *hdev);
30 int (*load_session) (struct nfc_hci_dev *hdev);
30 int (*hci_ready) (struct nfc_hci_dev *hdev); 31 int (*hci_ready) (struct nfc_hci_dev *hdev);
31 /* 32 /*
32 * xmit must always send the complete buffer before 33 * xmit must always send the complete buffer before
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 2e8b40c16274..6c583e244de2 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -264,4 +264,7 @@ int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
264int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); 264int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
265struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx); 265struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
266 266
267void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
268 u8 payload_type, u8 direction);
269
267#endif /* __NET_NFC_H */ 270#endif /* __NET_NFC_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a2441fb1428f..6da46dcf1049 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -136,7 +136,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
136 136
137int tcf_exts_validate(struct net *net, struct tcf_proto *tp, 137int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
138 struct nlattr **tb, struct nlattr *rate_tlv, 138 struct nlattr **tb, struct nlattr *rate_tlv,
139 struct tcf_exts *exts); 139 struct tcf_exts *exts, bool ovr);
140void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); 140void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
141void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, 141void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
142 struct tcf_exts *src); 142 struct tcf_exts *src);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 891d80d2c4d2..ec030cd76616 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -96,7 +96,7 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
96 struct nlattr *tab); 96 struct nlattr *tab);
97void qdisc_put_rtab(struct qdisc_rate_table *tab); 97void qdisc_put_rtab(struct qdisc_rate_table *tab);
98void qdisc_put_stab(struct qdisc_size_table *tab); 98void qdisc_put_stab(struct qdisc_size_table *tab);
99void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc); 99void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
100int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 100int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
101 struct net_device *dev, struct netdev_queue *txq, 101 struct net_device *dev, struct netdev_queue *txq,
102 spinlock_t *root_lock); 102 spinlock_t *root_lock);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index a7e986b08147..d6fcc1fcdb5b 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -86,7 +86,6 @@ struct inet_protosw {
86 struct proto *prot; 86 struct proto *prot;
87 const struct proto_ops *ops; 87 const struct proto_ops *ops;
88 88
89 char no_check; /* checksum on rcv/xmit/none? */
90 unsigned char flags; /* See INET_PROTOSW_* below. */ 89 unsigned char flags; /* See INET_PROTOSW_* below. */
91}; 90};
92#define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */ 91#define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 75fc1f5a948d..259992444e80 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -131,6 +131,11 @@ struct regulatory_request {
131 * all country IE information processed by the regulatory core. This will 131 * all country IE information processed by the regulatory core. This will
132 * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will 132 * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will
133 * be ignored. 133 * be ignored.
134 * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the
 135 * NO_IR relaxation, which enables transmissions on channels on which
 136 * initiating radiation is otherwise not allowed. This will enable the
 137 * relaxations allowed under the CFG80211_REG_RELAX_NO_IR configuration
 138 * option.
134 */ 139 */
135enum ieee80211_regulatory_flags { 140enum ieee80211_regulatory_flags {
136 REGULATORY_CUSTOM_REG = BIT(0), 141 REGULATORY_CUSTOM_REG = BIT(0),
@@ -138,6 +143,7 @@ enum ieee80211_regulatory_flags {
138 REGULATORY_DISABLE_BEACON_HINTS = BIT(2), 143 REGULATORY_DISABLE_BEACON_HINTS = BIT(2),
139 REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), 144 REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
140 REGULATORY_COUNTRY_IE_IGNORE = BIT(4), 145 REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
146 REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
141}; 147};
142 148
143struct ieee80211_freq_range { 149struct ieee80211_freq_range {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d062f81c692f..624f9857c83e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -199,7 +199,7 @@ struct tcf_proto_ops {
199 int (*change)(struct net *net, struct sk_buff *, 199 int (*change)(struct net *net, struct sk_buff *,
200 struct tcf_proto*, unsigned long, 200 struct tcf_proto*, unsigned long,
201 u32 handle, struct nlattr **, 201 u32 handle, struct nlattr **,
202 unsigned long *); 202 unsigned long *, bool);
203 int (*delete)(struct tcf_proto*, unsigned long); 203 int (*delete)(struct tcf_proto*, unsigned long);
204 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); 204 void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
205 205
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 0dfcc92600e8..f38588bf3462 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -838,10 +838,10 @@ struct sctp_transport {
838 unsigned long sackdelay; 838 unsigned long sackdelay;
839 __u32 sackfreq; 839 __u32 sackfreq;
840 840
841 /* When was the last time (in jiffies) that we heard from this 841 /* When was the last time that we heard from this transport? We use
842 * transport? We use this to pick new active and retran paths. 842 * this to pick new active and retran paths.
843 */ 843 */
844 unsigned long last_time_heard; 844 ktime_t last_time_heard;
845 845
846 /* Last time(in jiffies) when cwnd is reduced due to the congestion 846 /* Last time(in jiffies) when cwnd is reduced due to the congestion
847 * indication based on ECNE chunk. 847 * indication based on ECNE chunk.
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index f257486f17be..3f36d45b714a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6__u32 secure_ip_id(__be32 daddr);
7__u32 secure_ipv6_id(const __be32 daddr[4]);
8u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 6u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
9u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 7u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
10 __be16 dport); 8 __be16 dport);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 71596261fa99..f1f27fdbb0d5 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -116,51 +116,49 @@ struct linux_xfrm_mib {
116 unsigned long mibs[LINUX_MIB_XFRMMAX]; 116 unsigned long mibs[LINUX_MIB_XFRMMAX];
117}; 117};
118 118
119#define SNMP_ARRAY_SZ 1
120
121#define DEFINE_SNMP_STAT(type, name) \ 119#define DEFINE_SNMP_STAT(type, name) \
122 __typeof__(type) __percpu *name[SNMP_ARRAY_SZ] 120 __typeof__(type) __percpu *name
123#define DEFINE_SNMP_STAT_ATOMIC(type, name) \ 121#define DEFINE_SNMP_STAT_ATOMIC(type, name) \
124 __typeof__(type) *name 122 __typeof__(type) *name
125#define DECLARE_SNMP_STAT(type, name) \ 123#define DECLARE_SNMP_STAT(type, name) \
126 extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ] 124 extern __typeof__(type) __percpu *name
127 125
128#define SNMP_INC_STATS_BH(mib, field) \ 126#define SNMP_INC_STATS_BH(mib, field) \
129 __this_cpu_inc(mib[0]->mibs[field]) 127 __this_cpu_inc(mib->mibs[field])
130 128
131#define SNMP_INC_STATS_USER(mib, field) \ 129#define SNMP_INC_STATS_USER(mib, field) \
132 this_cpu_inc(mib[0]->mibs[field]) 130 this_cpu_inc(mib->mibs[field])
133 131
134#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ 132#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
135 atomic_long_inc(&mib->mibs[field]) 133 atomic_long_inc(&mib->mibs[field])
136 134
137#define SNMP_INC_STATS(mib, field) \ 135#define SNMP_INC_STATS(mib, field) \
138 this_cpu_inc(mib[0]->mibs[field]) 136 this_cpu_inc(mib->mibs[field])
139 137
140#define SNMP_DEC_STATS(mib, field) \ 138#define SNMP_DEC_STATS(mib, field) \
141 this_cpu_dec(mib[0]->mibs[field]) 139 this_cpu_dec(mib->mibs[field])
142 140
143#define SNMP_ADD_STATS_BH(mib, field, addend) \ 141#define SNMP_ADD_STATS_BH(mib, field, addend) \
144 __this_cpu_add(mib[0]->mibs[field], addend) 142 __this_cpu_add(mib->mibs[field], addend)
145 143
146#define SNMP_ADD_STATS_USER(mib, field, addend) \ 144#define SNMP_ADD_STATS_USER(mib, field, addend) \
147 this_cpu_add(mib[0]->mibs[field], addend) 145 this_cpu_add(mib->mibs[field], addend)
148 146
149#define SNMP_ADD_STATS(mib, field, addend) \ 147#define SNMP_ADD_STATS(mib, field, addend) \
150 this_cpu_add(mib[0]->mibs[field], addend) 148 this_cpu_add(mib->mibs[field], addend)
151/* 149/*
152 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr" 150 * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
153 * to make @ptr a non-percpu pointer. 151 * to make @ptr a non-percpu pointer.
154 */ 152 */
155#define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 153#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
156 do { \ 154 do { \
157 __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs; \ 155 __typeof__(*mib->mibs) *ptr = mib->mibs; \
158 this_cpu_inc(ptr[basefield##PKTS]); \ 156 this_cpu_inc(ptr[basefield##PKTS]); \
159 this_cpu_add(ptr[basefield##OCTETS], addend); \ 157 this_cpu_add(ptr[basefield##OCTETS], addend); \
160 } while (0) 158 } while (0)
161#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 159#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
162 do { \ 160 do { \
163 __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs; \ 161 __typeof__(*mib->mibs) *ptr = mib->mibs; \
164 __this_cpu_inc(ptr[basefield##PKTS]); \ 162 __this_cpu_inc(ptr[basefield##PKTS]); \
165 __this_cpu_add(ptr[basefield##OCTETS], addend); \ 163 __this_cpu_add(ptr[basefield##OCTETS], addend); \
166 } while (0) 164 } while (0)
@@ -170,7 +168,7 @@ struct linux_xfrm_mib {
170 168
171#define SNMP_ADD_STATS64_BH(mib, field, addend) \ 169#define SNMP_ADD_STATS64_BH(mib, field, addend) \
172 do { \ 170 do { \
173 __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]); \ 171 __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \
174 u64_stats_update_begin(&ptr->syncp); \ 172 u64_stats_update_begin(&ptr->syncp); \
175 ptr->mibs[field] += addend; \ 173 ptr->mibs[field] += addend; \
176 u64_stats_update_end(&ptr->syncp); \ 174 u64_stats_update_end(&ptr->syncp); \
@@ -191,8 +189,8 @@ struct linux_xfrm_mib {
191#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) 189#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
192#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ 190#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
193 do { \ 191 do { \
194 __typeof__(*mib[0]) *ptr; \ 192 __typeof__(*mib) *ptr; \
195 ptr = __this_cpu_ptr((mib)[0]); \ 193 ptr = __this_cpu_ptr(mib); \
196 u64_stats_update_begin(&ptr->syncp); \ 194 u64_stats_update_begin(&ptr->syncp); \
197 ptr->mibs[basefield##PKTS]++; \ 195 ptr->mibs[basefield##PKTS]++; \
198 ptr->mibs[basefield##OCTETS] += addend; \ 196 ptr->mibs[basefield##OCTETS] += addend; \
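
[Editor's note] With SNMP_ARRAY_SZ gone, a MIB is a plain percpu pointer rather than a one-element array. A short sketch of declaration and update under the new convention; my_mib and my_statistics are made-up names:

/* Sketch: percpu MIB after the one-element-array removal. */
struct my_mib {
	unsigned long mibs[16];
};
DEFINE_SNMP_STAT(struct my_mib, my_statistics);	/* struct my_mib __percpu * */

static void count_rx(void)
{
	/* my_statistics = alloc_percpu(struct my_mib); at init time (not shown) */
	SNMP_INC_STATS_BH(my_statistics, 3);	/* __this_cpu_inc(my_statistics->mibs[3]) */
}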
diff --git a/include/net/sock.h b/include/net/sock.h
index 21569cf456ed..07b7fcd60d80 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -243,7 +243,8 @@ struct cg_proto;
243 * @sk_sndbuf: size of send buffer in bytes 243 * @sk_sndbuf: size of send buffer in bytes
244 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, 244 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
245 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings 245 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
246 * @sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets 246 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
247 * @sk_no_check_rx: allow zero checksum in RX packets
247 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 248 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
248 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) 249 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
249 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) 250 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
@@ -371,7 +372,8 @@ struct sock {
371 struct sk_buff_head sk_write_queue; 372 struct sk_buff_head sk_write_queue;
372 kmemcheck_bitfield_begin(flags); 373 kmemcheck_bitfield_begin(flags);
373 unsigned int sk_shutdown : 2, 374 unsigned int sk_shutdown : 2,
374 sk_no_check : 2, 375 sk_no_check_tx : 1,
376 sk_no_check_rx : 1,
375 sk_userlocks : 4, 377 sk_userlocks : 4,
376 sk_protocol : 8, 378 sk_protocol : 8,
377 sk_type : 16; 379 sk_type : 16;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 87d877408188..7286db80e8b8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -220,8 +220,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
220#define TFO_SERVER_ENABLE 2 220#define TFO_SERVER_ENABLE 2
221#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ 221#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
222 222
223/* Process SYN data but skip cookie validation */
224#define TFO_SERVER_COOKIE_NOT_CHKED 0x100
225/* Accept SYN data w/o any cookie option */ 223/* Accept SYN data w/o any cookie option */
226#define TFO_SERVER_COOKIE_NOT_REQD 0x200 224#define TFO_SERVER_COOKIE_NOT_REQD 0x200
227 225
@@ -230,10 +228,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
230 */ 228 */
231#define TFO_SERVER_WO_SOCKOPT1 0x400 229#define TFO_SERVER_WO_SOCKOPT1 0x400
232#define TFO_SERVER_WO_SOCKOPT2 0x800 230#define TFO_SERVER_WO_SOCKOPT2 0x800
233/* Always create TFO child sockets on a TFO listener even when
234 * cookie/data not present. (For testing purpose!)
235 */
236#define TFO_SERVER_ALWAYS 0x1000
237 231
238extern struct inet_timewait_death_row tcp_death_row; 232extern struct inet_timewait_death_row tcp_death_row;
239 233
@@ -541,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
541void tcp_xmit_retransmit_queue(struct sock *); 535void tcp_xmit_retransmit_queue(struct sock *);
542void tcp_simple_retransmit(struct sock *); 536void tcp_simple_retransmit(struct sock *);
543int tcp_trim_head(struct sock *, struct sk_buff *, u32); 537int tcp_trim_head(struct sock *, struct sk_buff *, u32);
544int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); 538int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
545 539
546void tcp_send_probe0(struct sock *); 540void tcp_send_probe0(struct sock *);
547void tcp_send_partial(struct sock *); 541void tcp_send_partial(struct sock *);
@@ -558,7 +552,6 @@ void tcp_send_loss_probe(struct sock *sk);
558bool tcp_schedule_loss_probe(struct sock *sk); 552bool tcp_schedule_loss_probe(struct sock *sk);
559 553
560/* tcp_input.c */ 554/* tcp_input.c */
561void tcp_cwnd_application_limited(struct sock *sk);
562void tcp_resume_early_retransmit(struct sock *sk); 555void tcp_resume_early_retransmit(struct sock *sk);
563void tcp_rearm_rto(struct sock *sk); 556void tcp_rearm_rto(struct sock *sk);
564void tcp_reset(struct sock *sk); 557void tcp_reset(struct sock *sk);
@@ -797,7 +790,7 @@ struct tcp_congestion_ops {
797 /* return slow start threshold (required) */ 790 /* return slow start threshold (required) */
798 u32 (*ssthresh)(struct sock *sk); 791 u32 (*ssthresh)(struct sock *sk);
799 /* do new cwnd calculation (required) */ 792 /* do new cwnd calculation (required) */
800 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 793 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
801 /* call before changing ca_state (optional) */ 794 /* call before changing ca_state (optional) */
802 void (*set_state)(struct sock *sk, u8 new_state); 795 void (*set_state)(struct sock *sk, u8 new_state);
803 /* call when cwnd event occurs (optional) */ 796 /* call when cwnd event occurs (optional) */
@@ -829,7 +822,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
829 822
830extern struct tcp_congestion_ops tcp_init_congestion_ops; 823extern struct tcp_congestion_ops tcp_init_congestion_ops;
831u32 tcp_reno_ssthresh(struct sock *sk); 824u32 tcp_reno_ssthresh(struct sock *sk);
832void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 825void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
833extern struct tcp_congestion_ops tcp_reno; 826extern struct tcp_congestion_ops tcp_reno;
834 827
835static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) 828static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -975,7 +968,30 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
975{ 968{
976 return tp->snd_una + tp->snd_wnd; 969 return tp->snd_una + tp->snd_wnd;
977} 970}
978bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); 971
972/* We follow the spirit of RFC2861 to validate cwnd but implement a more
973 * flexible approach. The RFC suggests cwnd should not be raised unless
974 * it was fully used previously. And that's exactly what we do in
975 * congestion avoidance mode. But in slow start we allow cwnd to grow
976 * as long as the application has used half the cwnd.
 977 * Example:
 978 * cwnd is 10 (IW10), but the application sends 9 frames.
 979 * We allow cwnd to reach 18 when all frames are ACKed.
 980 * This check is safe because it's as aggressive as slow start, which already
 981 * risks 100% overshoot. The advantage is that we discourage applications from
 982 * either sending filler packets or data to artificially blow up the cwnd
 983 * usage, and allow an application-limited process to probe bw more aggressively.
984 */
985static inline bool tcp_is_cwnd_limited(const struct sock *sk)
986{
987 const struct tcp_sock *tp = tcp_sk(sk);
988
989 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
990 if (tp->snd_cwnd <= tp->snd_ssthresh)
991 return tp->snd_cwnd < 2 * tp->max_packets_out;
992
993 return tp->is_cwnd_limited;
994}
979 995
980static inline void tcp_check_probe_timer(struct sock *sk) 996static inline void tcp_check_probe_timer(struct sock *sk)
981{ 997{
@@ -1103,6 +1119,9 @@ static inline void tcp_openreq_init(struct request_sock *req,
1103 ireq->ir_num = ntohs(tcp_hdr(skb)->dest); 1119 ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
1104} 1120}
1105 1121
1122extern void tcp_openreq_init_rwin(struct request_sock *req,
1123 struct sock *sk, struct dst_entry *dst);
1124
1106void tcp_enter_memory_pressure(struct sock *sk); 1125void tcp_enter_memory_pressure(struct sock *sk);
1107 1126
1108static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1127static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1312,8 +1331,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
1312 1331
1313extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; 1332extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1314int tcp_fastopen_reset_cipher(void *key, unsigned int len); 1333int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1315void tcp_fastopen_cookie_gen(__be32 src, __be32 dst, 1334bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1316 struct tcp_fastopen_cookie *foc); 1335 struct request_sock *req,
1336 struct tcp_fastopen_cookie *foc,
1337 struct dst_entry *dst);
1317void tcp_fastopen_init_key_once(bool publish); 1338void tcp_fastopen_init_key_once(bool publish);
1318#define TCP_FASTOPEN_KEY_LENGTH 16 1339#define TCP_FASTOPEN_KEY_LENGTH 16
1319 1340
diff --git a/include/net/tso.h b/include/net/tso.h
new file mode 100644
index 000000000000..47e5444f7d15
--- /dev/null
+++ b/include/net/tso.h
@@ -0,0 +1,20 @@
1#ifndef _TSO_H
2#define _TSO_H
3
4#include <net/ip.h>
5
6struct tso_t {
7 int next_frag_idx;
8 void *data;
9 size_t size;
10 u16 ip_id;
11 u32 tcp_seq;
12};
13
14int tso_count_descs(struct sk_buff *skb);
15void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
16 int size, bool is_last);
17void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
18void tso_start(struct sk_buff *skb, struct tso_t *tso);
19
20#endif /* _TSO_H */
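
[Editor's note] These helpers are meant to drive a per-segment loop in a NIC's TX path. A sketch of that pattern, with descriptor queueing left as comments (mydrv_queue_desc() is hypothetical) and tso_count_descs() used up front to reserve ring space:

/* Sketch: software TSO using the new helpers. */
static void mydrv_tx_tso(struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	/* reserve tso_count_descs(skb) descriptors first (not shown) */
	tso_start(skb, &tso);

	while (total_len > 0) {
		char hdr[128];	/* per-segment header scratch, illustrative */
		int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);

		total_len -= data_left;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		/* mydrv_queue_desc(hdr, hdr_len); */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* mydrv_queue_desc(tso.data, size); */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}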
diff --git a/include/net/udp.h b/include/net/udp.h
index a24f0f3e107f..2ecfc6e15609 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -95,15 +95,6 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
95 return &table->hash2[hash & table->mask]; 95 return &table->hash2[hash & table->mask];
96} 96}
97 97
98/* Note: this must match 'valbool' in sock_setsockopt */
99#define UDP_CSUM_NOXMIT 1
100
101/* Used by SunRPC/xprt layer. */
102#define UDP_CSUM_NORCV 2
103
104/* Default, as per the RFC, is to always do csums. */
105#define UDP_CSUM_DEFAULT 0
106
107extern struct proto udp_prot; 98extern struct proto udp_prot;
108 99
109extern atomic_long_t udp_memory_allocated; 100extern atomic_long_t udp_memory_allocated;
@@ -156,6 +147,15 @@ static inline __wsum udp_csum(struct sk_buff *skb)
156 return csum; 147 return csum;
157} 148}
158 149
150static inline __sum16 udp_v4_check(int len, __be32 saddr,
151 __be32 daddr, __wsum base)
152{
153 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
154}
155
156void udp_set_csum(bool nocheck, struct sk_buff *skb,
157 __be32 saddr, __be32 daddr, int len);
158
159/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ 159/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
160static inline void udp_lib_hash(struct sock *sk) 160static inline void udp_lib_hash(struct sock *sk)
161{ 161{
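
[Editor's note] udp_set_csum() is aimed at tunnel transmit paths. A sketch of a caller filling the encapsulating header; len covers the UDP header plus payload, and the my_tunnel_* name is hypothetical:

/* Sketch: building an encapsulating UDP header on transmit. */
static void my_tunnel_set_udp(struct sk_buff *skb, __be32 saddr, __be32 daddr,
			      __be16 sport, __be16 dport, bool nocheck)
{
	struct udphdr *uh = udp_hdr(skb);

	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(skb->len);
	uh->check = 0;

	udp_set_csum(nocheck, skb, saddr, daddr, skb->len);
}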
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 5deef1ae78c9..12196ce661d9 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -24,16 +24,26 @@ struct vxlan_sock {
24 struct udp_offload udp_offloads; 24 struct udp_offload udp_offloads;
25}; 25};
26 26
27#define VXLAN_F_LEARN 0x01
28#define VXLAN_F_PROXY 0x02
29#define VXLAN_F_RSC 0x04
30#define VXLAN_F_L2MISS 0x08
31#define VXLAN_F_L3MISS 0x10
32#define VXLAN_F_IPV6 0x20
33#define VXLAN_F_UDP_CSUM 0x40
34#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
35#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
36
27struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, 37struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
28 vxlan_rcv_t *rcv, void *data, 38 vxlan_rcv_t *rcv, void *data,
29 bool no_share, bool ipv6); 39 bool no_share, u32 flags);
30 40
31void vxlan_sock_release(struct vxlan_sock *vs); 41void vxlan_sock_release(struct vxlan_sock *vs);
32 42
33int vxlan_xmit_skb(struct vxlan_sock *vs, 43int vxlan_xmit_skb(struct vxlan_sock *vs,
34 struct rtable *rt, struct sk_buff *skb, 44 struct rtable *rt, struct sk_buff *skb,
35 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 45 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
36 __be16 src_port, __be16 dst_port, __be32 vni); 46 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
37 47
38__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb); 48__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
39 49
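
[Editor's note] Callers of vxlan_sock_add() now express checksum and address-family behaviour through the flags word. A sketch, assuming the usual ERR_PTR return convention and a hypothetical my_vxlan_rcv() handler:

/* Sketch: opening a VXLAN socket with the new flags argument. */
static int my_vxlan_open(struct net *net, vxlan_rcv_t *my_vxlan_rcv)
{
	u32 flags = VXLAN_F_UDP_CSUM;	/* or VXLAN_F_IPV6 | VXLAN_F_UDP_ZERO_CSUM6_RX, etc. */
	struct vxlan_sock *vs;

	vs = vxlan_sock_add(net, htons(4789), my_vxlan_rcv, NULL, false, flags);
	return IS_ERR(vs) ? PTR_ERR(vs) : 0;
}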
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 116e9c7e19cb..721e9c3b11bd 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -691,13 +691,6 @@ struct xfrm_spi_skb_cb {
691 691
692#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) 692#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
693 693
694/* Audit Information */
695struct xfrm_audit {
696 u32 secid;
697 kuid_t loginuid;
698 unsigned int sessionid;
699};
700
701#ifdef CONFIG_AUDITSYSCALL 694#ifdef CONFIG_AUDITSYSCALL
702static inline struct audit_buffer *xfrm_audit_start(const char *op) 695static inline struct audit_buffer *xfrm_audit_start(const char *op)
703{ 696{
@@ -713,30 +706,24 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
713 return audit_buf; 706 return audit_buf;
714} 707}
715 708
716static inline void xfrm_audit_helper_usrinfo(kuid_t auid, unsigned int ses, u32 secid, 709static inline void xfrm_audit_helper_usrinfo(bool task_valid,
717 struct audit_buffer *audit_buf) 710 struct audit_buffer *audit_buf)
718{ 711{
719 char *secctx; 712 const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
720 u32 secctx_len; 713 audit_get_loginuid(current) :
721 714 INVALID_UID);
722 audit_log_format(audit_buf, " auid=%u ses=%u", 715 const unsigned int ses = task_valid ? audit_get_sessionid(current) :
723 from_kuid(&init_user_ns, auid), ses); 716 (unsigned int) -1;
724 if (secid != 0 && 717
725 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) { 718 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
726 audit_log_format(audit_buf, " subj=%s", secctx); 719 audit_log_task_context(audit_buf);
727 security_release_secctx(secctx, secctx_len); 720}
728 } else 721
729 audit_log_task_context(audit_buf); 722void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
730} 723void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
731 724 bool task_valid);
732void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid, 725void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
733 unsigned int ses, u32 secid); 726void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
734void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
735 unsigned int ses, u32 secid);
736void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
737 unsigned int ses, u32 secid);
738void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
739 unsigned int ses, u32 secid);
740void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 727void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
741 struct sk_buff *skb); 728 struct sk_buff *skb);
742void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb, 729void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
@@ -749,22 +736,22 @@ void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
749#else 736#else
750 737
751static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 738static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
752 kuid_t auid, unsigned int ses, u32 secid) 739 bool task_valid)
753{ 740{
754} 741}
755 742
756static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 743static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
757 kuid_t auid, unsigned int ses, u32 secid) 744 bool task_valid)
758{ 745{
759} 746}
760 747
761static inline void xfrm_audit_state_add(struct xfrm_state *x, int result, 748static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
762 kuid_t auid, unsigned int ses, u32 secid) 749 bool task_valid)
763{ 750{
764} 751}
765 752
766static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result, 753static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
767 kuid_t auid, unsigned int ses, u32 secid) 754 bool task_valid)
768{ 755{
769} 756}
770 757
@@ -1508,7 +1495,7 @@ struct xfrmk_spdinfo {
1508 1495
1509struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); 1496struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1510int xfrm_state_delete(struct xfrm_state *x); 1497int xfrm_state_delete(struct xfrm_state *x);
1511int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info); 1498int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
1512void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); 1499void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1513void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); 1500void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1514u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); 1501u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1603,7 +1590,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1603 int *err); 1590 int *err);
1604struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, 1591struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
1605 u32 id, int delete, int *err); 1592 u32 id, int delete, int *err);
1606int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info); 1593int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1607u32 xfrm_get_acqseq(void); 1594u32 xfrm_get_acqseq(void);
1608int verify_spi_info(u8 proto, u32 min, u32 max); 1595int verify_spi_info(u8 proto, u32 min, u32 max);
1609int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); 1596int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 4c31a366be16..cf6714752b69 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -385,6 +385,14 @@ enum {
385 */ 385 */
386#define AUDIT_MESSAGE_TEXT_MAX 8560 386#define AUDIT_MESSAGE_TEXT_MAX 8560
387 387
388/* Multicast Netlink socket groups (default up to 32) */
389enum audit_nlgrps {
390 AUDIT_NLGRP_NONE, /* Group 0 not used */
391 AUDIT_NLGRP_READLOG, /* "best effort" read only socket */
392 __AUDIT_NLGRP_MAX
393};
394#define AUDIT_NLGRP_MAX (__AUDIT_NLGRP_MAX - 1)
395
388struct audit_status { 396struct audit_status {
389 __u32 mask; /* Bit mask for valid entries */ 397 __u32 mask; /* Bit mask for valid entries */
390 __u32 enabled; /* 1 = enabled, 0 = disabled */ 398 __u32 enabled; /* 1 = enabled, 0 = disabled */
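
[Editor's note] Userspace joins the new group with an ordinary netlink multicast subscription, and needs the CAP_AUDIT_READ capability added later in this patch. A hedged userspace sketch:

/* Sketch: subscribe to the best-effort audit log multicast group. */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/audit.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270		/* not exposed by every libc */
#endif

int audit_log_subscribe(void)
{
	int grp = AUDIT_NLGRP_READLOG;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		return -1;	/* close(fd) omitted for brevity */
	return fd;		/* read records with recv() */
}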
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 5d9d1d140718..41892f720057 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -42,8 +42,8 @@
42 * DAMAGE. 42 * DAMAGE.
43 */ 43 */
44 44
45#ifndef CAN_H 45#ifndef _UAPI_CAN_H
46#define CAN_H 46#define _UAPI_CAN_H
47 47
48#include <linux/types.h> 48#include <linux/types.h>
49#include <linux/socket.h> 49#include <linux/socket.h>
@@ -191,4 +191,4 @@ struct can_filter {
191 191
192#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ 192#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
193 193
194#endif /* CAN_H */ 194#endif /* !_UAPI_CAN_H */
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 382251a1d214..89ddb9dc9bdf 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -41,8 +41,8 @@
41 * DAMAGE. 41 * DAMAGE.
42 */ 42 */
43 43
44#ifndef CAN_BCM_H 44#ifndef _UAPI_CAN_BCM_H
45#define CAN_BCM_H 45#define _UAPI_CAN_BCM_H
46 46
47#include <linux/types.h> 47#include <linux/types.h>
48#include <linux/can.h> 48#include <linux/can.h>
@@ -95,4 +95,4 @@ enum {
95#define TX_RESET_MULTI_IDX 0x0200 95#define TX_RESET_MULTI_IDX 0x0200
96#define RX_RTR_FRAME 0x0400 96#define RX_RTR_FRAME 0x0400
97 97
98#endif /* CAN_BCM_H */ 98#endif /* !_UAPI_CAN_BCM_H */
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index b63204545320..c247446ab25a 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -41,8 +41,8 @@
41 * DAMAGE. 41 * DAMAGE.
42 */ 42 */
43 43
44#ifndef CAN_ERROR_H 44#ifndef _UAPI_CAN_ERROR_H
45#define CAN_ERROR_H 45#define _UAPI_CAN_ERROR_H
46 46
47#define CAN_ERR_DLC 8 /* dlc for error message frames */ 47#define CAN_ERR_DLC 8 /* dlc for error message frames */
48 48
@@ -120,4 +120,4 @@
120 120
121/* controller specific additional information / data[5..7] */ 121/* controller specific additional information / data[5..7] */
122 122
123#endif /* CAN_ERROR_H */ 123#endif /* _UAPI_CAN_ERROR_H */
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 844c8964bdfe..3e6184cf2f6d 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -41,8 +41,8 @@
41 * DAMAGE. 41 * DAMAGE.
42 */ 42 */
43 43
44#ifndef CAN_GW_H 44#ifndef _UAPI_CAN_GW_H
45#define CAN_GW_H 45#define _UAPI_CAN_GW_H
46 46
47#include <linux/types.h> 47#include <linux/types.h>
48#include <linux/can.h> 48#include <linux/can.h>
@@ -200,4 +200,4 @@ enum {
200 * Beware of sending unpacked or aligned structs! 200 * Beware of sending unpacked or aligned structs!
201 */ 201 */
202 202
203#endif 203#endif /* !_UAPI_CAN_GW_H */
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 7e2e1863db16..813d11f54977 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -15,8 +15,8 @@
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 */ 16 */
17 17
18#ifndef CAN_NETLINK_H 18#ifndef _UAPI_CAN_NETLINK_H
19#define CAN_NETLINK_H 19#define _UAPI_CAN_NETLINK_H
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22 22
@@ -130,4 +130,4 @@ enum {
130 130
131#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1) 131#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
132 132
133#endif /* CAN_NETLINK_H */ 133#endif /* !_UAPI_CAN_NETLINK_H */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index c7d8c334e0ce..78ec76fd89a6 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -42,8 +42,8 @@
42 * DAMAGE. 42 * DAMAGE.
43 */ 43 */
44 44
45#ifndef CAN_RAW_H 45#ifndef _UAPI_CAN_RAW_H
46#define CAN_RAW_H 46#define _UAPI_CAN_RAW_H
47 47
48#include <linux/can.h> 48#include <linux/can.h>
49 49
@@ -59,4 +59,4 @@ enum {
59 CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */ 59 CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
60}; 60};
61 61
62#endif 62#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 154dd6d3c8fe..12c37a197d24 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -347,7 +347,12 @@ struct vfs_cap_data {
347 347
348#define CAP_BLOCK_SUSPEND 36 348#define CAP_BLOCK_SUSPEND 36
349 349
350#define CAP_LAST_CAP CAP_BLOCK_SUSPEND 350/* Allow reading the audit log via multicast netlink socket */
351
352#define CAP_AUDIT_READ 37
353
354
355#define CAP_LAST_CAP CAP_AUDIT_READ
351 356
352#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) 357#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
353 358
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index fd161e91b6d7..e3c7a719c76b 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -847,6 +847,38 @@ struct ethtool_rxfh_indir {
847}; 847};
848 848
849/** 849/**
 850 * struct ethtool_rxfh - command to get/set RX flow hash indir and/or hash key.
851 * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH
852 * @rss_context: RSS context identifier.
853 * @indir_size: On entry, the array size of the user buffer for the
854 * indirection table, which may be zero, or (for %ETHTOOL_SRSSH),
855 * %ETH_RXFH_INDIR_NO_CHANGE. On return from %ETHTOOL_GRSSH,
856 * the array size of the hardware indirection table.
857 * @key_size: On entry, the array size of the user buffer for the hash key,
858 * which may be zero. On return from %ETHTOOL_GRSSH, the size of the
859 * hardware hash key.
860 * @rsvd: Reserved for future extensions.
861 * @rss_config: RX ring/queue index for each hash value i.e., indirection table
862 * of @indir_size __u32 elements, followed by hash key of @key_size
863 * bytes.
864 *
 865 * For %ETHTOOL_GRSSH, an @indir_size and key_size of zero means that only the
 866 * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of
 867 * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested
 868 * and an @indir_size of zero means the indir table should be reset to default
869 * values.
870 */
871struct ethtool_rxfh {
872 __u32 cmd;
873 __u32 rss_context;
874 __u32 indir_size;
875 __u32 key_size;
876 __u32 rsvd[2];
877 __u32 rss_config[0];
878};
879#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
880
881/**
850 * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter 882 * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter
851 * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW 883 * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW
852 * @h_u: Flow field values to match (dependent on @flow_type) 884 * @h_u: Flow field values to match (dependent on @flow_type)
@@ -1118,6 +1150,9 @@ enum ethtool_sfeatures_retval_bits {
1118#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */ 1150#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */
1119#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */ 1151#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */
1120 1152
1153#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */
1154#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */
1155
1121/* compatibility with older code */ 1156/* compatibility with older code */
1122#define SPARC_ETH_GSET ETHTOOL_GSET 1157#define SPARC_ETH_GSET ETHTOOL_GSET
1123#define SPARC_ETH_SSET ETHTOOL_SSET 1158#define SPARC_ETH_SSET ETHTOOL_SSET
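As a hedged userspace sketch of the two-step pattern the ethtool_rxfh comment above describes — issue ETHTOOL_GRSSH with @indir_size and @key_size of zero to learn the hardware sizes, then allocate and re-issue — assuming kernel headers containing this patch; the interface name "eth0" is an assumption:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_rxfh rxfh = { .cmd = ETHTOOL_GRSSH }; /* sizes only */
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed device */
            ifr.ifr_data = (void *)&rxfh;
            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("indir entries: %u, key bytes: %u\n",
                           rxfh.indir_size, rxfh.key_size);
            return 0;
    }

A second call would then pass a buffer with @indir_size and @key_size set to the returned values so that rss_config[] receives the table and key.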
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 8eb9ccaa5b48..253b4d42cf2b 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -130,7 +130,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
130#define SKF_AD_VLAN_TAG 44 130#define SKF_AD_VLAN_TAG 44
131#define SKF_AD_VLAN_TAG_PRESENT 48 131#define SKF_AD_VLAN_TAG_PRESENT 48
132#define SKF_AD_PAY_OFFSET 52 132#define SKF_AD_PAY_OFFSET 52
133#define SKF_AD_MAX 56 133#define SKF_AD_RANDOM 56
134#define SKF_AD_MAX 60
134#define SKF_NET_OFF (-0x100000) 135#define SKF_NET_OFF (-0x100000)
135#define SKF_LL_OFF (-0x200000) 136#define SKF_LL_OFF (-0x200000)
136 137
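The new SKF_AD_RANDOM word is loaded like the other ancillary data, via an absolute load at SKF_AD_OFF + SKF_AD_RANDOM. A sketch of a classic BPF program using it for probabilistic packet sampling, assuming attachment with SO_ATTACH_FILTER:

    #include <linux/filter.h>

    static struct sock_filter sample_insns[] = {
            /* A <- pseudo-random 32-bit word (new SKF_AD_RANDOM) */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RANDOM),
            /* keep the low two bits: A <- A & 3 */
            BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 3),
            /* pass roughly one packet in four */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* accept whole packet */
            BPF_STMT(BPF_RET | BPF_K, 0),           /* drop */
    };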
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 0d36909c3aef..1086cd9f6754 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -30,74 +30,76 @@
30 * Define max and min legal sizes. The frame sizes do not include 30 * Define max and min legal sizes. The frame sizes do not include
31 * 4 byte FCS/CRC (frame check sequence). 31 * 4 byte FCS/CRC (frame check sequence).
32 */ 32 */
33#define FDDI_K_ALEN 6 /* Octets in one FDDI address */ 33#define FDDI_K_ALEN 6 /* Octets in one FDDI address */
34#define FDDI_K_8022_HLEN 16 /* Total octets in 802.2 header */ 34#define FDDI_K_8022_HLEN 16 /* Total octets in 802.2 header */
35#define FDDI_K_SNAP_HLEN 21 /* Total octets in 802.2 SNAP header */ 35#define FDDI_K_SNAP_HLEN 21 /* Total octets in 802.2 SNAP header */
36#define FDDI_K_8022_ZLEN 16 /* Min octets in 802.2 frame sans FCS */ 36#define FDDI_K_8022_ZLEN 16 /* Min octets in 802.2 frame sans
37#define FDDI_K_SNAP_ZLEN 21 /* Min octets in 802.2 SNAP frame sans FCS */ 37 FCS */
38#define FDDI_K_SNAP_ZLEN 21 /* Min octets in 802.2 SNAP frame sans
39 FCS */
38#define FDDI_K_8022_DLEN 4475 /* Max octets in 802.2 payload */ 40#define FDDI_K_8022_DLEN 4475 /* Max octets in 802.2 payload */
39#define FDDI_K_SNAP_DLEN 4470 /* Max octets in 802.2 SNAP payload */ 41#define FDDI_K_SNAP_DLEN 4470 /* Max octets in 802.2 SNAP payload */
40#define FDDI_K_LLC_ZLEN 13 /* Min octets in LLC frame sans FCS */ 42#define FDDI_K_LLC_ZLEN 13 /* Min octets in LLC frame sans FCS */
41#define FDDI_K_LLC_LEN 4491 /* Max octets in LLC frame sans FCS */ 43#define FDDI_K_LLC_LEN 4491 /* Max octets in LLC frame sans FCS */
44#define FDDI_K_OUI_LEN 3 /* Octets in OUI in 802.2 SNAP
45 header */
42 46
43/* Define FDDI Frame Control (FC) Byte values */ 47/* Define FDDI Frame Control (FC) Byte values */
44#define FDDI_FC_K_VOID 0x00 48#define FDDI_FC_K_VOID 0x00
45#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80 49#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
46#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0 50#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
47#define FDDI_FC_K_SMT_MIN 0x41 51#define FDDI_FC_K_SMT_MIN 0x41
48#define FDDI_FC_K_SMT_MAX 0x4F 52#define FDDI_FC_K_SMT_MAX 0x4F
49#define FDDI_FC_K_MAC_MIN 0xC1 53#define FDDI_FC_K_MAC_MIN 0xC1
50#define FDDI_FC_K_MAC_MAX 0xCF 54#define FDDI_FC_K_MAC_MAX 0xCF
51#define FDDI_FC_K_ASYNC_LLC_MIN 0x50 55#define FDDI_FC_K_ASYNC_LLC_MIN 0x50
52#define FDDI_FC_K_ASYNC_LLC_DEF 0x54 56#define FDDI_FC_K_ASYNC_LLC_DEF 0x54
53#define FDDI_FC_K_ASYNC_LLC_MAX 0x5F 57#define FDDI_FC_K_ASYNC_LLC_MAX 0x5F
54#define FDDI_FC_K_SYNC_LLC_MIN 0xD0 58#define FDDI_FC_K_SYNC_LLC_MIN 0xD0
55#define FDDI_FC_K_SYNC_LLC_MAX 0xD7 59#define FDDI_FC_K_SYNC_LLC_MAX 0xD7
56#define FDDI_FC_K_IMPLEMENTOR_MIN 0x60 60#define FDDI_FC_K_IMPLEMENTOR_MIN 0x60
57#define FDDI_FC_K_IMPLEMENTOR_MAX 0x6F 61#define FDDI_FC_K_IMPLEMENTOR_MAX 0x6F
58#define FDDI_FC_K_RESERVED_MIN 0x70 62#define FDDI_FC_K_RESERVED_MIN 0x70
59#define FDDI_FC_K_RESERVED_MAX 0x7F 63#define FDDI_FC_K_RESERVED_MAX 0x7F
60 64
61/* Define LLC and SNAP constants */ 65/* Define LLC and SNAP constants */
62#define FDDI_EXTENDED_SAP 0xAA 66#define FDDI_EXTENDED_SAP 0xAA
63#define FDDI_UI_CMD 0x03 67#define FDDI_UI_CMD 0x03
64 68
65/* Define 802.2 Type 1 header */ 69/* Define 802.2 Type 1 header */
66struct fddi_8022_1_hdr { 70struct fddi_8022_1_hdr {
67 __u8 dsap; /* destination service access point */ 71 __u8 dsap; /* destination service access point */
68 __u8 ssap; /* source service access point */ 72 __u8 ssap; /* source service access point */
69 __u8 ctrl; /* control byte #1 */ 73 __u8 ctrl; /* control byte #1 */
70} __attribute__((packed)); 74} __attribute__((packed));
71 75
72/* Define 802.2 Type 2 header */ 76/* Define 802.2 Type 2 header */
73struct fddi_8022_2_hdr { 77struct fddi_8022_2_hdr {
74 __u8 dsap; /* destination service access point */ 78 __u8 dsap; /* destination service access point */
75 __u8 ssap; /* source service access point */ 79 __u8 ssap; /* source service access point */
76 __u8 ctrl_1; /* control byte #1 */ 80 __u8 ctrl_1; /* control byte #1 */
77 __u8 ctrl_2; /* control byte #2 */ 81 __u8 ctrl_2; /* control byte #2 */
78} __attribute__((packed)); 82} __attribute__((packed));
79 83
80/* Define 802.2 SNAP header */ 84/* Define 802.2 SNAP header */
81#define FDDI_K_OUI_LEN 3
82struct fddi_snap_hdr { 85struct fddi_snap_hdr {
83 __u8 dsap; /* always 0xAA */ 86 __u8 dsap; /* always 0xAA */
84 __u8 ssap; /* always 0xAA */ 87 __u8 ssap; /* always 0xAA */
85 __u8 ctrl; /* always 0x03 */ 88 __u8 ctrl; /* always 0x03 */
86 __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ 89 __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
87 __be16 ethertype; /* packet type ID field */ 90 __be16 ethertype; /* packet type ID field */
88} __attribute__((packed)); 91} __attribute__((packed));
89 92
90/* Define FDDI LLC frame header */ 93/* Define FDDI LLC frame header */
91struct fddihdr { 94struct fddihdr {
92 __u8 fc; /* frame control */ 95 __u8 fc; /* frame control */
93 __u8 daddr[FDDI_K_ALEN]; /* destination address */ 96 __u8 daddr[FDDI_K_ALEN]; /* destination address */
94 __u8 saddr[FDDI_K_ALEN]; /* source address */ 97 __u8 saddr[FDDI_K_ALEN]; /* source address */
95 union 98 union {
96 { 99 struct fddi_8022_1_hdr llc_8022_1;
97 struct fddi_8022_1_hdr llc_8022_1; 100 struct fddi_8022_2_hdr llc_8022_2;
98 struct fddi_8022_2_hdr llc_8022_2; 101 struct fddi_snap_hdr llc_snap;
99 struct fddi_snap_hdr llc_snap; 102 } hdr;
100 } hdr;
101} __attribute__((packed)); 103} __attribute__((packed));
102 104
103 105
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 9a7f7ace6649..b38534895db5 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -319,6 +319,9 @@ enum {
319 IFLA_VXLAN_PORT, /* destination port */ 319 IFLA_VXLAN_PORT, /* destination port */
320 IFLA_VXLAN_GROUP6, 320 IFLA_VXLAN_GROUP6,
321 IFLA_VXLAN_LOCAL6, 321 IFLA_VXLAN_LOCAL6,
322 IFLA_VXLAN_UDP_CSUM,
323 IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
324 IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
322 __IFLA_VXLAN_MAX 325 __IFLA_VXLAN_MAX
323}; 326};
324#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) 327#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -399,9 +402,10 @@ enum {
399 IFLA_VF_UNSPEC, 402 IFLA_VF_UNSPEC,
400 IFLA_VF_MAC, /* Hardware queue specific attributes */ 403 IFLA_VF_MAC, /* Hardware queue specific attributes */
401 IFLA_VF_VLAN, 404 IFLA_VF_VLAN,
402 IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ 405 IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
403 IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ 406 IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
404 IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ 407 IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
408 IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
405 __IFLA_VF_MAX, 409 __IFLA_VF_MAX,
406}; 410};
407 411
@@ -423,6 +427,12 @@ struct ifla_vf_tx_rate {
423 __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ 427 __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
424}; 428};
425 429
430struct ifla_vf_rate {
431 __u32 vf;
432 __u32 min_tx_rate; /* Min Bandwidth in Mbps */
433 __u32 max_tx_rate; /* Max Bandwidth in Mbps */
434};
435
426struct ifla_vf_spoofchk { 436struct ifla_vf_spoofchk {
427 __u32 vf; 437 __u32 vf;
428 __u32 setting; 438 __u32 setting;
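For illustration, the payload a tool such as ip(8) would place in the new IFLA_VF_RATE attribute; the VF index and Mbps values below are made-up examples, and 0 leaves a bound unlimited:

    #include <linux/if_link.h>

    struct ifla_vf_rate rate = {
            .vf          = 0,    /* assumed VF index */
            .min_tx_rate = 100,  /* guaranteed floor, Mbps */
            .max_tx_rate = 1000, /* cap, Mbps; 0 would mean no limit */
    };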
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index aee73d0611fb..3bce9e9d9f7c 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -100,7 +100,7 @@ enum {
100#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1) 100#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1)
101 101
102/* VTI-mode i_flags */ 102/* VTI-mode i_flags */
103#define VTI_ISVTI 0x0001 103#define VTI_ISVTI ((__force __be16)0x0001)
104 104
105enum { 105enum {
106 IFLA_VTI_UNSPEC, 106 IFLA_VTI_UNSPEC,
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 8adb68160327..21caa2631c20 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -124,6 +124,8 @@ enum {
124 L2TP_ATTR_STATS, /* nested */ 124 L2TP_ATTR_STATS, /* nested */
125 L2TP_ATTR_IP6_SADDR, /* struct in6_addr */ 125 L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
126 L2TP_ATTR_IP6_DADDR, /* struct in6_addr */ 126 L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
127 L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* u8 */
128 L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* u8 */
127 __L2TP_ATTR_MAX, 129 __L2TP_ATTR_MAX,
128}; 130};
129 131
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index d3ef583104e0..4a1d7e96dfe3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -24,6 +24,7 @@ enum {
24 NDA_PORT, 24 NDA_PORT,
25 NDA_VNI, 25 NDA_VNI,
26 NDA_IFINDEX, 26 NDA_IFINDEX,
27 NDA_MASTER,
27 __NDA_MAX 28 __NDA_MAX
28}; 29};
29 30
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c88ccbfda5f1..2a88f645a5d8 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -212,6 +212,29 @@ enum nft_set_flags {
212}; 212};
213 213
214/** 214/**
215 * enum nft_set_policies - set selection policy
216 *
217 * @NFT_SET_POL_PERFORMANCE: prefer high performance over low memory use
218 * @NFT_SET_POL_MEMORY: prefer low memory use over high performance
219 */
220enum nft_set_policies {
221 NFT_SET_POL_PERFORMANCE,
222 NFT_SET_POL_MEMORY,
223};
224
225/**
226 * enum nft_set_desc_attributes - set element description
227 *
228 * @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32)
229 */
230enum nft_set_desc_attributes {
231 NFTA_SET_DESC_UNSPEC,
232 NFTA_SET_DESC_SIZE,
233 __NFTA_SET_DESC_MAX
234};
235#define NFTA_SET_DESC_MAX (__NFTA_SET_DESC_MAX - 1)
236
237/**
215 * enum nft_set_attributes - nf_tables set netlink attributes 238 * enum nft_set_attributes - nf_tables set netlink attributes
216 * 239 *
217 * @NFTA_SET_TABLE: table name (NLA_STRING) 240 * @NFTA_SET_TABLE: table name (NLA_STRING)
@@ -221,6 +244,9 @@ enum nft_set_flags {
221 * @NFTA_SET_KEY_LEN: key data length (NLA_U32) 244 * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
222 * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32) 245 * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
223 * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32) 246 * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
247 * @NFTA_SET_POLICY: selection policy (NLA_U32)
248 * @NFTA_SET_DESC: set description (NLA_NESTED)
249 * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
224 */ 250 */
225enum nft_set_attributes { 251enum nft_set_attributes {
226 NFTA_SET_UNSPEC, 252 NFTA_SET_UNSPEC,
@@ -231,6 +257,9 @@ enum nft_set_attributes {
231 NFTA_SET_KEY_LEN, 257 NFTA_SET_KEY_LEN,
232 NFTA_SET_DATA_TYPE, 258 NFTA_SET_DATA_TYPE,
233 NFTA_SET_DATA_LEN, 259 NFTA_SET_DATA_LEN,
260 NFTA_SET_POLICY,
261 NFTA_SET_DESC,
262 NFTA_SET_ID,
234 __NFTA_SET_MAX 263 __NFTA_SET_MAX
235}; 264};
236#define NFTA_SET_MAX (__NFTA_SET_MAX - 1) 265#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -266,12 +295,14 @@ enum nft_set_elem_attributes {
266 * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING) 295 * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
267 * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING) 296 * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
268 * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes) 297 * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
298 * @NFTA_SET_ELEM_LIST_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
269 */ 299 */
270enum nft_set_elem_list_attributes { 300enum nft_set_elem_list_attributes {
271 NFTA_SET_ELEM_LIST_UNSPEC, 301 NFTA_SET_ELEM_LIST_UNSPEC,
272 NFTA_SET_ELEM_LIST_TABLE, 302 NFTA_SET_ELEM_LIST_TABLE,
273 NFTA_SET_ELEM_LIST_SET, 303 NFTA_SET_ELEM_LIST_SET,
274 NFTA_SET_ELEM_LIST_ELEMENTS, 304 NFTA_SET_ELEM_LIST_ELEMENTS,
305 NFTA_SET_ELEM_LIST_SET_ID,
275 __NFTA_SET_ELEM_LIST_MAX 306 __NFTA_SET_ELEM_LIST_MAX
276}; 307};
277#define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1) 308#define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
@@ -457,12 +488,14 @@ enum nft_cmp_attributes {
457 * @NFTA_LOOKUP_SET: name of the set where to look for (NLA_STRING) 488 * @NFTA_LOOKUP_SET: name of the set where to look for (NLA_STRING)
458 * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers) 489 * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
459 * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers) 490 * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
491 * @NFTA_LOOKUP_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
460 */ 492 */
461enum nft_lookup_attributes { 493enum nft_lookup_attributes {
462 NFTA_LOOKUP_UNSPEC, 494 NFTA_LOOKUP_UNSPEC,
463 NFTA_LOOKUP_SET, 495 NFTA_LOOKUP_SET,
464 NFTA_LOOKUP_SREG, 496 NFTA_LOOKUP_SREG,
465 NFTA_LOOKUP_DREG, 497 NFTA_LOOKUP_DREG,
498 NFTA_LOOKUP_SET_ID,
466 __NFTA_LOOKUP_MAX 499 __NFTA_LOOKUP_MAX
467}; 500};
468#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1) 501#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
@@ -536,6 +569,8 @@ enum nft_exthdr_attributes {
536 * @NFT_META_SECMARK: packet secmark (skb->secmark) 569 * @NFT_META_SECMARK: packet secmark (skb->secmark)
537 * @NFT_META_NFPROTO: netfilter protocol 570 * @NFT_META_NFPROTO: netfilter protocol
538 * @NFT_META_L4PROTO: layer 4 protocol number 571 * @NFT_META_L4PROTO: layer 4 protocol number
572 * @NFT_META_BRI_IIFNAME: packet input bridge interface name
573 * @NFT_META_BRI_OIFNAME: packet output bridge interface name
539 */ 574 */
540enum nft_meta_keys { 575enum nft_meta_keys {
541 NFT_META_LEN, 576 NFT_META_LEN,
@@ -555,6 +590,8 @@ enum nft_meta_keys {
555 NFT_META_SECMARK, 590 NFT_META_SECMARK,
556 NFT_META_NFPROTO, 591 NFT_META_NFPROTO,
557 NFT_META_L4PROTO, 592 NFT_META_L4PROTO,
593 NFT_META_BRI_IIFNAME,
594 NFT_META_BRI_OIFNAME,
558}; 595};
559 596
560/** 597/**
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 596ddd45253c..354a7e5e50f2 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -20,6 +20,8 @@ enum nfnetlink_groups {
20#define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY 20#define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY
21 NFNLGRP_NFTABLES, 21 NFNLGRP_NFTABLES,
22#define NFNLGRP_NFTABLES NFNLGRP_NFTABLES 22#define NFNLGRP_NFTABLES NFNLGRP_NFTABLES
23 NFNLGRP_ACCT_QUOTA,
24#define NFNLGRP_ACCT_QUOTA NFNLGRP_ACCT_QUOTA
23 __NFNLGRP_MAX, 25 __NFNLGRP_MAX,
24}; 26};
25#define NFNLGRP_MAX (__NFNLGRP_MAX - 1) 27#define NFNLGRP_MAX (__NFNLGRP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index c7b6269e760b..51404ec19022 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -10,15 +10,24 @@ enum nfnl_acct_msg_types {
10 NFNL_MSG_ACCT_GET, 10 NFNL_MSG_ACCT_GET,
11 NFNL_MSG_ACCT_GET_CTRZERO, 11 NFNL_MSG_ACCT_GET_CTRZERO,
12 NFNL_MSG_ACCT_DEL, 12 NFNL_MSG_ACCT_DEL,
13 NFNL_MSG_ACCT_OVERQUOTA,
13 NFNL_MSG_ACCT_MAX 14 NFNL_MSG_ACCT_MAX
14}; 15};
15 16
17enum nfnl_acct_flags {
18 NFACCT_F_QUOTA_PKTS = (1 << 0),
19 NFACCT_F_QUOTA_BYTES = (1 << 1),
20 NFACCT_F_OVERQUOTA = (1 << 2), /* can't be set from userspace */
21};
22
16enum nfnl_acct_type { 23enum nfnl_acct_type {
17 NFACCT_UNSPEC, 24 NFACCT_UNSPEC,
18 NFACCT_NAME, 25 NFACCT_NAME,
19 NFACCT_PKTS, 26 NFACCT_PKTS,
20 NFACCT_BYTES, 27 NFACCT_BYTES,
21 NFACCT_USE, 28 NFACCT_USE,
29 NFACCT_FLAGS,
30 NFACCT_QUOTA,
22 __NFACCT_MAX 31 __NFACCT_MAX
23}; 32};
24#define NFACCT_MAX (__NFACCT_MAX - 1) 33#define NFACCT_MAX (__NFACCT_MAX - 1)
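A small sketch of how a listener on the new NFNLGRP_ACCT_QUOTA group might interpret the NFACCT_FLAGS value it receives; note the comment above that NFACCT_F_OVERQUOTA can only be set by the kernel:

    #include <linux/netfilter/nfnetlink_acct.h>

    /* nonzero once the kernel has flagged a quota object as exceeded */
    static int nfacct_over_quota(__u32 flags)
    {
            return (flags & NFACCT_F_OVERQUOTA) &&
                   (flags & (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES));
    }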
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 9789dc95b6a8..9b19b4461928 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -273,11 +273,19 @@ struct sockaddr_nfc_llcp {
273 * First byte is the adapter index 273 * First byte is the adapter index
274 * Second byte contains flags 274 * Second byte contains flags
275 * - 0x01 - Direction (0=RX, 1=TX) 275 * - 0x01 - Direction (0=RX, 1=TX)
276 * - 0x02-0x80 - Reserved 276 * - 0x02-0x04 - Payload type (000=LLCP, 001=NCI, 010=HCI, 011=Digital,
277 * 100=Proprietary)
278 * - 0x05-0x80 - Reserved
277 **/ 279 **/
278#define NFC_LLCP_RAW_HEADER_SIZE 2 280#define NFC_RAW_HEADER_SIZE 2
279#define NFC_LLCP_DIRECTION_RX 0x00 281#define NFC_DIRECTION_RX 0x00
280#define NFC_LLCP_DIRECTION_TX 0x01 282#define NFC_DIRECTION_TX 0x01
283
284#define RAW_PAYLOAD_LLCP 0
285#define RAW_PAYLOAD_NCI 1
286#define RAW_PAYLOAD_HCI 2
287#define RAW_PAYLOAD_DIGITAL 3
288#define RAW_PAYLOAD_PROPRIETARY 4
281 289
282/* socket option names */ 290/* socket option names */
283#define NFC_LLCP_RW 0 291#define NFC_LLCP_RW 0
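A hedged sketch decoding the two-byte header described above for frames read from an NFC raw socket; the buffer handling around it is assumed:

    #include <stdio.h>
    #include <linux/nfc.h>

    static void decode_raw_header(const unsigned char *buf)
    {
            unsigned int adapter = buf[0];
            unsigned int dir     = buf[1] & 0x01;        /* NFC_DIRECTION_* */
            unsigned int payload = (buf[1] >> 1) & 0x07; /* RAW_PAYLOAD_* */

            printf("adapter %u %s payload-type %u\n",
                   adapter, dir == NFC_DIRECTION_TX ? "TX" : "RX", payload);
            /* payload data follows at buf + NFC_RAW_HEADER_SIZE */
    }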
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 194c1eab04d8..be9519b52bb1 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -503,6 +503,9 @@
503 * TX status event pertaining to the TX request. 503 * TX status event pertaining to the TX request.
504 * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the 504 * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
505 * management frames at CCK rate or not in 2GHz band. 505 * management frames at CCK rate or not in 2GHz band.
506 * %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA
507 * counters which will be updated to the current value. This attribute
508 * is used during CSA period.
506 * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this 509 * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this
507 * command may be used with the corresponding cookie to cancel the wait 510 * command may be used with the corresponding cookie to cancel the wait
508 * time if it is known that it is no longer necessary. 511 * time if it is known that it is no longer necessary.
@@ -1525,10 +1528,10 @@ enum nl80211_commands {
1525 * operation). 1528 * operation).
1526 * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information 1529 * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
1527 * for the time while performing a channel switch. 1530 * for the time while performing a channel switch.
1528 * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter 1531 * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel
1529 * field in the beacons tail (%NL80211_ATTR_BEACON_TAIL). 1532 * switch counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
1530 * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter 1533 * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel
1531 * field in the probe response (%NL80211_ATTR_PROBE_RESP). 1534 * switch counters in the probe response (%NL80211_ATTR_PROBE_RESP).
1532 * 1535 *
1533 * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32. 1536 * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
1534 * As specified in the &enum nl80211_rxmgmt_flags. 1537 * As specified in the &enum nl80211_rxmgmt_flags.
@@ -1576,9 +1579,18 @@ enum nl80211_commands {
1576 * advertise values that cannot always be met. In such cases, an attempt 1579 * advertise values that cannot always be met. In such cases, an attempt
1577 * to add a new station entry with @NL80211_CMD_NEW_STATION may fail. 1580 * to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
1578 * 1581 *
1582 * @NL80211_ATTR_CSA_C_OFFSETS_TX: An array of csa counter offsets (u16) which
1583 * should be updated when the frame is transmitted.
1584 * @NL80211_ATTR_MAX_CSA_COUNTERS: U8 attribute used to advertise the maximum
1585 * supported number of csa counters.
1586 *
1579 * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. 1587 * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
1580 * As specified in the &enum nl80211_tdls_peer_capability. 1588 * As specified in the &enum nl80211_tdls_peer_capability.
1581 * 1589 *
1590 * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
1591 * creation then the new interface will be owned by the netlink socket
1592 * that created it and will be destroyed when the socket is closed
1593 *
1582 * @NL80211_ATTR_MAX: highest attribute number currently defined 1594 * @NL80211_ATTR_MAX: highest attribute number currently defined
1583 * @__NL80211_ATTR_AFTER_LAST: internal use 1595 * @__NL80211_ATTR_AFTER_LAST: internal use
1584 */ 1596 */
@@ -1914,6 +1926,11 @@ enum nl80211_attrs {
1914 1926
1915 NL80211_ATTR_TDLS_PEER_CAPABILITY, 1927 NL80211_ATTR_TDLS_PEER_CAPABILITY,
1916 1928
1929 NL80211_ATTR_IFACE_SOCKET_OWNER,
1930
1931 NL80211_ATTR_CSA_C_OFFSETS_TX,
1932 NL80211_ATTR_MAX_CSA_COUNTERS,
1933
1917 /* add attributes here, update the policy in nl80211.c */ 1934 /* add attributes here, update the policy in nl80211.c */
1918 1935
1919 __NL80211_ATTR_AFTER_LAST, 1936 __NL80211_ATTR_AFTER_LAST,
@@ -2182,6 +2199,8 @@ enum nl80211_sta_bss_param {
2182 * Contains a nested array of signal strength attributes (u8, dBm) 2199 * Contains a nested array of signal strength attributes (u8, dBm)
2183 * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average 2200 * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average
2184 * Same format as NL80211_STA_INFO_CHAIN_SIGNAL. 2201 * Same format as NL80211_STA_INFO_CHAIN_SIGNAL.
2202 * @NL80211_STA_EXPECTED_THROUGHPUT: expected throughput considering also the
2203 * 802.11 header (u32, kbps)
2185 * @__NL80211_STA_INFO_AFTER_LAST: internal 2204 * @__NL80211_STA_INFO_AFTER_LAST: internal
2186 * @NL80211_STA_INFO_MAX: highest possible station info attribute 2205 * @NL80211_STA_INFO_MAX: highest possible station info attribute
2187 */ 2206 */
@@ -2213,6 +2232,7 @@ enum nl80211_sta_info {
2213 NL80211_STA_INFO_TX_BYTES64, 2232 NL80211_STA_INFO_TX_BYTES64,
2214 NL80211_STA_INFO_CHAIN_SIGNAL, 2233 NL80211_STA_INFO_CHAIN_SIGNAL,
2215 NL80211_STA_INFO_CHAIN_SIGNAL_AVG, 2234 NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
2235 NL80211_STA_INFO_EXPECTED_THROUGHPUT,
2216 2236
2217 /* keep last */ 2237 /* keep last */
2218 __NL80211_STA_INFO_AFTER_LAST, 2238 __NL80211_STA_INFO_AFTER_LAST,
@@ -2336,9 +2356,34 @@ enum nl80211_band_attr {
2336 * using this channel as the primary or any of the secondary channels 2356 * using this channel as the primary or any of the secondary channels
2337 * isn't possible 2357 * isn't possible
2338 * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. 2358 * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
2359 * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this
2360 * channel. A channel that has the INDOOR_ONLY attribute can only be
2361 * used when there is a clear assessment that the device is operating in
 2362 * indoor surroundings, i.e., it is connected to AC power (and not
2363 * through portable DC inverters) or is under the control of a master
2364 * that is acting as an AP and is connected to AC power.
2365 * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
2366 * channel if it's connected concurrently to a BSS on the same channel on
2367 * the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
2368 * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
2369 * channel that has the GO_CONCURRENT attribute set can be done when there
2370 * is a clear assessment that the device is operating under the guidance of
2371 * an authorized master, i.e., setting up a GO while the device is also
2372 * connected to an AP with DFS and radar detection on the UNII band (it is
2373 * up to user-space, i.e., wpa_supplicant to perform the required
2374 * verifications)
2375 * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
2376 * on this channel in current regulatory domain.
2377 * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
2378 * on this channel in current regulatory domain.
2339 * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number 2379 * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
2340 * currently defined 2380 * currently defined
2341 * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use 2381 * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
2382 *
2383 * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
2384 * for more information on the FCC description of the relaxations allowed
2385 * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
2386 * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
2342 */ 2387 */
2343enum nl80211_frequency_attr { 2388enum nl80211_frequency_attr {
2344 __NL80211_FREQUENCY_ATTR_INVALID, 2389 __NL80211_FREQUENCY_ATTR_INVALID,
@@ -2355,6 +2400,10 @@ enum nl80211_frequency_attr {
2355 NL80211_FREQUENCY_ATTR_NO_80MHZ, 2400 NL80211_FREQUENCY_ATTR_NO_80MHZ,
2356 NL80211_FREQUENCY_ATTR_NO_160MHZ, 2401 NL80211_FREQUENCY_ATTR_NO_160MHZ,
2357 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, 2402 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
2403 NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
2404 NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
2405 NL80211_FREQUENCY_ATTR_NO_20MHZ,
2406 NL80211_FREQUENCY_ATTR_NO_10MHZ,
2358 2407
2359 /* keep last */ 2408 /* keep last */
2360 __NL80211_FREQUENCY_ATTR_AFTER_LAST, 2409 __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -2573,10 +2622,13 @@ enum nl80211_dfs_regions {
2573 * present has been registered with the wireless core that 2622 * present has been registered with the wireless core that
2574 * has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a 2623 * has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
2575 * supported feature. 2624 * supported feature.
 2625 * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the
2626 * platform is operating in an indoor environment.
2576 */ 2627 */
2577enum nl80211_user_reg_hint_type { 2628enum nl80211_user_reg_hint_type {
2578 NL80211_USER_REG_HINT_USER = 0, 2629 NL80211_USER_REG_HINT_USER = 0,
2579 NL80211_USER_REG_HINT_CELL_BASE = 1, 2630 NL80211_USER_REG_HINT_CELL_BASE = 1,
2631 NL80211_USER_REG_HINT_INDOOR = 2,
2580}; 2632};
2581 2633
2582/** 2634/**
@@ -3650,6 +3702,8 @@ enum nl80211_iface_limit_attrs {
3650 * different channels may be used within this group. 3702 * different channels may be used within this group.
3651 * @NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS: u32 attribute containing the bitmap 3703 * @NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS: u32 attribute containing the bitmap
3652 * of supported channel widths for radar detection. 3704 * of supported channel widths for radar detection.
3705 * @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
3706 * of supported regulatory regions for radar detection.
3653 * @NUM_NL80211_IFACE_COMB: number of attributes 3707 * @NUM_NL80211_IFACE_COMB: number of attributes
3654 * @MAX_NL80211_IFACE_COMB: highest attribute number 3708 * @MAX_NL80211_IFACE_COMB: highest attribute number
3655 * 3709 *
@@ -3683,6 +3737,7 @@ enum nl80211_if_combination_attrs {
3683 NL80211_IFACE_COMB_STA_AP_BI_MATCH, 3737 NL80211_IFACE_COMB_STA_AP_BI_MATCH,
3684 NL80211_IFACE_COMB_NUM_CHANNELS, 3738 NL80211_IFACE_COMB_NUM_CHANNELS,
3685 NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, 3739 NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
3740 NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
3686 3741
3687 /* keep last */ 3742 /* keep last */
3688 NUM_NL80211_IFACE_COMB, 3743 NUM_NL80211_IFACE_COMB,
@@ -3893,6 +3948,9 @@ enum nl80211_ap_sme_features {
3893 * interface. An active monitor interface behaves like a normal monitor 3948 * interface. An active monitor interface behaves like a normal monitor
3894 * interface, but gets added to the driver. It ensures that incoming 3949 * interface, but gets added to the driver. It ensures that incoming
3895 * unicast packets directed at the configured interface address get ACKed. 3950 * unicast packets directed at the configured interface address get ACKed.
3951 * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
3952 * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
3953 * lifetime of a BSS.
3896 */ 3954 */
3897enum nl80211_feature_flags { 3955enum nl80211_feature_flags {
3898 NL80211_FEATURE_SK_TX_STATUS = 1 << 0, 3956 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -3913,6 +3971,7 @@ enum nl80211_feature_flags {
3913 NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15, 3971 NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15,
3914 NL80211_FEATURE_USERSPACE_MPM = 1 << 16, 3972 NL80211_FEATURE_USERSPACE_MPM = 1 << 16,
3915 NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, 3973 NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17,
3974 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18,
3916}; 3975};
3917 3976
3918/** 3977/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 970553cbbc8e..0b979ee4bfc0 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -395,7 +395,9 @@ struct ovs_key_nd {
395 * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying 395 * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
396 * the actions to take for packets that match the key. Always present in 396 * the actions to take for packets that match the key. Always present in
397 * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for 397 * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for
398 * %OVS_FLOW_CMD_SET requests. 398 * %OVS_FLOW_CMD_SET requests. An %OVS_FLOW_CMD_SET without
399 * %OVS_FLOW_ATTR_ACTIONS will not modify the actions. To clear the actions,
400 * an %OVS_FLOW_ATTR_ACTIONS without any nested attributes must be given.
399 * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this 401 * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
400 * flow. Present in notifications if the stats would be nonzero. Ignored in 402 * flow. Present in notifications if the stats would be nonzero. Ignored in
401 * requests. 403 * requests.
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 852373d27dbb..6f71b9b41595 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -38,6 +38,7 @@
38#define _LINUX_TIPC_H_ 38#define _LINUX_TIPC_H_
39 39
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/sockios.h>
41 42
42/* 43/*
43 * TIPC addressing primitives 44 * TIPC addressing primitives
@@ -87,6 +88,7 @@ static inline unsigned int tipc_node(__u32 addr)
87 88
88#define TIPC_CFG_SRV 0 /* configuration service name type */ 89#define TIPC_CFG_SRV 0 /* configuration service name type */
89#define TIPC_TOP_SRV 1 /* topology service name type */ 90#define TIPC_TOP_SRV 1 /* topology service name type */
91#define TIPC_LINK_STATE 2 /* link state name type */
90#define TIPC_RESERVED_TYPES 64 /* lowest user-publishable name type */ 92#define TIPC_RESERVED_TYPES 64 /* lowest user-publishable name type */
91 93
92/* 94/*
@@ -206,4 +208,25 @@ struct sockaddr_tipc {
206#define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */ 208#define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */
207#define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */ 209#define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */
208 210
211/*
 212 * Maximum sizes of TIPC bearer-related names (including terminating NUL)
213 * The string formatting for each name element is:
214 * media: media
215 * interface: media:interface name
216 * link: Z.C.N:interface-Z.C.N:interface
217 *
218 */
219
220#define TIPC_MAX_MEDIA_NAME 16
221#define TIPC_MAX_IF_NAME 16
222#define TIPC_MAX_BEARER_NAME 32
223#define TIPC_MAX_LINK_NAME 60
224
225#define SIOCGETLINKNAME SIOCPROTOPRIVATE
226
227struct tipc_sioc_ln_req {
228 __u32 peer;
229 __u32 bearer_id;
230 char linkname[TIPC_MAX_LINK_NAME];
231};
209#endif 232#endif
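A userspace sketch of the new SIOCGETLINKNAME ioctl; the peer address and bearer id would normally come from a topology-service subscription and are assumptions here:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/tipc.h>

    static int get_link_name(int sk, __u32 peer, __u32 bearer_id,
                             char *name, size_t len)
    {
            struct tipc_sioc_ln_req req;

            memset(&req, 0, sizeof(req));
            req.peer = peer;
            req.bearer_id = bearer_id;
            if (ioctl(sk, SIOCGETLINKNAME, &req) < 0)
                    return -1;
            strncpy(name, req.linkname, len - 1);
            name[len - 1] = '\0';
            return 0;
    }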
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 6b0bff09b3a7..41a76acbb305 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -39,6 +39,7 @@
39 39
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/string.h> 41#include <linux/string.h>
42#include <linux/tipc.h>
42#include <asm/byteorder.h> 43#include <asm/byteorder.h>
43 44
44#ifndef __KERNEL__ 45#ifndef __KERNEL__
@@ -155,15 +156,6 @@
155#define TIPC_TLV_PORT_REF 26 /* 32-bit port reference */ 156#define TIPC_TLV_PORT_REF 26 /* 32-bit port reference */
156 157
157/* 158/*
158 * Maximum sizes of TIPC bearer-related names (including terminating NUL)
159 */
160
161#define TIPC_MAX_MEDIA_NAME 16 /* format = media */
162#define TIPC_MAX_IF_NAME 16 /* format = interface */
163#define TIPC_MAX_BEARER_NAME 32 /* format = media:interface */
164#define TIPC_MAX_LINK_NAME 60 /* format = Z.C.N:interface-Z.C.N:interface */
165
166/*
167 * Link priority limits (min, default, max, media default) 159 * Link priority limits (min, default, max, media default)
168 */ 160 */
169 161
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index e2bcfd75a30d..16574ea18f0c 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -29,6 +29,8 @@ struct udphdr {
29/* UDP socket options */ 29/* UDP socket options */
30#define UDP_CORK 1 /* Never send partially complete segments */ 30#define UDP_CORK 1 /* Never send partially complete segments */
31#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */ 31#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
 32#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6 */
 33#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
32 34
33/* UDP encapsulation types */ 35/* UDP encapsulation types */
34#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */ 36#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
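A sketch of enabling the new options on an IPv6 UDP socket, e.g. for a tunnel endpoint such as VXLAN where the encapsulated packet carries its own checksum; fd is an assumed AF_INET6/SOCK_DGRAM socket:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/udp.h>

    static int allow_zero_udp6_csum(int fd)
    {
            int one = 1;

            if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX,
                           &one, sizeof(one)))
                    return -1;
            return setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX,
                              &one, sizeof(one));
    }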
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index c50061db6098..70054cc0708d 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -51,6 +51,59 @@
51 */ 51 */
52 52
53/* 53/*
54 * Multiple transmit and receive queues:
55 * If supported, the backend will write the key "multi-queue-max-queues" to
56 * the directory for that vif, and set its value to the maximum supported
57 * number of queues.
58 * Frontends that are aware of this feature and wish to use it can write the
59 * key "multi-queue-num-queues", set to the number they wish to use, which
60 * must be greater than zero, and no more than the value reported by the backend
61 * in "multi-queue-max-queues".
62 *
63 * Queues replicate the shared rings and event channels.
64 * "feature-split-event-channels" may optionally be used when using
65 * multiple queues, but is not mandatory.
66 *
67 * Each queue consists of one shared ring pair, i.e. there must be the same
68 * number of tx and rx rings.
69 *
70 * For frontends requesting just one queue, the usual event-channel and
71 * ring-ref keys are written as before, simplifying the backend processing
72 * to avoid distinguishing between a frontend that doesn't understand the
73 * multi-queue feature, and one that does, but requested only one queue.
74 *
75 * Frontends requesting two or more queues must not write the toplevel
76 * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
77 * instead writing those keys under sub-keys having the name "queue-N" where
78 * N is the integer ID of the queue for which those keys belong. Queues
79 * are indexed from zero. For example, a frontend with two queues and split
80 * event channels must write the following set of queue-related keys:
81 *
82 * /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
83 * /local/domain/1/device/vif/0/queue-0 = ""
84 * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
85 * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
86 * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
87 * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
88 * /local/domain/1/device/vif/0/queue-1 = ""
89 * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
 90 * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
91 * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
92 * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
93 *
94 * If there is any inconsistency in the XenStore data, the backend may
95 * choose not to connect any queues, instead treating the request as an
96 * error. This includes scenarios where more (or fewer) queues were
97 * requested than the frontend provided details for.
98 *
99 * Mapping of packets to queues is considered to be a function of the
100 * transmitting system (backend or frontend) and is not negotiated
101 * between the two. Guests are free to transmit packets on any queue
102 * they choose, provided it has been set up correctly. Guests must be
103 * prepared to receive packets on any queue they have requested be set up.
104 */
105
106/*
54 * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum 107 * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
55 * offload off or on. If it is missing then the feature is assumed to be on. 108 * offload off or on. If it is missing then the feature is assumed to be on.
56 * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum 109 * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
diff --git a/kernel/audit.c b/kernel/audit.c
index f30106459a32..3ef2e0e797e8 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -424,6 +424,38 @@ static void kauditd_send_skb(struct sk_buff *skb)
424} 424}
425 425
426/* 426/*
427 * kauditd_send_multicast_skb - send the skb to multicast userspace listeners
428 *
429 * This function doesn't consume an skb as might be expected since it has to
 430 * copy it anyway.
431 */
432static void kauditd_send_multicast_skb(struct sk_buff *skb)
433{
434 struct sk_buff *copy;
435 struct audit_net *aunet = net_generic(&init_net, audit_net_id);
436 struct sock *sock = aunet->nlsk;
437
438 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
439 return;
440
441 /*
442 * The seemingly wasteful skb_copy() rather than bumping the refcount
443 * using skb_get() is necessary because non-standard mods are made to
444 * the skb by the original kaudit unicast socket send routine. The
445 * existing auditd daemon assumes this breakage. Fixing this would
446 * require co-ordinating a change in the established protocol between
447 * the kaudit kernel subsystem and the auditd userspace code. There is
448 * no reason for new multicast clients to continue with this
449 * non-compliance.
450 */
451 copy = skb_copy(skb, GFP_KERNEL);
452 if (!copy)
453 return;
454
455 nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
456}
457
458/*
427 * flush_hold_queue - empty the hold queue if auditd appears 459 * flush_hold_queue - empty the hold queue if auditd appears
428 * 460 *
429 * If auditd just started, drain the queue of messages already 461 * If auditd just started, drain the queue of messages already
@@ -1076,10 +1108,22 @@ static void audit_receive(struct sk_buff *skb)
1076 mutex_unlock(&audit_cmd_mutex); 1108 mutex_unlock(&audit_cmd_mutex);
1077} 1109}
1078 1110
1111/* Run custom bind function on netlink socket group connect or bind requests. */
1112static int audit_bind(int group)
1113{
1114 if (!capable(CAP_AUDIT_READ))
1115 return -EPERM;
1116
1117 return 0;
1118}
1119
1079static int __net_init audit_net_init(struct net *net) 1120static int __net_init audit_net_init(struct net *net)
1080{ 1121{
1081 struct netlink_kernel_cfg cfg = { 1122 struct netlink_kernel_cfg cfg = {
1082 .input = audit_receive, 1123 .input = audit_receive,
1124 .bind = audit_bind,
1125 .flags = NL_CFG_F_NONROOT_RECV,
1126 .groups = AUDIT_NLGRP_MAX,
1083 }; 1127 };
1084 1128
1085 struct audit_net *aunet = net_generic(net, audit_net_id); 1129 struct audit_net *aunet = net_generic(net, audit_net_id);
@@ -1901,10 +1945,10 @@ out:
1901 * audit_log_end - end one audit record 1945 * audit_log_end - end one audit record
1902 * @ab: the audit_buffer 1946 * @ab: the audit_buffer
1903 * 1947 *
1904 * The netlink_* functions cannot be called inside an irq context, so 1948 * netlink_unicast() cannot be called inside an irq context because it blocks
1905 * the audit buffer is placed on a queue and a tasklet is scheduled to 1949 * (last arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed
1906 * remove them from the queue outside the irq context. May be called in 1950 * on a queue and a tasklet is scheduled to remove them from the queue outside
1907 * any context. 1951 * the irq context. May be called in any context.
1908 */ 1952 */
1909void audit_log_end(struct audit_buffer *ab) 1953void audit_log_end(struct audit_buffer *ab)
1910{ 1954{
@@ -1914,6 +1958,18 @@ void audit_log_end(struct audit_buffer *ab)
1914 audit_log_lost("rate limit exceeded"); 1958 audit_log_lost("rate limit exceeded");
1915 } else { 1959 } else {
1916 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); 1960 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
1961
1962 kauditd_send_multicast_skb(ab->skb);
1963
1964 /*
1965 * The original kaudit unicast socket sends up messages with
1966 * nlmsg_len set to the payload length rather than the entire
1967 * message length. This breaks the standard set by netlink.
1968 * The existing auditd daemon assumes this breakage. Fixing
1969 * this would require co-ordinating a change in the established
1970 * protocol between the kaudit kernel subsystem and the auditd
1971 * userspace code.
1972 */
1917 nlh->nlmsg_len = ab->skb->len - NLMSG_HDRLEN; 1973 nlh->nlmsg_len = ab->skb->len - NLMSG_HDRLEN;
1918 1974
1919 if (audit_pid) { 1975 if (audit_pid) {
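For context, a minimal userspace listener for the new multicast group; per audit_bind() above, joining fails with -EPERM without CAP_AUDIT_READ. AUDIT_NLGRP_READLOG comes from the uapi audit header added elsewhere in this series:

    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/audit.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270 /* not exposed by all libc header sets */
    #endif

    static int open_audit_multicast(void)
    {
            int grp = AUDIT_NLGRP_READLOG;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

            if (fd < 0)
                    return -1;
            /* audit_bind() enforces CAP_AUDIT_READ for this group */
            if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &grp, sizeof(grp)) < 0)
                    return -1;
            return fd; /* read nlmsg-framed audit records from fd */
    }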
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index f6d76bebe69f..301bbc24739c 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -54,8 +54,7 @@
54struct seccomp_filter { 54struct seccomp_filter {
55 atomic_t usage; 55 atomic_t usage;
56 struct seccomp_filter *prev; 56 struct seccomp_filter *prev;
57 unsigned short len; /* Instruction count */ 57 struct sk_filter *prog;
58 struct sock_filter_int insnsi[];
59}; 58};
60 59
61/* Limit any path through the tree to 256KB worth of instructions. */ 60/* Limit any path through the tree to 256KB worth of instructions. */
@@ -104,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
104 u32 k = ftest->k; 103 u32 k = ftest->k;
105 104
106 switch (code) { 105 switch (code) {
107 case BPF_S_LD_W_ABS: 106 case BPF_LD | BPF_W | BPF_ABS:
108 ftest->code = BPF_LDX | BPF_W | BPF_ABS; 107 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
109 /* 32-bit aligned and not out of bounds. */ 108 /* 32-bit aligned and not out of bounds. */
110 if (k >= sizeof(struct seccomp_data) || k & 3) 109 if (k >= sizeof(struct seccomp_data) || k & 3)
111 return -EINVAL; 110 return -EINVAL;
112 continue; 111 continue;
113 case BPF_S_LD_W_LEN: 112 case BPF_LD | BPF_W | BPF_LEN:
114 ftest->code = BPF_LD | BPF_IMM; 113 ftest->code = BPF_LD | BPF_IMM;
115 ftest->k = sizeof(struct seccomp_data); 114 ftest->k = sizeof(struct seccomp_data);
116 continue; 115 continue;
117 case BPF_S_LDX_W_LEN: 116 case BPF_LDX | BPF_W | BPF_LEN:
118 ftest->code = BPF_LDX | BPF_IMM; 117 ftest->code = BPF_LDX | BPF_IMM;
119 ftest->k = sizeof(struct seccomp_data); 118 ftest->k = sizeof(struct seccomp_data);
120 continue; 119 continue;
121 /* Explicitly include allowed calls. */ 120 /* Explicitly include allowed calls. */
122 case BPF_S_RET_K: 121 case BPF_RET | BPF_K:
123 case BPF_S_RET_A: 122 case BPF_RET | BPF_A:
124 case BPF_S_ALU_ADD_K: 123 case BPF_ALU | BPF_ADD | BPF_K:
125 case BPF_S_ALU_ADD_X: 124 case BPF_ALU | BPF_ADD | BPF_X:
126 case BPF_S_ALU_SUB_K: 125 case BPF_ALU | BPF_SUB | BPF_K:
127 case BPF_S_ALU_SUB_X: 126 case BPF_ALU | BPF_SUB | BPF_X:
128 case BPF_S_ALU_MUL_K: 127 case BPF_ALU | BPF_MUL | BPF_K:
129 case BPF_S_ALU_MUL_X: 128 case BPF_ALU | BPF_MUL | BPF_X:
130 case BPF_S_ALU_DIV_X: 129 case BPF_ALU | BPF_DIV | BPF_K:
131 case BPF_S_ALU_AND_K: 130 case BPF_ALU | BPF_DIV | BPF_X:
132 case BPF_S_ALU_AND_X: 131 case BPF_ALU | BPF_AND | BPF_K:
133 case BPF_S_ALU_OR_K: 132 case BPF_ALU | BPF_AND | BPF_X:
134 case BPF_S_ALU_OR_X: 133 case BPF_ALU | BPF_OR | BPF_K:
135 case BPF_S_ALU_XOR_K: 134 case BPF_ALU | BPF_OR | BPF_X:
136 case BPF_S_ALU_XOR_X: 135 case BPF_ALU | BPF_XOR | BPF_K:
137 case BPF_S_ALU_LSH_K: 136 case BPF_ALU | BPF_XOR | BPF_X:
138 case BPF_S_ALU_LSH_X: 137 case BPF_ALU | BPF_LSH | BPF_K:
139 case BPF_S_ALU_RSH_K: 138 case BPF_ALU | BPF_LSH | BPF_X:
140 case BPF_S_ALU_RSH_X: 139 case BPF_ALU | BPF_RSH | BPF_K:
141 case BPF_S_ALU_NEG: 140 case BPF_ALU | BPF_RSH | BPF_X:
142 case BPF_S_LD_IMM: 141 case BPF_ALU | BPF_NEG:
143 case BPF_S_LDX_IMM: 142 case BPF_LD | BPF_IMM:
144 case BPF_S_MISC_TAX: 143 case BPF_LDX | BPF_IMM:
145 case BPF_S_MISC_TXA: 144 case BPF_MISC | BPF_TAX:
146 case BPF_S_ALU_DIV_K: 145 case BPF_MISC | BPF_TXA:
147 case BPF_S_LD_MEM: 146 case BPF_LD | BPF_MEM:
148 case BPF_S_LDX_MEM: 147 case BPF_LDX | BPF_MEM:
149 case BPF_S_ST: 148 case BPF_ST:
150 case BPF_S_STX: 149 case BPF_STX:
151 case BPF_S_JMP_JA: 150 case BPF_JMP | BPF_JA:
152 case BPF_S_JMP_JEQ_K: 151 case BPF_JMP | BPF_JEQ | BPF_K:
153 case BPF_S_JMP_JEQ_X: 152 case BPF_JMP | BPF_JEQ | BPF_X:
154 case BPF_S_JMP_JGE_K: 153 case BPF_JMP | BPF_JGE | BPF_K:
155 case BPF_S_JMP_JGE_X: 154 case BPF_JMP | BPF_JGE | BPF_X:
156 case BPF_S_JMP_JGT_K: 155 case BPF_JMP | BPF_JGT | BPF_K:
157 case BPF_S_JMP_JGT_X: 156 case BPF_JMP | BPF_JGT | BPF_X:
158 case BPF_S_JMP_JSET_K: 157 case BPF_JMP | BPF_JSET | BPF_K:
159 case BPF_S_JMP_JSET_X: 158 case BPF_JMP | BPF_JSET | BPF_X:
160 sk_decode_filter(ftest, ftest);
161 continue; 159 continue;
162 default: 160 default:
163 return -EINVAL; 161 return -EINVAL;
@@ -189,7 +187,8 @@ static u32 seccomp_run_filters(int syscall)
189 * value always takes priority (ignoring the DATA). 187 * value always takes priority (ignoring the DATA).
190 */ 188 */
191 for (f = current->seccomp.filter; f; f = f->prev) { 189 for (f = current->seccomp.filter; f; f = f->prev) {
192 u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi); 190 u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
191
193 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) 192 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
194 ret = cur_ret; 193 ret = cur_ret;
195 } 194 }
@@ -215,7 +214,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
215 return -EINVAL; 214 return -EINVAL;
216 215
217 for (filter = current->seccomp.filter; filter; filter = filter->prev) 216 for (filter = current->seccomp.filter; filter; filter = filter->prev)
218 total_insns += filter->len + 4; /* include a 4 instr penalty */ 217 total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
219 if (total_insns > MAX_INSNS_PER_PATH) 218 if (total_insns > MAX_INSNS_PER_PATH)
220 return -ENOMEM; 219 return -ENOMEM;
221 220
@@ -256,19 +255,25 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
256 255
257 /* Allocate a new seccomp_filter */ 256 /* Allocate a new seccomp_filter */
258 ret = -ENOMEM; 257 ret = -ENOMEM;
259 filter = kzalloc(sizeof(struct seccomp_filter) + 258 filter = kzalloc(sizeof(struct seccomp_filter),
260 sizeof(struct sock_filter_int) * new_len,
261 GFP_KERNEL|__GFP_NOWARN); 259 GFP_KERNEL|__GFP_NOWARN);
262 if (!filter) 260 if (!filter)
263 goto free_prog; 261 goto free_prog;
264 262
265 ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len); 263 filter->prog = kzalloc(sk_filter_size(new_len),
266 if (ret) 264 GFP_KERNEL|__GFP_NOWARN);
265 if (!filter->prog)
267 goto free_filter; 266 goto free_filter;
267
268 ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
269 if (ret)
270 goto free_filter_prog;
268 kfree(fp); 271 kfree(fp);
269 272
270 atomic_set(&filter->usage, 1); 273 atomic_set(&filter->usage, 1);
271 filter->len = new_len; 274 filter->prog->len = new_len;
275
276 sk_filter_select_runtime(filter->prog);
272 277
273 /* 278 /*
274 * If there is an existing filter, make it the prev and don't drop its 279 * If there is an existing filter, make it the prev and don't drop its
@@ -278,6 +283,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
278 current->seccomp.filter = filter; 283 current->seccomp.filter = filter;
279 return 0; 284 return 0;
280 285
286free_filter_prog:
287 kfree(filter->prog);
281free_filter: 288free_filter:
282 kfree(filter); 289 kfree(filter);
283free_prog: 290free_prog:
@@ -330,6 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk)
330 while (orig && atomic_dec_and_test(&orig->usage)) { 337 while (orig && atomic_dec_and_test(&orig->usage)) {
331 struct seccomp_filter *freeme = orig; 338 struct seccomp_filter *freeme = orig;
332 orig = orig->prev; 339 orig = orig->prev;
340 sk_filter_free(freeme->prog);
333 kfree(freeme); 341 kfree(freeme);
334 } 342 }
335} 343}
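From userspace the interface is unchanged: filters are still supplied as classic BPF and converted internally by sk_convert_filter() as shown above. A hedged allow-everything sketch of the attach path this code services:

    #include <stddef.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    static struct sock_filter allow_insns[] = {
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)), /* load syscall nr */
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), /* allow all */
    };

    static struct sock_fprog allow_prog = {
            .len    = sizeof(allow_insns) / sizeof(allow_insns[0]),
            .filter = allow_insns,
    };

    static int install_filter(void)
    {
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &allow_prog);
    }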
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index db19e3e2aa4b..ba9ed453c4ed 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2568,11 +2568,11 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
2568 bool first = 1; 2568 bool first = 1;
2569 size_t left = *lenp; 2569 size_t left = *lenp;
2570 unsigned long bitmap_len = table->maxlen; 2570 unsigned long bitmap_len = table->maxlen;
2571 unsigned long *bitmap = (unsigned long *) table->data; 2571 unsigned long *bitmap = *(unsigned long **) table->data;
2572 unsigned long *tmp_bitmap = NULL; 2572 unsigned long *tmp_bitmap = NULL;
2573 char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; 2573 char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
2574 2574
2575 if (!bitmap_len || !left || (*ppos && !write)) { 2575 if (!bitmap || !bitmap_len || !left || (*ppos && !write)) {
2576 *lenp = 0; 2576 *lenp = 0;
2577 return 0; 2577 return 0;
2578 } 2578 }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e34d11d70bbf..7cfcc1b8e101 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1636,6 +1636,19 @@ config TEST_USER_COPY
1636 1636
1637 If unsure, say N. 1637 If unsure, say N.
1638 1638
1639config TEST_BPF
1640 tristate "Test BPF filter functionality"
1641 default n
1642 depends on m && NET
1643 help
1644 This builds the "test_bpf" module that runs various test vectors
1645 against the BPF interpreter or BPF JIT compiler depending on the
 1646 current setting. This is particularly useful for BPF JIT compiler
1647 development, but also to run regression tests against changes in
1648 the interpreter code.
1649
1650 If unsure, say N.
1651
1639source "samples/Kconfig" 1652source "samples/Kconfig"
1640 1653
1641source "lib/Kconfig.kgdb" 1654source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 4a4078987a4c..ba967a19edba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,6 +33,7 @@ obj-y += kstrtox.o
33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
34obj-$(CONFIG_TEST_MODULE) += test_module.o 34obj-$(CONFIG_TEST_MODULE) += test_module.o
35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o 35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
36obj-$(CONFIG_TEST_BPF) += test_bpf.o
36 37
37ifeq ($(CONFIG_DEBUG_KOBJECT),y) 38ifeq ($(CONFIG_DEBUG_KOBJECT),y)
38CFLAGS_kobject.o += -DDEBUG 39CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b810b753c607..c101230658eb 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -164,3 +164,66 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
164 memblock_free_early(__pa(mask), cpumask_size()); 164 memblock_free_early(__pa(mask), cpumask_size());
165} 165}
166#endif 166#endif
167
168/**
169 * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
170 *
171 * @i: index number
172 * @numa_node: local numa_node
173 * @dstp: cpumask with the relevant cpu bit set according to the policy
174 *
 175 * This function sets the cpumask according to a NUMA-aware policy.
 176 * The cpumask can be used as an affinity hint for the IRQ related to a
 177 * queue; the policy is to spread queues across cores, local cores
 178 * first.
 179 *
 180 * Returns 0 on success, -ENOMEM if no memory is available, and -EAGAIN
 181 * when the cpu bit could not be set; the caller should then retry.
182 */
183int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
184{
185 cpumask_var_t mask;
186 int cpu;
187 int ret = 0;
188
189 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
190 return -ENOMEM;
191
192 i %= num_online_cpus();
193
194 if (!cpumask_of_node(numa_node)) {
195 /* Use all online cpu's for non numa aware system */
196 cpumask_copy(mask, cpu_online_mask);
197 } else {
198 int n;
199
200 cpumask_and(mask,
201 cpumask_of_node(numa_node), cpu_online_mask);
202
203 n = cpumask_weight(mask);
204 if (i >= n) {
205 i -= n;
206
207 /* If index > number of local cpu's, mask out local
208 * cpu's
209 */
210 cpumask_andnot(mask, cpu_online_mask, mask);
211 }
212 }
213
214 for_each_cpu(cpu, mask) {
215 if (--i < 0)
216 goto out;
217 }
218
219 ret = -EAGAIN;
220
221out:
222 free_cpumask_var(mask);
223
224 if (!ret)
225 cpumask_set_cpu(cpu, dstp);
226
227 return ret;
228}
229EXPORT_SYMBOL(cpumask_set_cpu_local_first);
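A hedged in-kernel sketch of the intended caller, a multi-queue driver spreading IRQ affinity hints; nqueues, node and queue_irq[] are placeholders for driver state, not an API from this patch:

    cpumask_t hint;
    int q;

    for (q = 0; q < nqueues; q++) {
            cpumask_clear(&hint);
            /* local-node cpus are picked first, then remote ones */
            if (cpumask_set_cpu_local_first(q, node, &hint) == 0)
                    irq_set_affinity_hint(queue_irq[q], &hint);
    }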
diff --git a/lib/crc7.c b/lib/crc7.c
index f1c3a144cec1..bf6255e23919 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -10,42 +10,47 @@
 #include <linux/crc7.h>
 
 
-/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
-const u8 crc7_syndrome_table[256] = {
-	0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
-	0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
-	0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
-	0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
-	0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
-	0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
-	0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
-	0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
-	0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
-	0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
-	0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
-	0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
-	0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
-	0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
-	0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
-	0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
-	0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
-	0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
-	0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
-	0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
-	0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
-	0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
-	0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
-	0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
-	0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
-	0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
-	0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
-	0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
-	0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
-	0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
-	0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
-	0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+/*
+ * Table for CRC-7 (polynomial x^7 + x^3 + 1).
+ * This is a big-endian CRC (msbit is highest power of x),
+ * aligned so the msbit of the byte is the x^6 coefficient
+ * and the lsbit is not used.
+ */
+const u8 crc7_be_syndrome_table[256] = {
+	0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
+	0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
+	0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
+	0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
+	0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
+	0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
+	0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
+	0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
+	0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
+	0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
+	0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
+	0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
+	0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
+	0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
+	0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
+	0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
+	0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
+	0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
+	0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
+	0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
+	0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
+	0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
+	0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
+	0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
+	0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
+	0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
+	0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
+	0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
+	0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
+	0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
+	0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
+	0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
 };
-EXPORT_SYMBOL(crc7_syndrome_table);
+EXPORT_SYMBOL(crc7_be_syndrome_table);
 
 /**
  * crc7 - update the CRC7 for the data buffer
@@ -55,14 +60,17 @@ EXPORT_SYMBOL(crc7_syndrome_table);
  * Context: any
  *
  * Returns the updated CRC7 value.
+ * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
+ * makes the computation easier, and all callers want it in that form.
+ *
  */
-u8 crc7(u8 crc, const u8 *buffer, size_t len)
+u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
 {
 	while (len--)
-		crc = crc7_byte(crc, *buffer++);
+		crc = crc7_be_byte(crc, *buffer++);
 	return crc;
 }
-EXPORT_SYMBOL(crc7);
+EXPORT_SYMBOL(crc7_be);
 
 MODULE_DESCRIPTION("CRC7 calculations");
 MODULE_LICENSE("GPL");
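
Usage sketch (illustration, not part of the patch): the left-alignment documented above lets a caller frame a command byte without shifting, since the CRC occupies bits 7..1 and the protocol's end bit can simply be OR'd into bit 0. In the style of the SD/MMC users of this routine; the function name and buffer layout are assumptions:

#include <linux/crc7.h>

/* cmd[0..4] hold the command bytes; append the CRC7 plus the end bit. */
static void sd_cmd_add_crc(u8 cmd[6])
{
	cmd[5] = crc7_be(0, cmd, 5) | 0x01;	/* lsbit is the end bit */
}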
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644
index 000000000000..c579e0f58818
--- /dev/null
+++ b/lib/test_bpf.c
@@ -0,0 +1,1929 @@
1/*
2 * Testsuite for BPF interpreter and BPF JIT compiler
3 *
4 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/filter.h>
21#include <linux/skbuff.h>
22#include <linux/netdevice.h>
23#include <linux/if_vlan.h>
24
25/* General test specific settings */
26#define MAX_SUBTESTS 3
27#define MAX_TESTRUNS 10000
28#define MAX_DATA 128
29#define MAX_INSNS 512
30#define MAX_K 0xffffFFFF
31
32/* Few constants used to init test 'skb' */
33#define SKB_TYPE 3
34#define SKB_MARK 0x1234aaaa
35#define SKB_HASH 0x1234aaab
36#define SKB_QUEUE_MAP 123
37#define SKB_VLAN_TCI 0xffff
38#define SKB_DEV_IFINDEX 577
39#define SKB_DEV_TYPE 588
40
41/* Redefine REGs to make tests less verbose */
42#define R0 BPF_REG_0
43#define R1 BPF_REG_1
44#define R2 BPF_REG_2
45#define R3 BPF_REG_3
46#define R4 BPF_REG_4
47#define R5 BPF_REG_5
48#define R6 BPF_REG_6
49#define R7 BPF_REG_7
50#define R8 BPF_REG_8
51#define R9 BPF_REG_9
52#define R10 BPF_REG_10
53
54/* Flags that can be passed to test cases */
55#define FLAG_NO_DATA BIT(0)
56#define FLAG_EXPECTED_FAIL BIT(1)
57
58enum {
59 CLASSIC = BIT(6), /* Old BPF instructions only. */
60 INTERNAL = BIT(7), /* Extended instruction set. */
61};
62
63#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
64
65struct bpf_test {
66 const char *descr;
67 union {
68 struct sock_filter insns[MAX_INSNS];
69 struct sock_filter_int insns_int[MAX_INSNS];
70 } u;
71 __u8 aux;
72 __u8 data[MAX_DATA];
73 struct {
74 int data_size;
75 __u32 result;
76 } test[MAX_SUBTESTS];
77};
78
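For reference, the BPF_STMT()/BPF_JUMP() initializers used throughout the tests[] array below come from the uapi filter header and expand to plain 8-byte struct sock_filter initializers:

struct sock_filter {	/* one classic BPF instruction */
	__u16	code;	/* actual filter code */
	__u8	jt;	/* jump-if-true offset */
	__u8	jf;	/* jump-if-false offset */
	__u32	k;	/* generic multiuse field */
};

#define BPF_STMT(code, k)		{ (unsigned short)(code), 0, 0, k }
#define BPF_JUMP(code, k, jt, jf)	{ (unsigned short)(code), jt, jf, k }
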
79static struct bpf_test tests[] = {
80 {
81 "TAX",
82 .u.insns = {
83 BPF_STMT(BPF_LD | BPF_IMM, 1),
84 BPF_STMT(BPF_MISC | BPF_TAX, 0),
85 BPF_STMT(BPF_LD | BPF_IMM, 2),
86 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
87 BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
88 BPF_STMT(BPF_MISC | BPF_TAX, 0),
89 BPF_STMT(BPF_LD | BPF_LEN, 0),
90 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
91 BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
92 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
93 BPF_STMT(BPF_RET | BPF_A, 0)
94 },
95 CLASSIC,
96 { 10, 20, 30, 40, 50 },
97 { { 2, 10 }, { 3, 20 }, { 4, 30 } },
98 },
99 {
100 "TXA",
101 .u.insns = {
102 BPF_STMT(BPF_LDX | BPF_LEN, 0),
103 BPF_STMT(BPF_MISC | BPF_TXA, 0),
104 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
105 BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
106 },
107 CLASSIC,
108 { 10, 20, 30, 40, 50 },
109 { { 1, 2 }, { 3, 6 }, { 4, 8 } },
110 },
111 {
112 "ADD_SUB_MUL_K",
113 .u.insns = {
114 BPF_STMT(BPF_LD | BPF_IMM, 1),
115 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
116 BPF_STMT(BPF_LDX | BPF_IMM, 3),
117 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
118 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
119 BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
120 BPF_STMT(BPF_RET | BPF_A, 0)
121 },
122 CLASSIC | FLAG_NO_DATA,
123 { },
124 { { 0, 0xfffffffd } }
125 },
126 {
127 "DIV_KX",
128 .u.insns = {
129 BPF_STMT(BPF_LD | BPF_IMM, 8),
130 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
131 BPF_STMT(BPF_MISC | BPF_TAX, 0),
132 BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
133 BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
134 BPF_STMT(BPF_MISC | BPF_TAX, 0),
135 BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
136 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
137 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
138 BPF_STMT(BPF_RET | BPF_A, 0)
139 },
140 CLASSIC | FLAG_NO_DATA,
141 { },
142 { { 0, 0x40000001 } }
143 },
144 {
145 "AND_OR_LSH_K",
146 .u.insns = {
147 BPF_STMT(BPF_LD | BPF_IMM, 0xff),
148 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
149 BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
150 BPF_STMT(BPF_MISC | BPF_TAX, 0),
151 BPF_STMT(BPF_LD | BPF_IMM, 0xf),
152 BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
153 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
154 BPF_STMT(BPF_RET | BPF_A, 0)
155 },
156 CLASSIC | FLAG_NO_DATA,
157 { },
158 { { 0, 0x800000ff }, { 1, 0x800000ff } },
159 },
160 {
161 "LD_IMM_0",
162 .u.insns = {
163 BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
164 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
165 BPF_STMT(BPF_RET | BPF_K, 0),
166 BPF_STMT(BPF_RET | BPF_K, 1),
167 },
168 CLASSIC,
169 { },
170 { { 1, 1 } },
171 },
172 {
173 "LD_IND",
174 .u.insns = {
175 BPF_STMT(BPF_LDX | BPF_LEN, 0),
176 BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
177 BPF_STMT(BPF_RET | BPF_K, 1)
178 },
179 CLASSIC,
180 { },
181 { { 1, 0 }, { 10, 0 }, { 60, 0 } },
182 },
183 {
184 "LD_ABS",
185 .u.insns = {
186 BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
187 BPF_STMT(BPF_RET | BPF_K, 1)
188 },
189 CLASSIC,
190 { },
191 { { 1, 0 }, { 10, 0 }, { 60, 0 } },
192 },
193 {
194 "LD_ABS_LL",
195 .u.insns = {
196 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
197 BPF_STMT(BPF_MISC | BPF_TAX, 0),
198 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
199 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
200 BPF_STMT(BPF_RET | BPF_A, 0)
201 },
202 CLASSIC,
203 { 1, 2, 3 },
204 { { 1, 0 }, { 2, 3 } },
205 },
206 {
207 "LD_IND_LL",
208 .u.insns = {
209 BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
210 BPF_STMT(BPF_LDX | BPF_LEN, 0),
211 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
212 BPF_STMT(BPF_MISC | BPF_TAX, 0),
213 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
214 BPF_STMT(BPF_RET | BPF_A, 0)
215 },
216 CLASSIC,
217 { 1, 2, 3, 0xff },
218 { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
219 },
220 {
221 "LD_ABS_NET",
222 .u.insns = {
223 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
224 BPF_STMT(BPF_MISC | BPF_TAX, 0),
225 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
226 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
227 BPF_STMT(BPF_RET | BPF_A, 0)
228 },
229 CLASSIC,
230 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
231 { { 15, 0 }, { 16, 3 } },
232 },
233 {
234 "LD_IND_NET",
235 .u.insns = {
236 BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
237 BPF_STMT(BPF_LDX | BPF_LEN, 0),
238 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
239 BPF_STMT(BPF_MISC | BPF_TAX, 0),
240 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
241 BPF_STMT(BPF_RET | BPF_A, 0)
242 },
243 CLASSIC,
244 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
245 { { 14, 0 }, { 15, 1 }, { 17, 3 } },
246 },
247 {
248 "LD_PKTTYPE",
249 .u.insns = {
250 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
251 SKF_AD_OFF + SKF_AD_PKTTYPE),
252 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
253 BPF_STMT(BPF_RET | BPF_K, 1),
254 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
255 SKF_AD_OFF + SKF_AD_PKTTYPE),
256 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
257 BPF_STMT(BPF_RET | BPF_K, 1),
258 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
259 SKF_AD_OFF + SKF_AD_PKTTYPE),
260 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
261 BPF_STMT(BPF_RET | BPF_K, 1),
262 BPF_STMT(BPF_RET | BPF_A, 0)
263 },
264 CLASSIC,
265 { },
266 { { 1, 3 }, { 10, 3 } },
267 },
268 {
269 "LD_MARK",
270 .u.insns = {
271 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
272 SKF_AD_OFF + SKF_AD_MARK),
273 BPF_STMT(BPF_RET | BPF_A, 0)
274 },
275 CLASSIC,
276 { },
277 { { 1, SKB_MARK}, { 10, SKB_MARK} },
278 },
279 {
280 "LD_RXHASH",
281 .u.insns = {
282 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
283 SKF_AD_OFF + SKF_AD_RXHASH),
284 BPF_STMT(BPF_RET | BPF_A, 0)
285 },
286 CLASSIC,
287 { },
288 { { 1, SKB_HASH}, { 10, SKB_HASH} },
289 },
290 {
291 "LD_QUEUE",
292 .u.insns = {
293 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
294 SKF_AD_OFF + SKF_AD_QUEUE),
295 BPF_STMT(BPF_RET | BPF_A, 0)
296 },
297 CLASSIC,
298 { },
299 { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
300 },
301 {
302 "LD_PROTOCOL",
303 .u.insns = {
304 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
305 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
306 BPF_STMT(BPF_RET | BPF_K, 0),
307 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
308 SKF_AD_OFF + SKF_AD_PROTOCOL),
309 BPF_STMT(BPF_MISC | BPF_TAX, 0),
310 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
311 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
312 BPF_STMT(BPF_RET | BPF_K, 0),
313 BPF_STMT(BPF_MISC | BPF_TXA, 0),
314 BPF_STMT(BPF_RET | BPF_A, 0)
315 },
316 CLASSIC,
317 { 10, 20, 30 },
318 { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
319 },
320 {
321 "LD_VLAN_TAG",
322 .u.insns = {
323 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
324 SKF_AD_OFF + SKF_AD_VLAN_TAG),
325 BPF_STMT(BPF_RET | BPF_A, 0)
326 },
327 CLASSIC,
328 { },
329 {
330 { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
331 { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
332 },
333 },
334 {
335 "LD_VLAN_TAG_PRESENT",
336 .u.insns = {
337 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
338 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
339 BPF_STMT(BPF_RET | BPF_A, 0)
340 },
341 CLASSIC,
342 { },
343 {
344 { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
345 { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
346 },
347 },
348 {
349 "LD_IFINDEX",
350 .u.insns = {
351 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
352 SKF_AD_OFF + SKF_AD_IFINDEX),
353 BPF_STMT(BPF_RET | BPF_A, 0)
354 },
355 CLASSIC,
356 { },
357 { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
358 },
359 {
360 "LD_HATYPE",
361 .u.insns = {
362 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
363 SKF_AD_OFF + SKF_AD_HATYPE),
364 BPF_STMT(BPF_RET | BPF_A, 0)
365 },
366 CLASSIC,
367 { },
368 { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
369 },
370 {
371 "LD_CPU",
372 .u.insns = {
373 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
374 SKF_AD_OFF + SKF_AD_CPU),
375 BPF_STMT(BPF_MISC | BPF_TAX, 0),
376 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
377 SKF_AD_OFF + SKF_AD_CPU),
378 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
379 BPF_STMT(BPF_RET | BPF_A, 0)
380 },
381 CLASSIC,
382 { },
383 { { 1, 0 }, { 10, 0 } },
384 },
385 {
386 "LD_NLATTR",
387 .u.insns = {
388 BPF_STMT(BPF_LDX | BPF_IMM, 2),
389 BPF_STMT(BPF_MISC | BPF_TXA, 0),
390 BPF_STMT(BPF_LDX | BPF_IMM, 3),
391 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
392 SKF_AD_OFF + SKF_AD_NLATTR),
393 BPF_STMT(BPF_RET | BPF_A, 0)
394 },
395 CLASSIC,
396#ifdef __BIG_ENDIAN
397 { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
398#else
399 { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
400#endif
401 { { 4, 0 }, { 20, 6 } },
402 },
403 {
404 "LD_NLATTR_NEST",
405 .u.insns = {
406 BPF_STMT(BPF_LD | BPF_IMM, 2),
407 BPF_STMT(BPF_LDX | BPF_IMM, 3),
408 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
409 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
410 BPF_STMT(BPF_LD | BPF_IMM, 2),
411 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
412 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
413 BPF_STMT(BPF_LD | BPF_IMM, 2),
414 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
415 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
416 BPF_STMT(BPF_LD | BPF_IMM, 2),
417 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
418 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
419 BPF_STMT(BPF_LD | BPF_IMM, 2),
420 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
421 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
422 BPF_STMT(BPF_LD | BPF_IMM, 2),
423 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
424 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
425 BPF_STMT(BPF_LD | BPF_IMM, 2),
426 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
427 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
428 BPF_STMT(BPF_LD | BPF_IMM, 2),
429 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
430 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
431 BPF_STMT(BPF_RET | BPF_A, 0)
432 },
433 CLASSIC,
434#ifdef __BIG_ENDIAN
435 { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
436#else
437 { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
438#endif
439 { { 4, 0 }, { 20, 10 } },
440 },
441 {
442 "LD_PAYLOAD_OFF",
443 .u.insns = {
444 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
445 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
446 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
447 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
448 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
449 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
450 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
451 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
452 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
453 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
454 BPF_STMT(BPF_RET | BPF_A, 0)
455 },
456 CLASSIC,
457 /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
458 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
459 * id 9737, seq 1, length 64
460 */
461 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
463 0x08, 0x00,
464 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
465 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
466 { { 30, 0 }, { 100, 42 } },
467 },
468 {
469 "LD_ANC_XOR",
470 .u.insns = {
471 BPF_STMT(BPF_LD | BPF_IMM, 10),
472 BPF_STMT(BPF_LDX | BPF_IMM, 300),
473 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
474 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
475 BPF_STMT(BPF_RET | BPF_A, 0)
476 },
477 CLASSIC,
478 { },
479 { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
480 },
481 {
482 "SPILL_FILL",
483 .u.insns = {
484 BPF_STMT(BPF_LDX | BPF_LEN, 0),
485 BPF_STMT(BPF_LD | BPF_IMM, 2),
486 BPF_STMT(BPF_ALU | BPF_RSH, 1),
487 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
488 BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
489 BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
490 BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
491 BPF_STMT(BPF_STX, 15), /* M3 = len */
492 BPF_STMT(BPF_LDX | BPF_MEM, 1),
493 BPF_STMT(BPF_LD | BPF_MEM, 2),
494 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
495 BPF_STMT(BPF_LDX | BPF_MEM, 15),
496 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
497 BPF_STMT(BPF_RET | BPF_A, 0)
498 },
499 CLASSIC,
500 { },
501 { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
502 },
503 {
504 "JEQ",
505 .u.insns = {
506 BPF_STMT(BPF_LDX | BPF_LEN, 0),
507 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
508 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
509 BPF_STMT(BPF_RET | BPF_K, 1),
510 BPF_STMT(BPF_RET | BPF_K, MAX_K)
511 },
512 CLASSIC,
513 { 3, 3, 3, 3, 3 },
514 { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
515 },
516 {
517 "JGT",
518 .u.insns = {
519 BPF_STMT(BPF_LDX | BPF_LEN, 0),
520 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
521 BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
522 BPF_STMT(BPF_RET | BPF_K, 1),
523 BPF_STMT(BPF_RET | BPF_K, MAX_K)
524 },
525 CLASSIC,
526 { 4, 4, 4, 3, 3 },
527 { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
528 },
529 {
530 "JGE",
531 .u.insns = {
532 BPF_STMT(BPF_LDX | BPF_LEN, 0),
533 BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
534 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
535 BPF_STMT(BPF_RET | BPF_K, 10),
536 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
537 BPF_STMT(BPF_RET | BPF_K, 20),
538 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
539 BPF_STMT(BPF_RET | BPF_K, 30),
540 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
541 BPF_STMT(BPF_RET | BPF_K, 40),
542 BPF_STMT(BPF_RET | BPF_K, MAX_K)
543 },
544 CLASSIC,
545 { 1, 2, 3, 4, 5 },
546 { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
547 },
548 {
549 "JSET",
550 .u.insns = {
551 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
552 BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
553 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
554 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
555 BPF_STMT(BPF_LDX | BPF_LEN, 0),
556 BPF_STMT(BPF_MISC | BPF_TXA, 0),
557 BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
558 BPF_STMT(BPF_MISC | BPF_TAX, 0),
559 BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
560 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
561 BPF_STMT(BPF_RET | BPF_K, 10),
562 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
563 BPF_STMT(BPF_RET | BPF_K, 20),
564 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
565 BPF_STMT(BPF_RET | BPF_K, 30),
566 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
567 BPF_STMT(BPF_RET | BPF_K, 30),
568 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
569 BPF_STMT(BPF_RET | BPF_K, 30),
570 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
571 BPF_STMT(BPF_RET | BPF_K, 30),
572 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
573 BPF_STMT(BPF_RET | BPF_K, 30),
574 BPF_STMT(BPF_RET | BPF_K, MAX_K)
575 },
576 CLASSIC,
577 { 0, 0xAA, 0x55, 1 },
578 { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
579 },
580 {
581 "tcpdump port 22",
582 .u.insns = {
583 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
584 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
585 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
586 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
587 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
588 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
589 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
590 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
591 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
592 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
593 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
594 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
595 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
596 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
597 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
598 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
599 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
600 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
601 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
602 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
603 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
604 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
605 BPF_STMT(BPF_RET | BPF_K, 0xffff),
606 BPF_STMT(BPF_RET | BPF_K, 0),
607 },
608 CLASSIC,
609 /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
610 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
611 * seq 1305692979:1305693027, ack 3650467037, win 65535,
612 * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
613 */
614 { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
615 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
616 0x08, 0x00,
617 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
618 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
619 0x0a, 0x01, 0x01, 0x95, /* ip src */
620 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
621 0xc2, 0x24,
622 0x00, 0x16 /* dst port */ },
623 { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
624 },
625 {
626 "tcpdump complex",
627 .u.insns = {
628 /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
629 * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
630 * (len > 115 or len < 30000000000)' -d
631 */
632 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
633 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
634 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
635 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
636 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
637 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
638 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
639 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
640 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
641 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
642 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
643 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
644 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
645 BPF_STMT(BPF_ST, 1),
646 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
647 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
648 BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
649 BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
650 BPF_STMT(BPF_LD | BPF_MEM, 1),
651 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
652 BPF_STMT(BPF_ST, 5),
653 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
654 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
655 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
656 BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
657 BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
658 BPF_STMT(BPF_LD | BPF_MEM, 5),
659 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
660 BPF_STMT(BPF_LD | BPF_LEN, 0),
661 BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
662 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
663 BPF_STMT(BPF_RET | BPF_K, 0xffff),
664 BPF_STMT(BPF_RET | BPF_K, 0),
665 },
666 CLASSIC,
667 { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
668 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
669 0x08, 0x00,
670 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
671 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
672 0x0a, 0x01, 0x01, 0x95, /* ip src */
673 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
674 0xc2, 0x24,
675 0x00, 0x16 /* dst port */ },
676 { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
677 },
678 {
679 "RET_A",
680 .u.insns = {
681			/* check that uninitialized X and A contain zeros */
682 BPF_STMT(BPF_MISC | BPF_TXA, 0),
683 BPF_STMT(BPF_RET | BPF_A, 0)
684 },
685 CLASSIC,
686 { },
687 { {1, 0}, {2, 0} },
688 },
689 {
690 "INT: ADD trivial",
691 .u.insns_int = {
692 BPF_ALU64_IMM(BPF_MOV, R1, 1),
693 BPF_ALU64_IMM(BPF_ADD, R1, 2),
694 BPF_ALU64_IMM(BPF_MOV, R2, 3),
695 BPF_ALU64_REG(BPF_SUB, R1, R2),
696 BPF_ALU64_IMM(BPF_ADD, R1, -1),
697 BPF_ALU64_IMM(BPF_MUL, R1, 3),
698 BPF_ALU64_REG(BPF_MOV, R0, R1),
699 BPF_EXIT_INSN(),
700 },
701 INTERNAL,
702 { },
703 { { 0, 0xfffffffd } }
704 },
705 {
706 "INT: MUL_X",
707 .u.insns_int = {
708 BPF_ALU64_IMM(BPF_MOV, R0, -1),
709 BPF_ALU64_IMM(BPF_MOV, R1, -1),
710 BPF_ALU64_IMM(BPF_MOV, R2, 3),
711 BPF_ALU64_REG(BPF_MUL, R1, R2),
712 BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
713 BPF_EXIT_INSN(),
714 BPF_ALU64_IMM(BPF_MOV, R0, 1),
715 BPF_EXIT_INSN(),
716 },
717 INTERNAL,
718 { },
719 { { 0, 1 } }
720 },
721 {
722 "INT: MUL_X2",
723 .u.insns_int = {
724 BPF_ALU32_IMM(BPF_MOV, R0, -1),
725 BPF_ALU32_IMM(BPF_MOV, R1, -1),
726 BPF_ALU32_IMM(BPF_MOV, R2, 3),
727 BPF_ALU64_REG(BPF_MUL, R1, R2),
728 BPF_ALU64_IMM(BPF_RSH, R1, 8),
729 BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
730 BPF_EXIT_INSN(),
731 BPF_ALU32_IMM(BPF_MOV, R0, 1),
732 BPF_EXIT_INSN(),
733 },
734 INTERNAL,
735 { },
736 { { 0, 1 } }
737 },
738 {
739 "INT: MUL32_X",
740 .u.insns_int = {
741 BPF_ALU32_IMM(BPF_MOV, R0, -1),
742 BPF_ALU64_IMM(BPF_MOV, R1, -1),
743 BPF_ALU32_IMM(BPF_MOV, R2, 3),
744 BPF_ALU32_REG(BPF_MUL, R1, R2),
745 BPF_ALU64_IMM(BPF_RSH, R1, 8),
746 BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
747 BPF_EXIT_INSN(),
748 BPF_ALU32_IMM(BPF_MOV, R0, 1),
749 BPF_EXIT_INSN(),
750 },
751 INTERNAL,
752 { },
753 { { 0, 1 } }
754 },
755 {
756 /* Have to test all register combinations, since
757 * JITing of different registers will produce
758 * different asm code.
759 */
760 "INT: ADD 64-bit",
761 .u.insns_int = {
762 BPF_ALU64_IMM(BPF_MOV, R0, 0),
763 BPF_ALU64_IMM(BPF_MOV, R1, 1),
764 BPF_ALU64_IMM(BPF_MOV, R2, 2),
765 BPF_ALU64_IMM(BPF_MOV, R3, 3),
766 BPF_ALU64_IMM(BPF_MOV, R4, 4),
767 BPF_ALU64_IMM(BPF_MOV, R5, 5),
768 BPF_ALU64_IMM(BPF_MOV, R6, 6),
769 BPF_ALU64_IMM(BPF_MOV, R7, 7),
770 BPF_ALU64_IMM(BPF_MOV, R8, 8),
771 BPF_ALU64_IMM(BPF_MOV, R9, 9),
772 BPF_ALU64_IMM(BPF_ADD, R0, 20),
773 BPF_ALU64_IMM(BPF_ADD, R1, 20),
774 BPF_ALU64_IMM(BPF_ADD, R2, 20),
775 BPF_ALU64_IMM(BPF_ADD, R3, 20),
776 BPF_ALU64_IMM(BPF_ADD, R4, 20),
777 BPF_ALU64_IMM(BPF_ADD, R5, 20),
778 BPF_ALU64_IMM(BPF_ADD, R6, 20),
779 BPF_ALU64_IMM(BPF_ADD, R7, 20),
780 BPF_ALU64_IMM(BPF_ADD, R8, 20),
781 BPF_ALU64_IMM(BPF_ADD, R9, 20),
782 BPF_ALU64_IMM(BPF_SUB, R0, 10),
783 BPF_ALU64_IMM(BPF_SUB, R1, 10),
784 BPF_ALU64_IMM(BPF_SUB, R2, 10),
785 BPF_ALU64_IMM(BPF_SUB, R3, 10),
786 BPF_ALU64_IMM(BPF_SUB, R4, 10),
787 BPF_ALU64_IMM(BPF_SUB, R5, 10),
788 BPF_ALU64_IMM(BPF_SUB, R6, 10),
789 BPF_ALU64_IMM(BPF_SUB, R7, 10),
790 BPF_ALU64_IMM(BPF_SUB, R8, 10),
791 BPF_ALU64_IMM(BPF_SUB, R9, 10),
792 BPF_ALU64_REG(BPF_ADD, R0, R0),
793 BPF_ALU64_REG(BPF_ADD, R0, R1),
794 BPF_ALU64_REG(BPF_ADD, R0, R2),
795 BPF_ALU64_REG(BPF_ADD, R0, R3),
796 BPF_ALU64_REG(BPF_ADD, R0, R4),
797 BPF_ALU64_REG(BPF_ADD, R0, R5),
798 BPF_ALU64_REG(BPF_ADD, R0, R6),
799 BPF_ALU64_REG(BPF_ADD, R0, R7),
800 BPF_ALU64_REG(BPF_ADD, R0, R8),
801 BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
802 BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
803 BPF_EXIT_INSN(),
804 BPF_ALU64_REG(BPF_ADD, R1, R0),
805 BPF_ALU64_REG(BPF_ADD, R1, R1),
806 BPF_ALU64_REG(BPF_ADD, R1, R2),
807 BPF_ALU64_REG(BPF_ADD, R1, R3),
808 BPF_ALU64_REG(BPF_ADD, R1, R4),
809 BPF_ALU64_REG(BPF_ADD, R1, R5),
810 BPF_ALU64_REG(BPF_ADD, R1, R6),
811 BPF_ALU64_REG(BPF_ADD, R1, R7),
812 BPF_ALU64_REG(BPF_ADD, R1, R8),
813 BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
814 BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
815 BPF_EXIT_INSN(),
816 BPF_ALU64_REG(BPF_ADD, R2, R0),
817 BPF_ALU64_REG(BPF_ADD, R2, R1),
818 BPF_ALU64_REG(BPF_ADD, R2, R2),
819 BPF_ALU64_REG(BPF_ADD, R2, R3),
820 BPF_ALU64_REG(BPF_ADD, R2, R4),
821 BPF_ALU64_REG(BPF_ADD, R2, R5),
822 BPF_ALU64_REG(BPF_ADD, R2, R6),
823 BPF_ALU64_REG(BPF_ADD, R2, R7),
824 BPF_ALU64_REG(BPF_ADD, R2, R8),
825 BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
826 BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
827 BPF_EXIT_INSN(),
828 BPF_ALU64_REG(BPF_ADD, R3, R0),
829 BPF_ALU64_REG(BPF_ADD, R3, R1),
830 BPF_ALU64_REG(BPF_ADD, R3, R2),
831 BPF_ALU64_REG(BPF_ADD, R3, R3),
832 BPF_ALU64_REG(BPF_ADD, R3, R4),
833 BPF_ALU64_REG(BPF_ADD, R3, R5),
834 BPF_ALU64_REG(BPF_ADD, R3, R6),
835 BPF_ALU64_REG(BPF_ADD, R3, R7),
836 BPF_ALU64_REG(BPF_ADD, R3, R8),
837 BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
838 BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
839 BPF_EXIT_INSN(),
840 BPF_ALU64_REG(BPF_ADD, R4, R0),
841 BPF_ALU64_REG(BPF_ADD, R4, R1),
842 BPF_ALU64_REG(BPF_ADD, R4, R2),
843 BPF_ALU64_REG(BPF_ADD, R4, R3),
844 BPF_ALU64_REG(BPF_ADD, R4, R4),
845 BPF_ALU64_REG(BPF_ADD, R4, R5),
846 BPF_ALU64_REG(BPF_ADD, R4, R6),
847 BPF_ALU64_REG(BPF_ADD, R4, R7),
848 BPF_ALU64_REG(BPF_ADD, R4, R8),
849 BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
850 BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
851 BPF_EXIT_INSN(),
852 BPF_ALU64_REG(BPF_ADD, R5, R0),
853 BPF_ALU64_REG(BPF_ADD, R5, R1),
854 BPF_ALU64_REG(BPF_ADD, R5, R2),
855 BPF_ALU64_REG(BPF_ADD, R5, R3),
856 BPF_ALU64_REG(BPF_ADD, R5, R4),
857 BPF_ALU64_REG(BPF_ADD, R5, R5),
858 BPF_ALU64_REG(BPF_ADD, R5, R6),
859 BPF_ALU64_REG(BPF_ADD, R5, R7),
860 BPF_ALU64_REG(BPF_ADD, R5, R8),
861 BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
862 BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
863 BPF_EXIT_INSN(),
864 BPF_ALU64_REG(BPF_ADD, R6, R0),
865 BPF_ALU64_REG(BPF_ADD, R6, R1),
866 BPF_ALU64_REG(BPF_ADD, R6, R2),
867 BPF_ALU64_REG(BPF_ADD, R6, R3),
868 BPF_ALU64_REG(BPF_ADD, R6, R4),
869 BPF_ALU64_REG(BPF_ADD, R6, R5),
870 BPF_ALU64_REG(BPF_ADD, R6, R6),
871 BPF_ALU64_REG(BPF_ADD, R6, R7),
872 BPF_ALU64_REG(BPF_ADD, R6, R8),
873 BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
874 BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
875 BPF_EXIT_INSN(),
876 BPF_ALU64_REG(BPF_ADD, R7, R0),
877 BPF_ALU64_REG(BPF_ADD, R7, R1),
878 BPF_ALU64_REG(BPF_ADD, R7, R2),
879 BPF_ALU64_REG(BPF_ADD, R7, R3),
880 BPF_ALU64_REG(BPF_ADD, R7, R4),
881 BPF_ALU64_REG(BPF_ADD, R7, R5),
882 BPF_ALU64_REG(BPF_ADD, R7, R6),
883 BPF_ALU64_REG(BPF_ADD, R7, R7),
884 BPF_ALU64_REG(BPF_ADD, R7, R8),
885 BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
886 BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
887 BPF_EXIT_INSN(),
888 BPF_ALU64_REG(BPF_ADD, R8, R0),
889 BPF_ALU64_REG(BPF_ADD, R8, R1),
890 BPF_ALU64_REG(BPF_ADD, R8, R2),
891 BPF_ALU64_REG(BPF_ADD, R8, R3),
892 BPF_ALU64_REG(BPF_ADD, R8, R4),
893 BPF_ALU64_REG(BPF_ADD, R8, R5),
894 BPF_ALU64_REG(BPF_ADD, R8, R6),
895 BPF_ALU64_REG(BPF_ADD, R8, R7),
896 BPF_ALU64_REG(BPF_ADD, R8, R8),
897 BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
898 BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
899 BPF_EXIT_INSN(),
900 BPF_ALU64_REG(BPF_ADD, R9, R0),
901 BPF_ALU64_REG(BPF_ADD, R9, R1),
902 BPF_ALU64_REG(BPF_ADD, R9, R2),
903 BPF_ALU64_REG(BPF_ADD, R9, R3),
904 BPF_ALU64_REG(BPF_ADD, R9, R4),
905 BPF_ALU64_REG(BPF_ADD, R9, R5),
906 BPF_ALU64_REG(BPF_ADD, R9, R6),
907 BPF_ALU64_REG(BPF_ADD, R9, R7),
908 BPF_ALU64_REG(BPF_ADD, R9, R8),
909 BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
910 BPF_ALU64_REG(BPF_MOV, R0, R9),
911 BPF_EXIT_INSN(),
912 },
913 INTERNAL,
914 { },
915 { { 0, 2957380 } }
916 },
917 {
918 "INT: ADD 32-bit",
919 .u.insns_int = {
920 BPF_ALU32_IMM(BPF_MOV, R0, 20),
921 BPF_ALU32_IMM(BPF_MOV, R1, 1),
922 BPF_ALU32_IMM(BPF_MOV, R2, 2),
923 BPF_ALU32_IMM(BPF_MOV, R3, 3),
924 BPF_ALU32_IMM(BPF_MOV, R4, 4),
925 BPF_ALU32_IMM(BPF_MOV, R5, 5),
926 BPF_ALU32_IMM(BPF_MOV, R6, 6),
927 BPF_ALU32_IMM(BPF_MOV, R7, 7),
928 BPF_ALU32_IMM(BPF_MOV, R8, 8),
929 BPF_ALU32_IMM(BPF_MOV, R9, 9),
930 BPF_ALU64_IMM(BPF_ADD, R1, 10),
931 BPF_ALU64_IMM(BPF_ADD, R2, 10),
932 BPF_ALU64_IMM(BPF_ADD, R3, 10),
933 BPF_ALU64_IMM(BPF_ADD, R4, 10),
934 BPF_ALU64_IMM(BPF_ADD, R5, 10),
935 BPF_ALU64_IMM(BPF_ADD, R6, 10),
936 BPF_ALU64_IMM(BPF_ADD, R7, 10),
937 BPF_ALU64_IMM(BPF_ADD, R8, 10),
938 BPF_ALU64_IMM(BPF_ADD, R9, 10),
939 BPF_ALU32_REG(BPF_ADD, R0, R1),
940 BPF_ALU32_REG(BPF_ADD, R0, R2),
941 BPF_ALU32_REG(BPF_ADD, R0, R3),
942 BPF_ALU32_REG(BPF_ADD, R0, R4),
943 BPF_ALU32_REG(BPF_ADD, R0, R5),
944 BPF_ALU32_REG(BPF_ADD, R0, R6),
945 BPF_ALU32_REG(BPF_ADD, R0, R7),
946 BPF_ALU32_REG(BPF_ADD, R0, R8),
947 BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
948 BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
949 BPF_EXIT_INSN(),
950 BPF_ALU32_REG(BPF_ADD, R1, R0),
951 BPF_ALU32_REG(BPF_ADD, R1, R1),
952 BPF_ALU32_REG(BPF_ADD, R1, R2),
953 BPF_ALU32_REG(BPF_ADD, R1, R3),
954 BPF_ALU32_REG(BPF_ADD, R1, R4),
955 BPF_ALU32_REG(BPF_ADD, R1, R5),
956 BPF_ALU32_REG(BPF_ADD, R1, R6),
957 BPF_ALU32_REG(BPF_ADD, R1, R7),
958 BPF_ALU32_REG(BPF_ADD, R1, R8),
959 BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
960 BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
961 BPF_EXIT_INSN(),
962 BPF_ALU32_REG(BPF_ADD, R2, R0),
963 BPF_ALU32_REG(BPF_ADD, R2, R1),
964 BPF_ALU32_REG(BPF_ADD, R2, R2),
965 BPF_ALU32_REG(BPF_ADD, R2, R3),
966 BPF_ALU32_REG(BPF_ADD, R2, R4),
967 BPF_ALU32_REG(BPF_ADD, R2, R5),
968 BPF_ALU32_REG(BPF_ADD, R2, R6),
969 BPF_ALU32_REG(BPF_ADD, R2, R7),
970 BPF_ALU32_REG(BPF_ADD, R2, R8),
971 BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
972 BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
973 BPF_EXIT_INSN(),
974 BPF_ALU32_REG(BPF_ADD, R3, R0),
975 BPF_ALU32_REG(BPF_ADD, R3, R1),
976 BPF_ALU32_REG(BPF_ADD, R3, R2),
977 BPF_ALU32_REG(BPF_ADD, R3, R3),
978 BPF_ALU32_REG(BPF_ADD, R3, R4),
979 BPF_ALU32_REG(BPF_ADD, R3, R5),
980 BPF_ALU32_REG(BPF_ADD, R3, R6),
981 BPF_ALU32_REG(BPF_ADD, R3, R7),
982 BPF_ALU32_REG(BPF_ADD, R3, R8),
983 BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
984 BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
985 BPF_EXIT_INSN(),
986 BPF_ALU32_REG(BPF_ADD, R4, R0),
987 BPF_ALU32_REG(BPF_ADD, R4, R1),
988 BPF_ALU32_REG(BPF_ADD, R4, R2),
989 BPF_ALU32_REG(BPF_ADD, R4, R3),
990 BPF_ALU32_REG(BPF_ADD, R4, R4),
991 BPF_ALU32_REG(BPF_ADD, R4, R5),
992 BPF_ALU32_REG(BPF_ADD, R4, R6),
993 BPF_ALU32_REG(BPF_ADD, R4, R7),
994 BPF_ALU32_REG(BPF_ADD, R4, R8),
995 BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
996 BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
997 BPF_EXIT_INSN(),
998 BPF_ALU32_REG(BPF_ADD, R5, R0),
999 BPF_ALU32_REG(BPF_ADD, R5, R1),
1000 BPF_ALU32_REG(BPF_ADD, R5, R2),
1001 BPF_ALU32_REG(BPF_ADD, R5, R3),
1002 BPF_ALU32_REG(BPF_ADD, R5, R4),
1003 BPF_ALU32_REG(BPF_ADD, R5, R5),
1004 BPF_ALU32_REG(BPF_ADD, R5, R6),
1005 BPF_ALU32_REG(BPF_ADD, R5, R7),
1006 BPF_ALU32_REG(BPF_ADD, R5, R8),
1007 BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
1008 BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
1009 BPF_EXIT_INSN(),
1010 BPF_ALU32_REG(BPF_ADD, R6, R0),
1011 BPF_ALU32_REG(BPF_ADD, R6, R1),
1012 BPF_ALU32_REG(BPF_ADD, R6, R2),
1013 BPF_ALU32_REG(BPF_ADD, R6, R3),
1014 BPF_ALU32_REG(BPF_ADD, R6, R4),
1015 BPF_ALU32_REG(BPF_ADD, R6, R5),
1016 BPF_ALU32_REG(BPF_ADD, R6, R6),
1017 BPF_ALU32_REG(BPF_ADD, R6, R7),
1018 BPF_ALU32_REG(BPF_ADD, R6, R8),
1019 BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
1020 BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
1021 BPF_EXIT_INSN(),
1022 BPF_ALU32_REG(BPF_ADD, R7, R0),
1023 BPF_ALU32_REG(BPF_ADD, R7, R1),
1024 BPF_ALU32_REG(BPF_ADD, R7, R2),
1025 BPF_ALU32_REG(BPF_ADD, R7, R3),
1026 BPF_ALU32_REG(BPF_ADD, R7, R4),
1027 BPF_ALU32_REG(BPF_ADD, R7, R5),
1028 BPF_ALU32_REG(BPF_ADD, R7, R6),
1029 BPF_ALU32_REG(BPF_ADD, R7, R7),
1030 BPF_ALU32_REG(BPF_ADD, R7, R8),
1031 BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
1032 BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
1033 BPF_EXIT_INSN(),
1034 BPF_ALU32_REG(BPF_ADD, R8, R0),
1035 BPF_ALU32_REG(BPF_ADD, R8, R1),
1036 BPF_ALU32_REG(BPF_ADD, R8, R2),
1037 BPF_ALU32_REG(BPF_ADD, R8, R3),
1038 BPF_ALU32_REG(BPF_ADD, R8, R4),
1039 BPF_ALU32_REG(BPF_ADD, R8, R5),
1040 BPF_ALU32_REG(BPF_ADD, R8, R6),
1041 BPF_ALU32_REG(BPF_ADD, R8, R7),
1042 BPF_ALU32_REG(BPF_ADD, R8, R8),
1043 BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
1044 BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
1045 BPF_EXIT_INSN(),
1046 BPF_ALU32_REG(BPF_ADD, R9, R0),
1047 BPF_ALU32_REG(BPF_ADD, R9, R1),
1048 BPF_ALU32_REG(BPF_ADD, R9, R2),
1049 BPF_ALU32_REG(BPF_ADD, R9, R3),
1050 BPF_ALU32_REG(BPF_ADD, R9, R4),
1051 BPF_ALU32_REG(BPF_ADD, R9, R5),
1052 BPF_ALU32_REG(BPF_ADD, R9, R6),
1053 BPF_ALU32_REG(BPF_ADD, R9, R7),
1054 BPF_ALU32_REG(BPF_ADD, R9, R8),
1055 BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
1056 BPF_ALU32_REG(BPF_MOV, R0, R9),
1057 BPF_EXIT_INSN(),
1058 },
1059 INTERNAL,
1060 { },
1061 { { 0, 2957380 } }
1062 },
1063 { /* Mainly checking JIT here. */
1064 "INT: SUB",
1065 .u.insns_int = {
1066 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1067 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1068 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1069 BPF_ALU64_IMM(BPF_MOV, R3, 3),
1070 BPF_ALU64_IMM(BPF_MOV, R4, 4),
1071 BPF_ALU64_IMM(BPF_MOV, R5, 5),
1072 BPF_ALU64_IMM(BPF_MOV, R6, 6),
1073 BPF_ALU64_IMM(BPF_MOV, R7, 7),
1074 BPF_ALU64_IMM(BPF_MOV, R8, 8),
1075 BPF_ALU64_IMM(BPF_MOV, R9, 9),
1076 BPF_ALU64_REG(BPF_SUB, R0, R0),
1077 BPF_ALU64_REG(BPF_SUB, R0, R1),
1078 BPF_ALU64_REG(BPF_SUB, R0, R2),
1079 BPF_ALU64_REG(BPF_SUB, R0, R3),
1080 BPF_ALU64_REG(BPF_SUB, R0, R4),
1081 BPF_ALU64_REG(BPF_SUB, R0, R5),
1082 BPF_ALU64_REG(BPF_SUB, R0, R6),
1083 BPF_ALU64_REG(BPF_SUB, R0, R7),
1084 BPF_ALU64_REG(BPF_SUB, R0, R8),
1085 BPF_ALU64_REG(BPF_SUB, R0, R9),
1086 BPF_ALU64_IMM(BPF_SUB, R0, 10),
1087 BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
1088 BPF_EXIT_INSN(),
1089 BPF_ALU64_REG(BPF_SUB, R1, R0),
1090 BPF_ALU64_REG(BPF_SUB, R1, R2),
1091 BPF_ALU64_REG(BPF_SUB, R1, R3),
1092 BPF_ALU64_REG(BPF_SUB, R1, R4),
1093 BPF_ALU64_REG(BPF_SUB, R1, R5),
1094 BPF_ALU64_REG(BPF_SUB, R1, R6),
1095 BPF_ALU64_REG(BPF_SUB, R1, R7),
1096 BPF_ALU64_REG(BPF_SUB, R1, R8),
1097 BPF_ALU64_REG(BPF_SUB, R1, R9),
1098 BPF_ALU64_IMM(BPF_SUB, R1, 10),
1099 BPF_ALU64_REG(BPF_SUB, R2, R0),
1100 BPF_ALU64_REG(BPF_SUB, R2, R1),
1101 BPF_ALU64_REG(BPF_SUB, R2, R3),
1102 BPF_ALU64_REG(BPF_SUB, R2, R4),
1103 BPF_ALU64_REG(BPF_SUB, R2, R5),
1104 BPF_ALU64_REG(BPF_SUB, R2, R6),
1105 BPF_ALU64_REG(BPF_SUB, R2, R7),
1106 BPF_ALU64_REG(BPF_SUB, R2, R8),
1107 BPF_ALU64_REG(BPF_SUB, R2, R9),
1108 BPF_ALU64_IMM(BPF_SUB, R2, 10),
1109 BPF_ALU64_REG(BPF_SUB, R3, R0),
1110 BPF_ALU64_REG(BPF_SUB, R3, R1),
1111 BPF_ALU64_REG(BPF_SUB, R3, R2),
1112 BPF_ALU64_REG(BPF_SUB, R3, R4),
1113 BPF_ALU64_REG(BPF_SUB, R3, R5),
1114 BPF_ALU64_REG(BPF_SUB, R3, R6),
1115 BPF_ALU64_REG(BPF_SUB, R3, R7),
1116 BPF_ALU64_REG(BPF_SUB, R3, R8),
1117 BPF_ALU64_REG(BPF_SUB, R3, R9),
1118 BPF_ALU64_IMM(BPF_SUB, R3, 10),
1119 BPF_ALU64_REG(BPF_SUB, R4, R0),
1120 BPF_ALU64_REG(BPF_SUB, R4, R1),
1121 BPF_ALU64_REG(BPF_SUB, R4, R2),
1122 BPF_ALU64_REG(BPF_SUB, R4, R3),
1123 BPF_ALU64_REG(BPF_SUB, R4, R5),
1124 BPF_ALU64_REG(BPF_SUB, R4, R6),
1125 BPF_ALU64_REG(BPF_SUB, R4, R7),
1126 BPF_ALU64_REG(BPF_SUB, R4, R8),
1127 BPF_ALU64_REG(BPF_SUB, R4, R9),
1128 BPF_ALU64_IMM(BPF_SUB, R4, 10),
1129 BPF_ALU64_REG(BPF_SUB, R5, R0),
1130 BPF_ALU64_REG(BPF_SUB, R5, R1),
1131 BPF_ALU64_REG(BPF_SUB, R5, R2),
1132 BPF_ALU64_REG(BPF_SUB, R5, R3),
1133 BPF_ALU64_REG(BPF_SUB, R5, R4),
1134 BPF_ALU64_REG(BPF_SUB, R5, R6),
1135 BPF_ALU64_REG(BPF_SUB, R5, R7),
1136 BPF_ALU64_REG(BPF_SUB, R5, R8),
1137 BPF_ALU64_REG(BPF_SUB, R5, R9),
1138 BPF_ALU64_IMM(BPF_SUB, R5, 10),
1139 BPF_ALU64_REG(BPF_SUB, R6, R0),
1140 BPF_ALU64_REG(BPF_SUB, R6, R1),
1141 BPF_ALU64_REG(BPF_SUB, R6, R2),
1142 BPF_ALU64_REG(BPF_SUB, R6, R3),
1143 BPF_ALU64_REG(BPF_SUB, R6, R4),
1144 BPF_ALU64_REG(BPF_SUB, R6, R5),
1145 BPF_ALU64_REG(BPF_SUB, R6, R7),
1146 BPF_ALU64_REG(BPF_SUB, R6, R8),
1147 BPF_ALU64_REG(BPF_SUB, R6, R9),
1148 BPF_ALU64_IMM(BPF_SUB, R6, 10),
1149 BPF_ALU64_REG(BPF_SUB, R7, R0),
1150 BPF_ALU64_REG(BPF_SUB, R7, R1),
1151 BPF_ALU64_REG(BPF_SUB, R7, R2),
1152 BPF_ALU64_REG(BPF_SUB, R7, R3),
1153 BPF_ALU64_REG(BPF_SUB, R7, R4),
1154 BPF_ALU64_REG(BPF_SUB, R7, R5),
1155 BPF_ALU64_REG(BPF_SUB, R7, R6),
1156 BPF_ALU64_REG(BPF_SUB, R7, R8),
1157 BPF_ALU64_REG(BPF_SUB, R7, R9),
1158 BPF_ALU64_IMM(BPF_SUB, R7, 10),
1159 BPF_ALU64_REG(BPF_SUB, R8, R0),
1160 BPF_ALU64_REG(BPF_SUB, R8, R1),
1161 BPF_ALU64_REG(BPF_SUB, R8, R2),
1162 BPF_ALU64_REG(BPF_SUB, R8, R3),
1163 BPF_ALU64_REG(BPF_SUB, R8, R4),
1164 BPF_ALU64_REG(BPF_SUB, R8, R5),
1165 BPF_ALU64_REG(BPF_SUB, R8, R6),
1166 BPF_ALU64_REG(BPF_SUB, R8, R7),
1167 BPF_ALU64_REG(BPF_SUB, R8, R9),
1168 BPF_ALU64_IMM(BPF_SUB, R8, 10),
1169 BPF_ALU64_REG(BPF_SUB, R9, R0),
1170 BPF_ALU64_REG(BPF_SUB, R9, R1),
1171 BPF_ALU64_REG(BPF_SUB, R9, R2),
1172 BPF_ALU64_REG(BPF_SUB, R9, R3),
1173 BPF_ALU64_REG(BPF_SUB, R9, R4),
1174 BPF_ALU64_REG(BPF_SUB, R9, R5),
1175 BPF_ALU64_REG(BPF_SUB, R9, R6),
1176 BPF_ALU64_REG(BPF_SUB, R9, R7),
1177 BPF_ALU64_REG(BPF_SUB, R9, R8),
1178 BPF_ALU64_IMM(BPF_SUB, R9, 10),
1179 BPF_ALU64_IMM(BPF_SUB, R0, 10),
1180 BPF_ALU64_IMM(BPF_NEG, R0, 0),
1181 BPF_ALU64_REG(BPF_SUB, R0, R1),
1182 BPF_ALU64_REG(BPF_SUB, R0, R2),
1183 BPF_ALU64_REG(BPF_SUB, R0, R3),
1184 BPF_ALU64_REG(BPF_SUB, R0, R4),
1185 BPF_ALU64_REG(BPF_SUB, R0, R5),
1186 BPF_ALU64_REG(BPF_SUB, R0, R6),
1187 BPF_ALU64_REG(BPF_SUB, R0, R7),
1188 BPF_ALU64_REG(BPF_SUB, R0, R8),
1189 BPF_ALU64_REG(BPF_SUB, R0, R9),
1190 BPF_EXIT_INSN(),
1191 },
1192 INTERNAL,
1193 { },
1194 { { 0, 11 } }
1195 },
1196 { /* Mainly checking JIT here. */
1197 "INT: XOR",
1198 .u.insns_int = {
1199 BPF_ALU64_REG(BPF_SUB, R0, R0),
1200 BPF_ALU64_REG(BPF_XOR, R1, R1),
1201 BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
1202 BPF_EXIT_INSN(),
1203 BPF_ALU64_IMM(BPF_MOV, R0, 10),
1204 BPF_ALU64_IMM(BPF_MOV, R1, -1),
1205 BPF_ALU64_REG(BPF_SUB, R1, R1),
1206 BPF_ALU64_REG(BPF_XOR, R2, R2),
1207 BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
1208 BPF_EXIT_INSN(),
1209 BPF_ALU64_REG(BPF_SUB, R2, R2),
1210 BPF_ALU64_REG(BPF_XOR, R3, R3),
1211 BPF_ALU64_IMM(BPF_MOV, R0, 10),
1212 BPF_ALU64_IMM(BPF_MOV, R1, -1),
1213 BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
1214 BPF_EXIT_INSN(),
1215 BPF_ALU64_REG(BPF_SUB, R3, R3),
1216 BPF_ALU64_REG(BPF_XOR, R4, R4),
1217 BPF_ALU64_IMM(BPF_MOV, R2, 1),
1218 BPF_ALU64_IMM(BPF_MOV, R5, -1),
1219 BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
1220 BPF_EXIT_INSN(),
1221 BPF_ALU64_REG(BPF_SUB, R4, R4),
1222 BPF_ALU64_REG(BPF_XOR, R5, R5),
1223 BPF_ALU64_IMM(BPF_MOV, R3, 1),
1224 BPF_ALU64_IMM(BPF_MOV, R7, -1),
1225 BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
1226 BPF_EXIT_INSN(),
1227 BPF_ALU64_IMM(BPF_MOV, R5, 1),
1228 BPF_ALU64_REG(BPF_SUB, R5, R5),
1229 BPF_ALU64_REG(BPF_XOR, R6, R6),
1230 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1231 BPF_ALU64_IMM(BPF_MOV, R8, -1),
1232 BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
1233 BPF_EXIT_INSN(),
1234 BPF_ALU64_REG(BPF_SUB, R6, R6),
1235 BPF_ALU64_REG(BPF_XOR, R7, R7),
1236 BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
1237 BPF_EXIT_INSN(),
1238 BPF_ALU64_REG(BPF_SUB, R7, R7),
1239 BPF_ALU64_REG(BPF_XOR, R8, R8),
1240 BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
1241 BPF_EXIT_INSN(),
1242 BPF_ALU64_REG(BPF_SUB, R8, R8),
1243 BPF_ALU64_REG(BPF_XOR, R9, R9),
1244 BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
1245 BPF_EXIT_INSN(),
1246 BPF_ALU64_REG(BPF_SUB, R9, R9),
1247 BPF_ALU64_REG(BPF_XOR, R0, R0),
1248 BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
1249 BPF_EXIT_INSN(),
1250 BPF_ALU64_REG(BPF_SUB, R1, R1),
1251 BPF_ALU64_REG(BPF_XOR, R0, R0),
1252 BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
1253 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1254 BPF_EXIT_INSN(),
1255 BPF_ALU64_IMM(BPF_MOV, R0, 1),
1256 BPF_EXIT_INSN(),
1257 },
1258 INTERNAL,
1259 { },
1260 { { 0, 1 } }
1261 },
1262 { /* Mainly checking JIT here. */
1263 "INT: MUL",
1264 .u.insns_int = {
1265 BPF_ALU64_IMM(BPF_MOV, R0, 11),
1266 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1267 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1268 BPF_ALU64_IMM(BPF_MOV, R3, 3),
1269 BPF_ALU64_IMM(BPF_MOV, R4, 4),
1270 BPF_ALU64_IMM(BPF_MOV, R5, 5),
1271 BPF_ALU64_IMM(BPF_MOV, R6, 6),
1272 BPF_ALU64_IMM(BPF_MOV, R7, 7),
1273 BPF_ALU64_IMM(BPF_MOV, R8, 8),
1274 BPF_ALU64_IMM(BPF_MOV, R9, 9),
1275 BPF_ALU64_REG(BPF_MUL, R0, R0),
1276 BPF_ALU64_REG(BPF_MUL, R0, R1),
1277 BPF_ALU64_REG(BPF_MUL, R0, R2),
1278 BPF_ALU64_REG(BPF_MUL, R0, R3),
1279 BPF_ALU64_REG(BPF_MUL, R0, R4),
1280 BPF_ALU64_REG(BPF_MUL, R0, R5),
1281 BPF_ALU64_REG(BPF_MUL, R0, R6),
1282 BPF_ALU64_REG(BPF_MUL, R0, R7),
1283 BPF_ALU64_REG(BPF_MUL, R0, R8),
1284 BPF_ALU64_REG(BPF_MUL, R0, R9),
1285 BPF_ALU64_IMM(BPF_MUL, R0, 10),
1286 BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
1287 BPF_EXIT_INSN(),
1288 BPF_ALU64_REG(BPF_MUL, R1, R0),
1289 BPF_ALU64_REG(BPF_MUL, R1, R2),
1290 BPF_ALU64_REG(BPF_MUL, R1, R3),
1291 BPF_ALU64_REG(BPF_MUL, R1, R4),
1292 BPF_ALU64_REG(BPF_MUL, R1, R5),
1293 BPF_ALU64_REG(BPF_MUL, R1, R6),
1294 BPF_ALU64_REG(BPF_MUL, R1, R7),
1295 BPF_ALU64_REG(BPF_MUL, R1, R8),
1296 BPF_ALU64_REG(BPF_MUL, R1, R9),
1297 BPF_ALU64_IMM(BPF_MUL, R1, 10),
1298 BPF_ALU64_REG(BPF_MOV, R2, R1),
1299 BPF_ALU64_IMM(BPF_RSH, R2, 32),
1300 BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
1301 BPF_EXIT_INSN(),
1302 BPF_ALU64_IMM(BPF_LSH, R1, 32),
1303 BPF_ALU64_IMM(BPF_ARSH, R1, 32),
1304 BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
1305 BPF_EXIT_INSN(),
1306 BPF_ALU64_REG(BPF_MUL, R2, R0),
1307 BPF_ALU64_REG(BPF_MUL, R2, R1),
1308 BPF_ALU64_REG(BPF_MUL, R2, R3),
1309 BPF_ALU64_REG(BPF_MUL, R2, R4),
1310 BPF_ALU64_REG(BPF_MUL, R2, R5),
1311 BPF_ALU64_REG(BPF_MUL, R2, R6),
1312 BPF_ALU64_REG(BPF_MUL, R2, R7),
1313 BPF_ALU64_REG(BPF_MUL, R2, R8),
1314 BPF_ALU64_REG(BPF_MUL, R2, R9),
1315 BPF_ALU64_IMM(BPF_MUL, R2, 10),
1316 BPF_ALU64_IMM(BPF_RSH, R2, 32),
1317 BPF_ALU64_REG(BPF_MOV, R0, R2),
1318 BPF_EXIT_INSN(),
1319 },
1320 INTERNAL,
1321 { },
1322 { { 0, 0x35d97ef2 } }
1323 },
1324 {
1325 "INT: ALU MIX",
1326 .u.insns_int = {
1327 BPF_ALU64_IMM(BPF_MOV, R0, 11),
1328 BPF_ALU64_IMM(BPF_ADD, R0, -1),
1329 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1330 BPF_ALU64_IMM(BPF_XOR, R2, 3),
1331 BPF_ALU64_REG(BPF_DIV, R0, R2),
1332 BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
1333 BPF_EXIT_INSN(),
1334 BPF_ALU64_IMM(BPF_MOD, R0, 3),
1335 BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
1336 BPF_EXIT_INSN(),
1337 BPF_ALU64_IMM(BPF_MOV, R0, -1),
1338 BPF_EXIT_INSN(),
1339 },
1340 INTERNAL,
1341 { },
1342 { { 0, -1 } }
1343 },
1344 {
1345 "INT: DIV + ABS",
1346 .u.insns_int = {
1347 BPF_ALU64_REG(BPF_MOV, R6, R1),
1348 BPF_LD_ABS(BPF_B, 3),
1349 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1350 BPF_ALU32_REG(BPF_DIV, R0, R2),
1351 BPF_ALU64_REG(BPF_MOV, R8, R0),
1352 BPF_LD_ABS(BPF_B, 4),
1353 BPF_ALU64_REG(BPF_ADD, R8, R0),
1354 BPF_LD_IND(BPF_B, R8, -70),
1355 BPF_EXIT_INSN(),
1356 },
1357 INTERNAL,
1358 { 10, 20, 30, 40, 50 },
1359 { { 4, 0 }, { 5, 10 } }
1360 },
1361 {
1362 "INT: DIV by zero",
1363 .u.insns_int = {
1364 BPF_ALU64_REG(BPF_MOV, R6, R1),
1365 BPF_ALU64_IMM(BPF_MOV, R7, 0),
1366 BPF_LD_ABS(BPF_B, 3),
1367 BPF_ALU32_REG(BPF_DIV, R0, R7),
1368 BPF_EXIT_INSN(),
1369 },
1370 INTERNAL,
1371 { 10, 20, 30, 40, 50 },
1372 { { 3, 0 }, { 4, 0 } }
1373 },
1374 {
1375 "check: missing ret",
1376 .u.insns = {
1377 BPF_STMT(BPF_LD | BPF_IMM, 1),
1378 },
1379 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1380 { },
1381 { }
1382 },
1383 {
1384 "check: div_k_0",
1385 .u.insns = {
1386 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
1387 BPF_STMT(BPF_RET | BPF_K, 0)
1388 },
1389 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1390 { },
1391 { }
1392 },
1393 {
1394 "check: unknown insn",
1395 .u.insns = {
1396 /* seccomp insn, rejected in socket filter */
1397 BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
1398 BPF_STMT(BPF_RET | BPF_K, 0)
1399 },
1400 CLASSIC | FLAG_EXPECTED_FAIL,
1401 { },
1402 { }
1403 },
1404 {
1405 "check: out of range spill/fill",
1406 .u.insns = {
1407 BPF_STMT(BPF_STX, 16),
1408 BPF_STMT(BPF_RET | BPF_K, 0)
1409 },
1410 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1411 { },
1412 { }
1413 },
1414 {
1415 "JUMPS + HOLES",
1416 .u.insns = {
1417 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1418 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
1419 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1420 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1421 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1422 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1423 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1424 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1425 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1426 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1427 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1428 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1429 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1430 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1431 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1432 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
1433 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1434 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
1435 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1436 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
1437 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
1438 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1439 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1440 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1441 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1442 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1443 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1444 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1445 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1446 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1447 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1448 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1449 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1450 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1451 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
1452 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
1453 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1454 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
1455 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
1456 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1457 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1458 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1459 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1460 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1461 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1462 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1463 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1464 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1465 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1466 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1467 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1468 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1469 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
1470 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
1471 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1472 BPF_STMT(BPF_RET | BPF_A, 0),
1473 BPF_STMT(BPF_RET | BPF_A, 0),
1474 },
1475 CLASSIC,
1476 { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
1477 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
1478 0x08, 0x00,
1479 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
1480 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
1481 0xc0, 0xa8, 0x33, 0x01,
1482 0xc0, 0xa8, 0x33, 0x02,
1483 0xbb, 0xb6,
1484 0xa9, 0xfa,
1485 0x00, 0x14, 0x00, 0x00,
1486 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1487 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1488 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1489 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1490 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1491 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1492 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1493 0xcc, 0xcc, 0xcc, 0xcc },
1494 { { 88, 0x001b } }
1495 },
1496 {
1497 "check: RET X",
1498 .u.insns = {
1499 BPF_STMT(BPF_RET | BPF_X, 0),
1500 },
1501 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1502 { },
1503 { },
1504 },
1505 {
1506 "check: LDX + RET X",
1507 .u.insns = {
1508 BPF_STMT(BPF_LDX | BPF_IMM, 42),
1509 BPF_STMT(BPF_RET | BPF_X, 0),
1510 },
1511 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1512 { },
1513 { },
1514 },
1515 { /* Mainly checking JIT here. */
1516 "M[]: alt STX + LDX",
1517 .u.insns = {
1518 BPF_STMT(BPF_LDX | BPF_IMM, 100),
1519 BPF_STMT(BPF_STX, 0),
1520 BPF_STMT(BPF_LDX | BPF_MEM, 0),
1521 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1522 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1523 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1524 BPF_STMT(BPF_STX, 1),
1525 BPF_STMT(BPF_LDX | BPF_MEM, 1),
1526 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1527 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1528 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1529 BPF_STMT(BPF_STX, 2),
1530 BPF_STMT(BPF_LDX | BPF_MEM, 2),
1531 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1532 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1533 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1534 BPF_STMT(BPF_STX, 3),
1535 BPF_STMT(BPF_LDX | BPF_MEM, 3),
1536 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1537 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1538 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1539 BPF_STMT(BPF_STX, 4),
1540 BPF_STMT(BPF_LDX | BPF_MEM, 4),
1541 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1542 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1543 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1544 BPF_STMT(BPF_STX, 5),
1545 BPF_STMT(BPF_LDX | BPF_MEM, 5),
1546 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1547 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1548 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1549 BPF_STMT(BPF_STX, 6),
1550 BPF_STMT(BPF_LDX | BPF_MEM, 6),
1551 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1552 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1553 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1554 BPF_STMT(BPF_STX, 7),
1555 BPF_STMT(BPF_LDX | BPF_MEM, 7),
1556 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1557 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1558 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1559 BPF_STMT(BPF_STX, 8),
1560 BPF_STMT(BPF_LDX | BPF_MEM, 8),
1561 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1562 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1563 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1564 BPF_STMT(BPF_STX, 9),
1565 BPF_STMT(BPF_LDX | BPF_MEM, 9),
1566 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1567 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1568 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1569 BPF_STMT(BPF_STX, 10),
1570 BPF_STMT(BPF_LDX | BPF_MEM, 10),
1571 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1572 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1573 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1574 BPF_STMT(BPF_STX, 11),
1575 BPF_STMT(BPF_LDX | BPF_MEM, 11),
1576 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1577 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1578 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1579 BPF_STMT(BPF_STX, 12),
1580 BPF_STMT(BPF_LDX | BPF_MEM, 12),
1581 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1582 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1583 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1584 BPF_STMT(BPF_STX, 13),
1585 BPF_STMT(BPF_LDX | BPF_MEM, 13),
1586 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1587 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1588 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1589 BPF_STMT(BPF_STX, 14),
1590 BPF_STMT(BPF_LDX | BPF_MEM, 14),
1591 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1592 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1593 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1594 BPF_STMT(BPF_STX, 15),
1595 BPF_STMT(BPF_LDX | BPF_MEM, 15),
1596 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1597 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1598 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1599 BPF_STMT(BPF_RET | BPF_A, 0),
1600 },
1601 CLASSIC | FLAG_NO_DATA,
1602 { },
1603 { { 0, 116 } },
1604 },
1605 { /* Mainly checking JIT here. */
1606 "M[]: full STX + full LDX",
1607 .u.insns = {
1608 BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
1609 BPF_STMT(BPF_STX, 0),
1610 BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
1611 BPF_STMT(BPF_STX, 1),
1612 BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
1613 BPF_STMT(BPF_STX, 2),
1614 BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
1615 BPF_STMT(BPF_STX, 3),
1616 BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
1617 BPF_STMT(BPF_STX, 4),
1618 BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
1619 BPF_STMT(BPF_STX, 5),
1620 BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
1621 BPF_STMT(BPF_STX, 6),
1622 BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
1623 BPF_STMT(BPF_STX, 7),
1624 BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
1625 BPF_STMT(BPF_STX, 8),
1626 BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
1627 BPF_STMT(BPF_STX, 9),
1628 BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
1629 BPF_STMT(BPF_STX, 10),
1630 BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
1631 BPF_STMT(BPF_STX, 11),
1632 BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
1633 BPF_STMT(BPF_STX, 12),
1634 BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
1635 BPF_STMT(BPF_STX, 13),
1636 BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
1637 BPF_STMT(BPF_STX, 14),
1638 BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
1639 BPF_STMT(BPF_STX, 15),
1640 BPF_STMT(BPF_LDX | BPF_MEM, 0),
1641 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1642 BPF_STMT(BPF_LDX | BPF_MEM, 1),
1643 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1644 BPF_STMT(BPF_LDX | BPF_MEM, 2),
1645 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1646 BPF_STMT(BPF_LDX | BPF_MEM, 3),
1647 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1648 BPF_STMT(BPF_LDX | BPF_MEM, 4),
1649 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1650 BPF_STMT(BPF_LDX | BPF_MEM, 5),
1651 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1652 BPF_STMT(BPF_LDX | BPF_MEM, 6),
1653 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1654 BPF_STMT(BPF_LDX | BPF_MEM, 7),
1655 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1656 BPF_STMT(BPF_LDX | BPF_MEM, 8),
1657 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1658 BPF_STMT(BPF_LDX | BPF_MEM, 9),
1659 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1660 BPF_STMT(BPF_LDX | BPF_MEM, 10),
1661 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1662 BPF_STMT(BPF_LDX | BPF_MEM, 11),
1663 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1664 BPF_STMT(BPF_LDX | BPF_MEM, 12),
1665 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1666 BPF_STMT(BPF_LDX | BPF_MEM, 13),
1667 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1668 BPF_STMT(BPF_LDX | BPF_MEM, 14),
1669 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1670 BPF_STMT(BPF_LDX | BPF_MEM, 15),
1671 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1672 BPF_STMT(BPF_RET | BPF_A, 0),
1673 },
1674 CLASSIC | FLAG_NO_DATA,
1675 { },
1676 { { 0, 0x2a5a5e5 } },
1677 },
1678 {
1679 "check: SKF_AD_MAX",
1680 .u.insns = {
1681 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
1682 SKF_AD_OFF + SKF_AD_MAX),
1683 BPF_STMT(BPF_RET | BPF_A, 0),
1684 },
1685 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1686 { },
1687 { },
1688 },
1689 { /* Passes checker but fails during runtime. */
1690 "LD [SKF_AD_OFF-1]",
1691 .u.insns = {
1692 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
1693 SKF_AD_OFF - 1),
1694 BPF_STMT(BPF_RET | BPF_K, 1),
1695 },
1696 CLASSIC,
1697 { },
1698 { { 1, 0 } },
1699 },
1700};
1701
1702static struct net_device dev;
1703
1704static struct sk_buff *populate_skb(char *buf, int size)
1705{
1706 struct sk_buff *skb;
1707
1708 if (size >= MAX_DATA)
1709 return NULL;
1710
1711 skb = alloc_skb(MAX_DATA, GFP_KERNEL);
1712 if (!skb)
1713 return NULL;
1714
1715 memcpy(__skb_put(skb, size), buf, size);
1716
1717 /* Initialize a fake skb with test pattern. */
1718 skb_reset_mac_header(skb);
1719 skb->protocol = htons(ETH_P_IP);
1720 skb->pkt_type = SKB_TYPE;
1721 skb->mark = SKB_MARK;
1722 skb->hash = SKB_HASH;
1723 skb->queue_mapping = SKB_QUEUE_MAP;
1724 skb->vlan_tci = SKB_VLAN_TCI;
1725 skb->dev = &dev;
1726 skb->dev->ifindex = SKB_DEV_IFINDEX;
1727 skb->dev->type = SKB_DEV_TYPE;
1728 skb_set_network_header(skb, min(size, ETH_HLEN));
1729
1730 return skb;
1731}
1732
1733static void *generate_test_data(struct bpf_test *test, int sub)
1734{
1735 if (test->aux & FLAG_NO_DATA)
1736 return NULL;
1737
1738 /* Test case expects an skb, so populate one. Various
1739 * subtests generate skbs of different sizes based on
1740 * the same data.
1741 */
1742 return populate_skb(test->data, test->test[sub].data_size);
1743}
1744
1745static void release_test_data(const struct bpf_test *test, void *data)
1746{
1747 if (test->aux & FLAG_NO_DATA)
1748 return;
1749
1750 kfree_skb(data);
1751}
1752
1753static int probe_filter_length(struct sock_filter *fp)
1754{
1755 int len = 0;
1756
1757 for (len = MAX_INSNS - 1; len > 0; --len)
1758 if (fp[len].code != 0 || fp[len].k != 0)
1759 break;
1760
1761 return len + 1;
1762}
1763
1764static struct sk_filter *generate_filter(int which, int *err)
1765{
1766 struct sk_filter *fp;
1767 struct sock_fprog_kern fprog;
1768 unsigned int flen = probe_filter_length(tests[which].u.insns);
1769 __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
1770
1771 switch (test_type) {
1772 case CLASSIC:
1773 fprog.filter = tests[which].u.insns;
1774 fprog.len = flen;
1775
1776 *err = sk_unattached_filter_create(&fp, &fprog);
1777 if (tests[which].aux & FLAG_EXPECTED_FAIL) {
1778 if (*err == -EINVAL) {
1779 pr_cont("PASS\n");
1780 /* Verifier rejected filter as expected. */
1781 *err = 0;
1782 return NULL;
1783 } else {
1784 pr_cont("UNEXPECTED_PASS\n");
1785				/* Verifier failed to reject a filter that
1786				 * it should have; just return.
1787				 */
1788 *err = -EINVAL;
1789 return NULL;
1790 }
1791 }
1792 /* We don't expect to fail. */
1793 if (*err) {
1794 pr_cont("FAIL to attach err=%d len=%d\n",
1795 *err, fprog.len);
1796 return NULL;
1797 }
1798 break;
1799
1800 case INTERNAL:
1801 fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
1802 if (fp == NULL) {
1803 pr_cont("UNEXPECTED_FAIL no memory left\n");
1804 *err = -ENOMEM;
1805 return NULL;
1806 }
1807
1808 fp->len = flen;
1809 memcpy(fp->insnsi, tests[which].u.insns_int,
1810 fp->len * sizeof(struct sock_filter_int));
1811
1812 sk_filter_select_runtime(fp);
1813 break;
1814 }
1815
1816 *err = 0;
1817 return fp;
1818}
1819
1820static void release_filter(struct sk_filter *fp, int which)
1821{
1822 __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
1823
1824 switch (test_type) {
1825 case CLASSIC:
1826 sk_unattached_filter_destroy(fp);
1827 break;
1828 case INTERNAL:
1829 sk_filter_free(fp);
1830 break;
1831 }
1832}
1833
1834static int __run_one(const struct sk_filter *fp, const void *data,
1835 int runs, u64 *duration)
1836{
1837 u64 start, finish;
1838 int ret, i;
1839
1840 start = ktime_to_us(ktime_get());
1841
1842 for (i = 0; i < runs; i++)
1843 ret = SK_RUN_FILTER(fp, data);
1844
1845 finish = ktime_to_us(ktime_get());
1846
1847 *duration = (finish - start) * 1000ULL;
1848 do_div(*duration, runs);
1849
1850 return ret;
1851}
1852
1853static int run_one(const struct sk_filter *fp, struct bpf_test *test)
1854{
1855 int err_cnt = 0, i, runs = MAX_TESTRUNS;
1856
1857 for (i = 0; i < MAX_SUBTESTS; i++) {
1858 void *data;
1859 u64 duration;
1860 u32 ret;
1861
1862 if (test->test[i].data_size == 0 &&
1863 test->test[i].result == 0)
1864 break;
1865
1866 data = generate_test_data(test, i);
1867 ret = __run_one(fp, data, runs, &duration);
1868 release_test_data(test, data);
1869
1870 if (ret == test->test[i].result) {
1871 pr_cont("%lld ", duration);
1872 } else {
1873 pr_cont("ret %d != %d ", ret,
1874 test->test[i].result);
1875 err_cnt++;
1876 }
1877 }
1878
1879 return err_cnt;
1880}
1881
1882static __init int test_bpf(void)
1883{
1884 int i, err_cnt = 0, pass_cnt = 0;
1885
1886 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1887 struct sk_filter *fp;
1888 int err;
1889
1890 pr_info("#%d %s ", i, tests[i].descr);
1891
1892 fp = generate_filter(i, &err);
1893 if (fp == NULL) {
1894 if (err == 0) {
1895 pass_cnt++;
1896 continue;
1897 }
1898
1899 return err;
1900 }
1901 err = run_one(fp, &tests[i]);
1902 release_filter(fp, i);
1903
1904 if (err) {
1905 pr_cont("FAIL (%d times)\n", err);
1906 err_cnt++;
1907 } else {
1908 pr_cont("PASS\n");
1909 pass_cnt++;
1910 }
1911 }
1912
1913 pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
1914 return err_cnt ? -EINVAL : 0;
1915}
1916
1917static int __init test_bpf_init(void)
1918{
1919 return test_bpf();
1920}
1921
1922static void __exit test_bpf_exit(void)
1923{
1924}
1925
1926module_init(test_bpf_init);
1927module_exit(test_bpf_exit);
1928
1929MODULE_LICENSE("GPL");
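
For reference, a minimal userspace sketch of how a classic BPF program like the test vectors above is normally attached to a socket via SO_ATTACH_FILTER (hypothetical accept-all filter, mirroring the style of Documentation/networking/filter.txt; not part of the module itself):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
	/* Accept every packet: a single BPF_RET | BPF_K insn returning
	 * the maximum snap length, built with the same macros as above. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	/* The (interpreted or JITed) filter now runs on every packet
	 * queued to this socket. */
	return 0;
}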
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 3c32bd257b73..9012b1c922b6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -63,7 +63,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 }
 
 /* Must be invoked with rcu_read_lock. */
-struct net_device *__vlan_find_dev_deep(struct net_device *dev,
+struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
 					__be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
@@ -81,13 +81,13 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
 		upper_dev = netdev_master_upper_dev_get_rcu(dev);
 		if (upper_dev)
-			return __vlan_find_dev_deep(upper_dev,
+			return __vlan_find_dev_deep_rcu(upper_dev,
 						    vlan_proto, vlan_id);
 	}
 
 	return NULL;
 }
-EXPORT_SYMBOL(__vlan_find_dev_deep);
+EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
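
The new _rcu suffix makes the locking contract explicit: the lookup walks RCU-protected lists and may only run inside an RCU read-side critical section. A hedged sketch of the expected calling pattern (hypothetical real_dev and vid):

	struct net_device *vlan_dev;

	rcu_read_lock();
	vlan_dev = __vlan_find_dev_deep_rcu(real_dev, htons(ETH_P_8021Q), vid);
	if (vlan_dev)
		dev_hold(vlan_dev);	/* pin it before leaving the section */
	rcu_read_unlock();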
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 019efb79708f..ad2ac3c00398 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -643,9 +643,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	netdev_features_t old_features = features;
 
-	features &= real_dev->vlan_features;
+	features = netdev_intersect_features(features, real_dev->vlan_features);
 	features |= NETIF_F_RXCSUM;
-	features &= real_dev->features;
+	features = netdev_intersect_features(features, real_dev->features);
 
 	features |= old_features & NETIF_F_SOFT_FEATURES;
 	features |= NETIF_F_LLTX;
@@ -671,38 +671,36 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
+	struct vlan_pcpu_stats *p;
+	u32 rx_errors = 0, tx_dropped = 0;
+	int i;
 
-	if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
-		struct vlan_pcpu_stats *p;
-		u32 rx_errors = 0, tx_dropped = 0;
-		int i;
-
-		for_each_possible_cpu(i) {
-			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
-			unsigned int start;
-
-			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
-			do {
-				start = u64_stats_fetch_begin_irq(&p->syncp);
-				rxpackets = p->rx_packets;
-				rxbytes = p->rx_bytes;
-				rxmulticast = p->rx_multicast;
-				txpackets = p->tx_packets;
-				txbytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
-
-			stats->rx_packets += rxpackets;
-			stats->rx_bytes += rxbytes;
-			stats->multicast += rxmulticast;
-			stats->tx_packets += txpackets;
-			stats->tx_bytes += txbytes;
-			/* rx_errors & tx_dropped are u32 */
-			rx_errors += p->rx_errors;
-			tx_dropped += p->tx_dropped;
-		}
-		stats->rx_errors = rx_errors;
-		stats->tx_dropped = tx_dropped;
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+		unsigned int start;
+
+		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rxpackets = p->rx_packets;
+			rxbytes = p->rx_bytes;
+			rxmulticast = p->rx_multicast;
+			txpackets = p->tx_packets;
+			txbytes = p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets += rxpackets;
+		stats->rx_bytes += rxbytes;
+		stats->multicast += rxmulticast;
+		stats->tx_packets += txpackets;
+		stats->tx_bytes += txbytes;
+		/* rx_errors & tx_dropped are u32 */
+		rx_errors += p->rx_errors;
+		tx_dropped += p->tx_dropped;
 	}
+	stats->rx_errors = rx_errors;
+	stats->tx_dropped = tx_dropped;
+
 	return stats;
 }
 
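The reader loop above is the stock u64_stats_sync pattern: retry the snapshot until the per-cpu writer was quiescent for the whole read. Reduced to one counter (hypothetical pcpu_stats type, same fetch/retry helpers as above):

struct pcpu_stats {
	u64			packets;
	struct u64_stats_sync	syncp;
};

static u64 read_packets(struct pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pcpu_stats *p = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->packets;	/* snapshot under the seqcount */
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		total += packets;
	}
	return total;
}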
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 786ee2f83d5f..01a1082e02b3 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1669,7 +1669,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 		goto out;
 	}
 
-	if (sk->sk_no_check == 1)
+	if (sk->sk_no_check_tx)
 		ddp->deh_sum = 0;
 	else
 		ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));
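
sk_no_check was split into separate transmit and receive flags in this series; the userspace knob feeding the tx flag is still SO_NO_CHECK. A sketch (hypothetical fd):

	int one = 1;

	/* Ask the kernel to send this socket's datagrams without a
	 * transport checksum; after this series that lands in
	 * sk->sk_no_check_tx. */
	if (setsockopt(fd, SOL_SOCKET, SO_NO_CHECK, &one, sizeof(one)) < 0)
		perror("SO_NO_CHECK");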
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 1281049c135f..d8e5d0c2ebbc 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -263,17 +263,11 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 			goto out;
 		}
 	}
-/*
- * Not supported yet
- *
- * #ifndef CONFIG_SINGLE_SIGITF
- */
+
 	vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
 	vcc->qos.txtp.pcr = 0;
 	vcc->qos.txtp.min_pcr = 0;
-/*
- * #endif
- */
+
 	error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
 	if (!error)
 		sock->state = SS_CONNECTED;
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index b758881be108..a12e25efaf6f 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -245,6 +245,7 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
245static int batadv_originators_open(struct inode *inode, struct file *file) 245static int batadv_originators_open(struct inode *inode, struct file *file)
246{ 246{
247 struct net_device *net_dev = (struct net_device *)inode->i_private; 247 struct net_device *net_dev = (struct net_device *)inode->i_private;
248
248 return single_open(file, batadv_orig_seq_print_text, net_dev); 249 return single_open(file, batadv_orig_seq_print_text, net_dev);
249} 250}
250 251
@@ -258,18 +259,21 @@ static int batadv_originators_hardif_open(struct inode *inode,
258 struct file *file) 259 struct file *file)
259{ 260{
260 struct net_device *net_dev = (struct net_device *)inode->i_private; 261 struct net_device *net_dev = (struct net_device *)inode->i_private;
262
261 return single_open(file, batadv_orig_hardif_seq_print_text, net_dev); 263 return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
262} 264}
263 265
264static int batadv_gateways_open(struct inode *inode, struct file *file) 266static int batadv_gateways_open(struct inode *inode, struct file *file)
265{ 267{
266 struct net_device *net_dev = (struct net_device *)inode->i_private; 268 struct net_device *net_dev = (struct net_device *)inode->i_private;
269
267 return single_open(file, batadv_gw_client_seq_print_text, net_dev); 270 return single_open(file, batadv_gw_client_seq_print_text, net_dev);
268} 271}
269 272
270static int batadv_transtable_global_open(struct inode *inode, struct file *file) 273static int batadv_transtable_global_open(struct inode *inode, struct file *file)
271{ 274{
272 struct net_device *net_dev = (struct net_device *)inode->i_private; 275 struct net_device *net_dev = (struct net_device *)inode->i_private;
276
273 return single_open(file, batadv_tt_global_seq_print_text, net_dev); 277 return single_open(file, batadv_tt_global_seq_print_text, net_dev);
274} 278}
275 279
@@ -277,6 +281,7 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
277static int batadv_bla_claim_table_open(struct inode *inode, struct file *file) 281static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
278{ 282{
279 struct net_device *net_dev = (struct net_device *)inode->i_private; 283 struct net_device *net_dev = (struct net_device *)inode->i_private;
284
280 return single_open(file, batadv_bla_claim_table_seq_print_text, 285 return single_open(file, batadv_bla_claim_table_seq_print_text,
281 net_dev); 286 net_dev);
282} 287}
@@ -285,6 +290,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
285 struct file *file) 290 struct file *file)
286{ 291{
287 struct net_device *net_dev = (struct net_device *)inode->i_private; 292 struct net_device *net_dev = (struct net_device *)inode->i_private;
293
288 return single_open(file, batadv_bla_backbone_table_seq_print_text, 294 return single_open(file, batadv_bla_backbone_table_seq_print_text,
289 net_dev); 295 net_dev);
290} 296}
@@ -300,6 +306,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
300static int batadv_dat_cache_open(struct inode *inode, struct file *file) 306static int batadv_dat_cache_open(struct inode *inode, struct file *file)
301{ 307{
302 struct net_device *net_dev = (struct net_device *)inode->i_private; 308 struct net_device *net_dev = (struct net_device *)inode->i_private;
309
303 return single_open(file, batadv_dat_cache_seq_print_text, net_dev); 310 return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
304} 311}
305#endif 312#endif
@@ -307,6 +314,7 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
307static int batadv_transtable_local_open(struct inode *inode, struct file *file) 314static int batadv_transtable_local_open(struct inode *inode, struct file *file)
308{ 315{
309 struct net_device *net_dev = (struct net_device *)inode->i_private; 316 struct net_device *net_dev = (struct net_device *)inode->i_private;
317
310 return single_open(file, batadv_tt_local_seq_print_text, net_dev); 318 return single_open(file, batadv_tt_local_seq_print_text, net_dev);
311} 319}
312 320
@@ -319,6 +327,7 @@ struct batadv_debuginfo {
319static int batadv_nc_nodes_open(struct inode *inode, struct file *file) 327static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
320{ 328{
321 struct net_device *net_dev = (struct net_device *)inode->i_private; 329 struct net_device *net_dev = (struct net_device *)inode->i_private;
330
322 return single_open(file, batadv_nc_nodes_seq_print_text, net_dev); 331 return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
323} 332}
324#endif 333#endif
@@ -333,7 +342,7 @@ struct batadv_debuginfo batadv_debuginfo_##_name = { \
 		.llseek = seq_lseek,	\
 		.release = single_release,	\
 	}	\
-};
+}
 
 /* the following attributes are general and therefore they will be directly
  * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
@@ -395,7 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
 		.llseek = seq_lseek,	\
 		.release = single_release,	\
 	},	\
-};
+}
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
 			       batadv_originators_hardif_open);
 
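Every open handler touched in this file is an instance of the same single_open() idiom; the change only adds the blank line after declarations that checkpatch asks for, and drops the stray semicolon after the macro-generated struct. The pattern in isolation (hypothetical foo attribute):

static int foo_show(struct seq_file *m, void *v)
{
	struct net_device *net_dev = m->private;

	seq_printf(m, "%s\n", net_dev->name);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	struct net_device *net_dev = inode->i_private;

	return single_open(file, foo_show, net_dev);
}

static const struct file_operations foo_fops = {
	.owner	 = THIS_MODULE,
	.open	 = foo_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};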
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index aa5d4946d0d7..f2c066b21716 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -594,7 +594,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 		if (!neigh_node)
 			goto free_orig;
 
-		tmp_skb = pskb_copy(skb, GFP_ATOMIC);
+		tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
 							   cand[i].orig_node,
 							   packet_subtype)) {
@@ -662,6 +662,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
662void batadv_dat_status_update(struct net_device *net_dev) 662void batadv_dat_status_update(struct net_device *net_dev)
663{ 663{
664 struct batadv_priv *bat_priv = netdev_priv(net_dev); 664 struct batadv_priv *bat_priv = netdev_priv(net_dev);
665
665 batadv_dat_tvlv_container_update(bat_priv); 666 batadv_dat_tvlv_container_update(bat_priv);
666} 667}
667 668
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 770dc890ceef..118b990bae25 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.2.0"
+#define BATADV_SOURCE_VERSION "2014.3.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a9546fe541eb..8d04d174669e 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -86,6 +86,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
86void batadv_nc_status_update(struct net_device *net_dev) 86void batadv_nc_status_update(struct net_device *net_dev)
87{ 87{
88 struct batadv_priv *bat_priv = netdev_priv(net_dev); 88 struct batadv_priv *bat_priv = netdev_priv(net_dev);
89
89 batadv_nc_tvlv_container_update(bat_priv); 90 batadv_nc_tvlv_container_update(bat_priv);
90} 91}
91 92
@@ -1343,7 +1344,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 	struct ethhdr *ethhdr;
 
 	/* Copy skb header to change the mac header */
-	skb = pskb_copy(skb, GFP_ATOMIC);
+	skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
 	if (!skb)
 		return;
 
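A hedged reading of the new helper: pskb_copy_for_clone() behaves like pskb_copy() but requests a fast-clone (fclone) allocation, which pays off when, as in these call sites, the copy itself is expected to be cloned again shortly. Usage sketch (hypothetical error handling, assuming a void caller):

	struct sk_buff *nskb;

	/* Like pskb_copy(), but the copy is fclone-allocated because we
	 * expect to skb_clone() nskb again right away. */
	nskb = pskb_copy_for_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;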
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 744a59b85e15..e7ee65dc20bf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -884,7 +884,7 @@ static void batadv_softif_init_early(struct net_device *dev)
 	/* generate random address */
 	eth_hw_addr_random(dev);
 
-	SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
+	dev->ethtool_ops = &batadv_ethtool_ops;
 
 	memset(priv, 0, sizeof(*priv));
 }
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 1ebb0d9e2ea5..fc47baa888c5 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -29,12 +29,14 @@
29static struct net_device *batadv_kobj_to_netdev(struct kobject *obj) 29static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
30{ 30{
31 struct device *dev = container_of(obj->parent, struct device, kobj); 31 struct device *dev = container_of(obj->parent, struct device, kobj);
32
32 return to_net_dev(dev); 33 return to_net_dev(dev);
33} 34}
34 35
35static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj) 36static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
36{ 37{
37 struct net_device *net_dev = batadv_kobj_to_netdev(obj); 38 struct net_device *net_dev = batadv_kobj_to_netdev(obj);
39
38 return netdev_priv(net_dev); 40 return netdev_priv(net_dev);
39} 41}
40 42
@@ -106,7 +108,7 @@ struct batadv_attribute batadv_attr_vlan_##_name = { \
 		  .mode = _mode },	\
 	.show = _show,	\
 	.store = _store,	\
-};
+}
 
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)	\
@@ -115,7 +117,7 @@ struct batadv_attribute batadv_attr_##_name = { \
 		  .mode = _mode },	\
 	.show = _show,	\
 	.store = _store,	\
-};
+}
 
 #define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func)	\
 ssize_t batadv_store_##_name(struct kobject *kobj,	\
@@ -124,6 +126,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
124{ \ 126{ \
125 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \ 127 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
126 struct batadv_priv *bat_priv = netdev_priv(net_dev); \ 128 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
129 \
127 return __batadv_store_bool_attr(buff, count, _post_func, attr, \ 130 return __batadv_store_bool_attr(buff, count, _post_func, attr, \
128 &bat_priv->_name, net_dev); \ 131 &bat_priv->_name, net_dev); \
129} 132}
@@ -133,6 +136,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
133 struct attribute *attr, char *buff) \ 136 struct attribute *attr, char *buff) \
134{ \ 137{ \
135 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \ 138 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
139 \
136 return sprintf(buff, "%s\n", \ 140 return sprintf(buff, "%s\n", \
137 atomic_read(&bat_priv->_name) == 0 ? \ 141 atomic_read(&bat_priv->_name) == 0 ? \
138 "disabled" : "enabled"); \ 142 "disabled" : "enabled"); \
@@ -155,6 +159,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
155{ \ 159{ \
156 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \ 160 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
157 struct batadv_priv *bat_priv = netdev_priv(net_dev); \ 161 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
162 \
158 return __batadv_store_uint_attr(buff, count, _min, _max, \ 163 return __batadv_store_uint_attr(buff, count, _min, _max, \
159 _post_func, attr, \ 164 _post_func, attr, \
160 &bat_priv->_name, net_dev); \ 165 &bat_priv->_name, net_dev); \
@@ -165,6 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
165 struct attribute *attr, char *buff) \ 170 struct attribute *attr, char *buff) \
166{ \ 171{ \
167 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \ 172 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
173 \
168 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ 174 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
169} \ 175} \
170 176
@@ -188,6 +194,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
188 size_t res = __batadv_store_bool_attr(buff, count, _post_func, \ 194 size_t res = __batadv_store_bool_attr(buff, count, _post_func, \
189 attr, &vlan->_name, \ 195 attr, &vlan->_name, \
190 bat_priv->soft_iface); \ 196 bat_priv->soft_iface); \
197 \
191 batadv_softif_vlan_free_ref(vlan); \ 198 batadv_softif_vlan_free_ref(vlan); \
192 return res; \ 199 return res; \
193} 200}
@@ -202,6 +209,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
202 size_t res = sprintf(buff, "%s\n", \ 209 size_t res = sprintf(buff, "%s\n", \
203 atomic_read(&vlan->_name) == 0 ? \ 210 atomic_read(&vlan->_name) == 0 ? \
204 "disabled" : "enabled"); \ 211 "disabled" : "enabled"); \
212 \
205 batadv_softif_vlan_free_ref(vlan); \ 213 batadv_softif_vlan_free_ref(vlan); \
206 return res; \ 214 return res; \
207} 215}
@@ -324,12 +332,14 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
324 struct attribute *attr, char *buff) 332 struct attribute *attr, char *buff)
325{ 333{
326 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); 334 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
335
327 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name); 336 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
328} 337}
329 338
330static void batadv_post_gw_reselect(struct net_device *net_dev) 339static void batadv_post_gw_reselect(struct net_device *net_dev)
331{ 340{
332 struct batadv_priv *bat_priv = netdev_priv(net_dev); 341 struct batadv_priv *bat_priv = netdev_priv(net_dev);
342
333 batadv_gw_reselect(bat_priv); 343 batadv_gw_reselect(bat_priv);
334} 344}
335 345
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 73492b91105a..8796ffa08b43 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn,
 	return 0;
 }
 
-static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
-			    bdaddr_t *addr, u8 *addr_type)
+static u8 get_addr_type_from_eui64(u8 byte)
 {
-	u8 *eui64;
+	/* Check the universal(0)/local(1) bit. */
+	if (byte & 0x02)
+		return ADDR_LE_DEV_RANDOM;
 
-	eui64 = ip6_daddr->s6_addr + 8;
+	return ADDR_LE_DEV_PUBLIC;
+}
+
+static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
+{
+	u8 *eui64 = ip6_daddr->s6_addr + 8;
 
 	addr->b[0] = eui64[7];
 	addr->b[1] = eui64[6];
@@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
 	addr->b[3] = eui64[2];
 	addr->b[4] = eui64[1];
 	addr->b[5] = eui64[0];
+}
 
-	addr->b[5] ^= 2;
+static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
+				bdaddr_t *addr, u8 *addr_type)
+{
+	copy_to_bdaddr(ip6_daddr, addr);
 
-	/* Set universal/local bit to 0 */
-	if (addr->b[5] & 1) {
-		addr->b[5] &= ~1;
-		*addr_type = ADDR_LE_DEV_PUBLIC;
-	} else {
-		*addr_type = ADDR_LE_DEV_RANDOM;
-	}
+	/* We need to toggle the U/L bit that we got from the IPv6 address
+	 * so that we get the proper BD address and address type.
+	 */
+	addr->b[5] ^= 0x02;
+
+	*addr_type = get_addr_type_from_eui64(addr->b[5]);
 }
 
 static int header_create(struct sk_buff *skb, struct net_device *netdev,
@@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 	/* Get destination BT device from skb.
 	 * If there is no such peer then discard the packet.
 	 */
-	get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+	convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
 
-	BT_DBG("dest addr %pMR type %d", &addr, addr_type);
+	BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
+	       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+	       &hdr->daddr);
 
 	read_lock_irqsave(&devices_lock, flags);
 	peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	} else {
 		unsigned long flags;
 
-		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
+		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
 		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
 		dev = lowpan_dev(netdev);
 
@@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 		peer = peer_lookup_ba(dev, &addr, addr_type);
 		read_unlock_irqrestore(&devices_lock, flags);
 
-		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
-		       &addr, &lowpan_cb(skb)->addr, peer);
+		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
+		       netdev->name, &addr,
+		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+		       &lowpan_cb(skb)->addr, peer);
 
 		if (peer && peer->conn)
 			err = send_pkt(peer->conn, netdev->dev_addr,
@@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
 	eui[6] = addr[1];
 	eui[7] = addr[0];
 
-	eui[0] ^= 2;
-
-	/* Universal/local bit set, RFC 4291 */
+	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
 	if (addr_type == ADDR_LE_DEV_PUBLIC)
-		eui[0] |= 1;
+		eui[0] &= ~0x02;
 	else
-		eui[0] &= ~1;
+		eui[0] |= 0x02;
+
+	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
 }
 
 static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
@@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
 {
 	netdev->addr_assign_type = NET_ADDR_PERM;
 	set_addr(netdev->dev_addr, addr->b, addr_type);
-	netdev->dev_addr[0] ^= 2;
 }
 
 static void ifup(struct net_device *netdev)
@@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
 
 	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
 	       EUI64_ADDR_LEN);
-	peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
-				   * is done according RFC2464
-				   */
-
-	raw_dump_inline(__func__, "peer IPv6 address",
-			(unsigned char *)&peer->peer_addr, 16);
-	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
 
 	write_lock_irqsave(&devices_lock, flags);
 	INIT_LIST_HEAD(&peer->list);
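
All of the address plumbing above reduces to one invariant: the universal/local bit (0x02 in the first EUI-64 byte) encodes public vs. random BD addresses. A standalone sketch of the encode side and the type test, mirroring set_addr() and get_addr_type_from_eui64() above (hypothetical helpers, not from the patch):

/* BD address (little-endian byte order) -> modified EUI-64. */
static void sketch_bdaddr_to_eui64(const u8 addr[6], bool random, u8 eui[8])
{
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xff;		/* standard ff:fe filler bytes */
	eui[4] = 0xfe;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	if (random)
		eui[0] |= 0x02;		/* local bit marks a random address */
	else
		eui[0] &= ~0x02;	/* universal bit marks a public one */
}

static bool sketch_eui64_is_random(const u8 eui[8])
{
	return eui[0] & 0x02;
}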
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 521fd4f3985e..8671bc79a35b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -28,6 +28,7 @@
28 28
29#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
31 32
32#include "smp.h" 33#include "smp.h"
33#include "a2mp.h" 34#include "a2mp.h"
@@ -367,9 +368,23 @@ static void le_conn_timeout(struct work_struct *work)
367{ 368{
368 struct hci_conn *conn = container_of(work, struct hci_conn, 369 struct hci_conn *conn = container_of(work, struct hci_conn,
369 le_conn_timeout.work); 370 le_conn_timeout.work);
371 struct hci_dev *hdev = conn->hdev;
370 372
371 BT_DBG(""); 373 BT_DBG("");
372 374
375	/* We could end up here due to having done directed advertising,
376	 * so clean up the state if necessary. This should, however, only
377	 * happen with broken hardware or if low duty cycle advertising was
378	 * used (which has no timeout of its own).
379	 */
380 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
381 u8 enable = 0x00;
382 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
383 &enable);
384 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
385 return;
386 }
387
373 hci_le_create_connection_cancel(conn); 388 hci_le_create_connection_cancel(conn);
374} 389}
375 390
@@ -393,6 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
393 conn->io_capability = hdev->io_capability; 408 conn->io_capability = hdev->io_capability;
394 conn->remote_auth = 0xff; 409 conn->remote_auth = 0xff;
395 conn->key_type = 0xff; 410 conn->key_type = 0xff;
411 conn->tx_power = HCI_TX_POWER_INVALID;
412 conn->max_tx_power = HCI_TX_POWER_INVALID;
396 413
397 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 414 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
398 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 415 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -401,6 +418,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
401 case ACL_LINK: 418 case ACL_LINK:
402 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; 419 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
403 break; 420 break;
421 case LE_LINK:
422 /* conn->src should reflect the local identity address */
423 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
424 break;
404 case SCO_LINK: 425 case SCO_LINK:
405 if (lmp_esco_capable(hdev)) 426 if (lmp_esco_capable(hdev))
406 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 427 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -545,6 +566,11 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
545 * favor of connection establishment, we should restart it. 566 * favor of connection establishment, we should restart it.
546 */ 567 */
547 hci_update_background_scan(hdev); 568 hci_update_background_scan(hdev);
569
570 /* Re-enable advertising in case this was a failed connection
571 * attempt as a peripheral.
572 */
573 mgmt_reenable_advertising(hdev);
548} 574}
549 575
550static void create_le_conn_complete(struct hci_dev *hdev, u8 status) 576static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
@@ -605,6 +631,45 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
605 conn->state = BT_CONNECT; 631 conn->state = BT_CONNECT;
606} 632}
607 633
634static void hci_req_directed_advertising(struct hci_request *req,
635 struct hci_conn *conn)
636{
637 struct hci_dev *hdev = req->hdev;
638 struct hci_cp_le_set_adv_param cp;
639 u8 own_addr_type;
640 u8 enable;
641
642 enable = 0x00;
643 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
644
650	/* Clear the HCI_ADVERTISING bit temporarily so that
651	 * hci_update_random_address knows that it's safe to go ahead
647 * and write a new random address. The flag will be set back on
648 * as soon as the SET_ADV_ENABLE HCI command completes.
649 */
650 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
651
652 /* Set require_privacy to false so that the remote device has a
653 * chance of identifying us.
654 */
655 if (hci_update_random_address(req, false, &own_addr_type) < 0)
656 return;
657
658 memset(&cp, 0, sizeof(cp));
659 cp.type = LE_ADV_DIRECT_IND;
660 cp.own_address_type = own_addr_type;
661 cp.direct_addr_type = conn->dst_type;
662 bacpy(&cp.direct_addr, &conn->dst);
663 cp.channel_map = hdev->le_adv_channel_map;
664
665 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
666
667 enable = 0x01;
668 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
669
670 conn->state = BT_CONNECT;
671}
672
608struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 673struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
609 u8 dst_type, u8 sec_level, u8 auth_type) 674 u8 dst_type, u8 sec_level, u8 auth_type)
610{ 675{
@@ -614,9 +679,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	struct hci_request req;
 	int err;
 
-	if (test_bit(HCI_ADVERTISING, &hdev->flags))
-		return ERR_PTR(-ENOTSUPP);
-
 	/* Some devices send ATT messages as soon as the physical link is
 	 * established. To be able to handle these ATT messages, the user-
 	 * space first establishes the connection and then starts the pairing
@@ -664,13 +726,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		return ERR_PTR(-ENOMEM);
 
 	conn->dst_type = dst_type;
-
-	conn->out = true;
-	conn->link_mode |= HCI_LM_MASTER;
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->pending_sec_level = sec_level;
 	conn->auth_type = auth_type;
 
+	hci_req_init(&req, hdev);
+
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+		hci_req_directed_advertising(&req, conn);
+		goto create_conn;
+	}
+
+	conn->out = true;
+	conn->link_mode |= HCI_LM_MASTER;
+
 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
 	if (params) {
 		conn->le_conn_min_interval = params->conn_min_interval;
@@ -680,8 +749,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
 	}
 
-	hci_req_init(&req, hdev);
-
 	/* If controller is scanning, we stop it since some controllers are
 	 * not able to scan and connect at the same time. Also set the
 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
@@ -695,6 +762,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 
 	hci_req_add_le_create_conn(&req, conn);
 
+create_conn:
 	err = hci_req_run(&req, create_le_conn_complete);
 	if (err) {
 		hci_conn_del(conn);
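
Both the directed-advertising path and the create-connection path are now expressed as one batched HCI request, which is what makes the single create_conn label work. The general shape of the request API (hypothetical completion handler and command):

static void sketch_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int sketch_queue_commands(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 enable = 0x01;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	/* Further hci_req_add() calls are sent back to back; the
	 * callback fires once the whole batch has completed. */
	return hci_req_run(&req, sketch_complete);
}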
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 1c6ffaa8902f..0a43cce9a914 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -34,6 +34,7 @@
34 34
35#include <net/bluetooth/bluetooth.h> 35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h> 36#include <net/bluetooth/hci_core.h>
37#include <net/bluetooth/l2cap.h>
37 38
38#include "smp.h" 39#include "smp.h"
39 40
@@ -579,6 +580,62 @@ static int sniff_max_interval_get(void *data, u64 *val)
579DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, 580DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n"); 581 sniff_max_interval_set, "%llu\n");
581 582
583static int conn_info_min_age_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_info_min_age_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
610
611static int conn_info_max_age_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_info_max_age_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
638
582static int identity_show(struct seq_file *f, void *p) 639static int identity_show(struct seq_file *f, void *p)
583{ 640{
584 struct hci_dev *hdev = f->private; 641 struct hci_dev *hdev = f->private;
@@ -955,14 +1012,9 @@ static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
 	if (count < 3)
 		return -EINVAL;
 
-	buf = kzalloc(count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, data, count)) {
-		err = -EFAULT;
-		goto done;
-	}
+	buf = memdup_user(data, count);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
 
 	if (memcmp(buf, "add", 3) == 0) {
 		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
@@ -1759,6 +1811,11 @@ static int __hci_init(struct hci_dev *hdev)
1759 &blacklist_fops); 1811 &blacklist_fops);
1760 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1761 1813
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815 &conn_info_min_age_fops);
1816 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817 &conn_info_max_age_fops);
1818
1762 if (lmp_bredr_capable(hdev)) { 1819 if (lmp_bredr_capable(hdev)) {
1763 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 1820 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1764 hdev, &inquiry_cache_fops); 1821 hdev, &inquiry_cache_fops);
@@ -1828,6 +1885,9 @@ static int __hci_init(struct hci_dev *hdev)
1828 &lowpan_debugfs_fops); 1885 &lowpan_debugfs_fops);
1829 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev, 1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1830 &le_auto_conn_fops); 1887 &le_auto_conn_fops);
1888 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout);
1831 } 1891 }
1832 1892
1833 return 0; 1893 return 0;
@@ -2033,12 +2093,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 
 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
-	if (ssp)
-		*ssp = data->ssp_mode;
+	*ssp = data->ssp_mode;
 
 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
 	if (ie) {
-		if (ie->data.ssp_mode && ssp)
+		if (ie->data.ssp_mode)
 			*ssp = true;
 
 		if (ie->name_state == NAME_NEEDED &&
@@ -3791,6 +3850,9 @@ struct hci_dev *hci_alloc_dev(void)
3791 hdev->le_conn_max_interval = 0x0038; 3850 hdev->le_conn_max_interval = 0x0038;
3792 3851
3793 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3852 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3853 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3854 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3855 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3794 3856
3795 mutex_init(&hdev->lock); 3857 mutex_init(&hdev->lock);
3796 mutex_init(&hdev->req_lock); 3858 mutex_init(&hdev->req_lock);
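The two new conn_info debugfs knobs are the standard DEFINE_SIMPLE_ATTRIBUTE recipe, with the cross-field check keeping min <= max. Boiled down to one hypothetical u64 attribute:

static u64 sketch_value = 1;

static int sketch_set(void *data, u64 val)
{
	if (val == 0)		/* reject out-of-range input */
		return -EINVAL;

	sketch_value = val;
	return 0;
}

static int sketch_get(void *data, u64 *val)
{
	*val = sketch_value;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sketch_fops, sketch_get, sketch_set, "%llu\n");

/* registered from an init path, e.g.:
 *	debugfs_create_file("sketch", 0644, parent, NULL, &sketch_fops);
 */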
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 682f33a38366..21e5913d12e0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -991,10 +991,25 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!sent)
 		return;
 
+	if (status)
+		return;
+
 	hci_dev_lock(hdev);
 
-	if (!status)
-		mgmt_advertising(hdev, *sent);
+	/* If we're doing connection initiation as a peripheral, set a
+	 * timeout in case something goes wrong.
+	 */
+	if (*sent) {
+		struct hci_conn *conn;
+
+		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		if (conn)
+			queue_delayed_work(hdev->workqueue,
+					   &conn->le_conn_timeout,
+					   HCI_LE_CONN_TIMEOUT);
+	}
+
+	mgmt_advertising(hdev, *sent);
 
 	hci_dev_unlock(hdev);
 }
@@ -1018,6 +1033,33 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1018 hci_dev_unlock(hdev); 1033 hci_dev_unlock(hdev);
1019} 1034}
1020 1035
1036static bool has_pending_adv_report(struct hci_dev *hdev)
1037{
1038 struct discovery_state *d = &hdev->discovery;
1039
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1041}
1042
1043static void clear_pending_adv_report(struct hci_dev *hdev)
1044{
1045 struct discovery_state *d = &hdev->discovery;
1046
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
1049}
1050
1051static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1053{
1054 struct discovery_state *d = &hdev->discovery;
1055
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
1061}
1062
1021static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1063static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1022 struct sk_buff *skb) 1064 struct sk_buff *skb)
1023{ 1065{
@@ -1036,9 +1078,25 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1036 switch (cp->enable) { 1078 switch (cp->enable) {
1037 case LE_SCAN_ENABLE: 1079 case LE_SCAN_ENABLE:
1038 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1080 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1081 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1082 clear_pending_adv_report(hdev);
1039 break; 1083 break;
1040 1084
1041 case LE_SCAN_DISABLE: 1085 case LE_SCAN_DISABLE:
1086 /* We do this here instead of when setting DISCOVERY_STOPPED
1087 * since the latter would potentially require waiting for
1088 * inquiry to stop too.
1089 */
1090 if (has_pending_adv_report(hdev)) {
1091 struct discovery_state *d = &hdev->discovery;
1092
1093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1094 d->last_adv_addr_type, NULL,
1095 d->last_adv_rssi, 0, 1,
1096 d->last_adv_data,
1097 d->last_adv_data_len, NULL, 0);
1098 }
1099
1042 /* Cancel this timer so that we don't try to disable scanning 1100 /* Cancel this timer so that we don't try to disable scanning
1043 * when it's already disabled. 1101 * when it's already disabled.
1044 */ 1102 */
@@ -1187,6 +1245,59 @@ static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1187 amp_write_rem_assoc_continue(hdev, rp->phy_handle); 1245 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1188} 1246}
1189 1247
1248static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1249{
1250 struct hci_rp_read_rssi *rp = (void *) skb->data;
1251 struct hci_conn *conn;
1252
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255 if (rp->status)
1256 return;
1257
1258 hci_dev_lock(hdev);
1259
1260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1261 if (conn)
1262 conn->rssi = rp->rssi;
1263
1264 hci_dev_unlock(hdev);
1265}
1266
1267static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1268{
1269 struct hci_cp_read_tx_power *sent;
1270 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1271 struct hci_conn *conn;
1272
1273 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1274
1275 if (rp->status)
1276 return;
1277
1278 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1279 if (!sent)
1280 return;
1281
1282 hci_dev_lock(hdev);
1283
1284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1285 if (!conn)
1286 goto unlock;
1287
1288 switch (sent->type) {
1289 case 0x00:
1290 conn->tx_power = rp->tx_power;
1291 break;
1292 case 0x01:
1293 conn->max_tx_power = rp->tx_power;
1294 break;
1295 }
1296
1297unlock:
1298 hci_dev_unlock(hdev);
1299}
1300
1190static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1301static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1191{ 1302{
1192 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1342,6 +1453,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1342 * is requested. 1453 * is requested.
1343 */ 1454 */
1344 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 1455 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1456 conn->pending_sec_level != BT_SECURITY_FIPS &&
1345 conn->pending_sec_level != BT_SECURITY_HIGH && 1457 conn->pending_sec_level != BT_SECURITY_HIGH &&
1346 conn->pending_sec_level != BT_SECURITY_MEDIUM) 1458 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1347 return 0; 1459 return 0;
@@ -1827,7 +1939,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 				  info->dev_class, 0, !name_known, ssp, NULL,
-				  0);
+				  0, NULL, 0);
 	}
 
 	hci_dev_unlock(hdev);
@@ -2579,6 +2691,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2579 hci_cc_write_remote_amp_assoc(hdev, skb); 2691 hci_cc_write_remote_amp_assoc(hdev, skb);
2580 break; 2692 break;
2581 2693
2694 case HCI_OP_READ_RSSI:
2695 hci_cc_read_rssi(hdev, skb);
2696 break;
2697
2698 case HCI_OP_READ_TX_POWER:
2699 hci_cc_read_tx_power(hdev, skb);
2700 break;
2701
2582 default: 2702 default:
2583 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2703 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2584 break; 2704 break;
@@ -2957,7 +3077,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
-	    conn->pending_sec_level == BT_SECURITY_HIGH) {
+	    (conn->pending_sec_level == BT_SECURITY_HIGH ||
+	     conn->pending_sec_level == BT_SECURITY_FIPS)) {
 		BT_DBG("%s ignoring key unauthenticated for high security",
 		       hdev->name);
 		goto not_found;
@@ -3102,7 +3223,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
 							  false, &ssp);
 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 					  info->dev_class, info->rssi,
-					  !name_known, ssp, NULL, 0);
+					  !name_known, ssp, NULL, 0, NULL, 0);
 		}
 	} else {
 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -3120,7 +3241,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
 							  false, &ssp);
 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 					  info->dev_class, info->rssi,
-					  !name_known, ssp, NULL, 0);
+					  !name_known, ssp, NULL, 0, NULL, 0);
 		}
 	}
 
@@ -3309,7 +3430,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
 		eir_len = eir_get_length(info->data, sizeof(info->data));
 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 				  info->dev_class, info->rssi, !name_known,
-				  ssp, info->data, eir_len);
+				  ssp, info->data, eir_len, NULL, 0);
 	}
 
 	hci_dev_unlock(hdev);
@@ -3367,24 +3488,20 @@ unlock:
 
 static u8 hci_get_auth_req(struct hci_conn *conn)
 {
-	/* If remote requests dedicated bonding follow that lead */
-	if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
-	    conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
-		/* If both remote and local IO capabilities allow MITM
-		 * protection then require it, otherwise don't */
-		if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
-		    conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
-			return HCI_AT_DEDICATED_BONDING;
-		else
-			return HCI_AT_DEDICATED_BONDING_MITM;
-	}
-
 	/* If remote requests no-bonding follow that lead */
 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
 		return conn->remote_auth | (conn->auth_type & 0x01);
 
-	return conn->auth_type;
+	/* If both remote and local have enough IO capabilities, require
+	 * MITM protection
+	 */
+	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
+	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
+		return conn->remote_auth | 0x01;
+
+	/* No MITM protection possible so ignore remote requirement */
+	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
 }
 
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3414,8 +3531,21 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		 * to DisplayYesNo as it is not supported by BT spec. */
 		cp.capability = (conn->io_capability == 0x04) ?
 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
-		conn->auth_type = hci_get_auth_req(conn);
-		cp.authentication = conn->auth_type;
+
+		/* If we are initiators, there is no remote information yet */
+		if (conn->remote_auth == 0xff) {
+			cp.authentication = conn->auth_type;
+
+			/* Request MITM protection if our IO caps allow it
+			 * except for the no-bonding case
+			 */
+			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+			    cp.authentication != HCI_AT_NO_BONDING)
+				cp.authentication |= 0x01;
+		} else {
+			conn->auth_type = hci_get_auth_req(conn);
+			cp.authentication = conn->auth_type;
+		}
 
 		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
 		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
@@ -3483,12 +3613,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3483 rem_mitm = (conn->remote_auth & 0x01); 3613 rem_mitm = (conn->remote_auth & 0x01);
3484 3614
3485 /* If we require MITM but the remote device can't provide that 3615 /* If we require MITM but the remote device can't provide that
3486 * (it has NoInputNoOutput) then reject the confirmation 3616 * (it has NoInputNoOutput) then reject the confirmation request
3487 * request. The only exception is when we're dedicated bonding 3617 */
3488 * initiators (connect_cfm_cb set) since then we always have the MITM 3618 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3489 * bit set. */
3490 if (!conn->connect_cfm_cb && loc_mitm &&
3491 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3492 BT_DBG("Rejecting request: remote device can't provide MITM"); 3619 BT_DBG("Rejecting request: remote device can't provide MITM");
3493 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3620 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3494 sizeof(ev->bdaddr), &ev->bdaddr); 3621 sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3846,17 +3973,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3846 3973
3847 conn->dst_type = ev->bdaddr_type; 3974 conn->dst_type = ev->bdaddr_type;
3848 3975
3849 /* The advertising parameters for own address type
3850 * define which source address and source address
3851 * type this connections has.
3852 */
3853 if (bacmp(&conn->src, BDADDR_ANY)) {
3854 conn->src_type = ADDR_LE_DEV_PUBLIC;
3855 } else {
3856 bacpy(&conn->src, &hdev->static_addr);
3857 conn->src_type = ADDR_LE_DEV_RANDOM;
3858 }
3859
3860 if (ev->role == LE_CONN_ROLE_MASTER) { 3976 if (ev->role == LE_CONN_ROLE_MASTER) {
3861 conn->out = true; 3977 conn->out = true;
3862 conn->link_mode |= HCI_LM_MASTER; 3978 conn->link_mode |= HCI_LM_MASTER;
@@ -3881,27 +3997,24 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3881 &conn->init_addr, 3997 &conn->init_addr,
3882 &conn->init_addr_type); 3998 &conn->init_addr_type);
3883 } 3999 }
3884 } else {
3885 /* Set the responder (our side) address type based on
3886 * the advertising address type.
3887 */
3888 conn->resp_addr_type = hdev->adv_addr_type;
3889 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3890 bacpy(&conn->resp_addr, &hdev->random_addr);
3891 else
3892 bacpy(&conn->resp_addr, &hdev->bdaddr);
3893
3894 conn->init_addr_type = ev->bdaddr_type;
3895 bacpy(&conn->init_addr, &ev->bdaddr);
3896 } 4000 }
3897 } else { 4001 } else {
3898 cancel_delayed_work(&conn->le_conn_timeout); 4002 cancel_delayed_work(&conn->le_conn_timeout);
3899 } 4003 }
3900 4004
3901 /* Ensure that the hci_conn contains the identity address type 4005 if (!conn->out) {
3902 * regardless of which address the connection was made with. 4006 /* Set the responder (our side) address type based on
3903 */ 4007 * the advertising address type.
3904 hci_copy_identity_address(hdev, &conn->src, &conn->src_type); 4008 */
4009 conn->resp_addr_type = hdev->adv_addr_type;
4010 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4011 bacpy(&conn->resp_addr, &hdev->random_addr);
4012 else
4013 bacpy(&conn->resp_addr, &hdev->bdaddr);
4014
4015 conn->init_addr_type = ev->bdaddr_type;
4016 bacpy(&conn->init_addr, &ev->bdaddr);
4017 }
3905 4018
3906 /* Lookup the identity address from the stored connection 4019 /* Lookup the identity address from the stored connection
3907 * address and address type. 4020 * address and address type.
@@ -3981,25 +4094,97 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3981 } 4094 }
3982} 4095}
3983 4096
4097static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4098 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4099{
4100 struct discovery_state *d = &hdev->discovery;
4101 bool match;
4102
4103 /* Passive scanning shouldn't trigger any device found events */
4104 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4105 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4106 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4107 return;
4108 }
4109
4110 /* If there's nothing pending either store the data from this
4111 * event or send an immediate device found event if the data
4112 * should not be stored for later.
4113 */
4114 if (!has_pending_adv_report(hdev)) {
4115 /* If the report will trigger a SCAN_REQ store it for
4116 * later merging.
4117 */
4118 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4119 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4120 rssi, data, len);
4121 return;
4122 }
4123
4124 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4125 rssi, 0, 1, data, len, NULL, 0);
4126 return;
4127 }
4128
4129 /* Check if the pending report is for the same device as the new one */
4130 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4131 bdaddr_type == d->last_adv_addr_type);
4132
4133 /* If the pending data doesn't match this report or this isn't a
4134 * scan response (e.g. we got a duplicate ADV_IND) then force
4135 * sending of the pending data.
4136 */
4137 if (type != LE_ADV_SCAN_RSP || !match) {
4138 /* Send out whatever is in the cache, but skip duplicates */
4139 if (!match)
4140 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4141 d->last_adv_addr_type, NULL,
4142 d->last_adv_rssi, 0, 1,
4143 d->last_adv_data,
4144 d->last_adv_data_len, NULL, 0);
4145
4146 /* If the new report will trigger a SCAN_REQ store it for
4147 * later merging.
4148 */
4149 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4150 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4151 rssi, data, len);
4152 return;
4153 }
4154
4155 /* The advertising reports cannot be merged, so clear
4156 * the pending report and send out a device found event.
4157 */
4158 clear_pending_adv_report(hdev);
4159 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4160 rssi, 0, 1, data, len, NULL, 0);
4161 return;
4162 }
4163
4164 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4165 * the new event is a SCAN_RSP. We can therefore proceed with
4166 * sending a merged device found event.
4167 */
4168 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4169 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4170 d->last_adv_data, d->last_adv_data_len);
4171 clear_pending_adv_report(hdev);
4172}
4173
3984static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 4174static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3985{ 4175{
3986 u8 num_reports = skb->data[0]; 4176 u8 num_reports = skb->data[0];
3987 void *ptr = &skb->data[1]; 4177 void *ptr = &skb->data[1];
3988 s8 rssi;
3989 4178
3990 hci_dev_lock(hdev); 4179 hci_dev_lock(hdev);
3991 4180
3992 while (num_reports--) { 4181 while (num_reports--) {
3993 struct hci_ev_le_advertising_info *ev = ptr; 4182 struct hci_ev_le_advertising_info *ev = ptr;
3994 4183 s8 rssi;
3995 if (ev->evt_type == LE_ADV_IND ||
3996 ev->evt_type == LE_ADV_DIRECT_IND)
3997 check_pending_le_conn(hdev, &ev->bdaddr,
3998 ev->bdaddr_type);
3999 4184
4000 rssi = ev->data[ev->length]; 4185 rssi = ev->data[ev->length];
4001 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, 4186 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4002 NULL, rssi, 0, 1, ev->data, ev->length); 4187 ev->bdaddr_type, rssi, ev->data, ev->length);
4003 4188
4004 ptr += sizeof(*ev) + ev->length + 1; 4189 ptr += sizeof(*ev) + ev->length + 1;
4005 } 4190 }
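
The new process_adv_report() implements a small caching policy so that an ADV_IND/ADV_SCAN_IND and the SCAN_RSP that follows it can be reported as one device-found event. A condensed sketch of just the decision table, where "scannable" stands for a report type that would trigger a SCAN_REQ; the names here are hypothetical:

	#include <stdbool.h>

	enum adv_action {
		STORE,          /* cache the new report for later merging */
		SEND,           /* emit the new report; a matching cached
				 * entry is a duplicate and is dropped */
		FLUSH_STORE,    /* emit the cached report, then cache new */
		FLUSH_SEND,     /* emit the cached report, then the new one */
		MERGE,          /* emit cached ADV + new SCAN_RSP as one */
	};

	static enum adv_action classify(bool scannable, bool pending,
					bool match, bool scan_rsp)
	{
		if (!pending)
			return scannable ? STORE : SEND;
		if (scan_rsp && match)
			return MERGE;
		if (scannable)
			return match ? STORE : FLUSH_STORE;
		return match ? SEND : FLUSH_SEND;
	}
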
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b9a418e578e0..80d25c150a65 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -143,7 +143,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
143 143
144 if (!skb_copy) { 144 if (!skb_copy) {
145 /* Create a private copy with headroom */ 145 /* Create a private copy with headroom */
146 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC); 146 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
147 if (!skb_copy) 147 if (!skb_copy)
148 continue; 148 continue;
149 149
@@ -247,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
247 struct hci_mon_hdr *hdr; 247 struct hci_mon_hdr *hdr;
248 248
249 /* Create a private copy with headroom */ 249 /* Create a private copy with headroom */
250 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, 250 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
251 GFP_ATOMIC); 251 GFP_ATOMIC, true);
252 if (!skb_copy) 252 if (!skb_copy)
253 continue; 253 continue;
254 254
@@ -524,16 +524,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
524 case HCISETRAW: 524 case HCISETRAW:
525 if (!capable(CAP_NET_ADMIN)) 525 if (!capable(CAP_NET_ADMIN))
526 return -EPERM; 526 return -EPERM;
527 527 return -EOPNOTSUPP;
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
529 return -EPERM;
530
531 if (arg)
532 set_bit(HCI_RAW, &hdev->flags);
533 else
534 clear_bit(HCI_RAW, &hdev->flags);
535
536 return 0;
537 528
538 case HCIGETCONNINFO: 529 case HCIGETCONNINFO:
539 return hci_get_conn_info(hdev, (void __user *) arg); 530 return hci_get_conn_info(hdev, (void __user *) arg);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index dc4d301d3a72..6eabbe05fe54 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -471,8 +471,14 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; 472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->remote_max_tx = chan->max_tx;
475 chan->remote_tx_win = chan->tx_win;
474 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; 476 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->sec_level = BT_SECURITY_LOW; 477 chan->sec_level = BT_SECURITY_LOW;
478 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 chan->conf_state = 0;
476 482
477 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
478} 484}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ef5e5b04f34f..ade3fb4c23bc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1180,13 +1180,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
1180 /* Check for backlog size */ 1180 /* Check for backlog size */
1181 if (sk_acceptq_is_full(parent)) { 1181 if (sk_acceptq_is_full(parent)) {
1182 BT_DBG("backlog full %d", parent->sk_ack_backlog); 1182 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1183 release_sock(parent);
1183 return NULL; 1184 return NULL;
1184 } 1185 }
1185 1186
1186 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 1187 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
1187 GFP_ATOMIC); 1188 GFP_ATOMIC);
1188 if (!sk) 1189 if (!sk) {
1190 release_sock(parent);
1189 return NULL; 1191 return NULL;
1192 }
1190 1193
1191 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); 1194 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
1192 1195
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b3fbc73516c4..941ad7530eda 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)
58 return EIO; 58 return EIO;
59 59
60 case 0x04: 60 case 0x04:
61 case 0x3c:
61 return EHOSTDOWN; 62 return EHOSTDOWN;
62 63
63 case 0x05: 64 case 0x05:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index d2d4e0d5aed0..0fce54412ffd 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -29,12 +29,13 @@
29 29
30#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h> 31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/mgmt.h> 33#include <net/bluetooth/mgmt.h>
33 34
34#include "smp.h" 35#include "smp.h"
35 36
36#define MGMT_VERSION 1 37#define MGMT_VERSION 1
37#define MGMT_REVISION 5 38#define MGMT_REVISION 6
38 39
39static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -83,6 +84,7 @@ static const u16 mgmt_commands[] = {
83 MGMT_OP_SET_DEBUG_KEYS, 84 MGMT_OP_SET_DEBUG_KEYS,
84 MGMT_OP_SET_PRIVACY, 85 MGMT_OP_SET_PRIVACY,
85 MGMT_OP_LOAD_IRKS, 86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
86}; 88};
87 89
88static const u16 mgmt_events[] = { 90static const u16 mgmt_events[] = {
@@ -2850,10 +2852,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2850 } 2852 }
2851 2853
2852 sec_level = BT_SECURITY_MEDIUM; 2854 sec_level = BT_SECURITY_MEDIUM;
2853 if (cp->io_cap == 0x03) 2855 auth_type = HCI_AT_DEDICATED_BONDING;
2854 auth_type = HCI_AT_DEDICATED_BONDING;
2855 else
2856 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2857 2856
2858 if (cp->addr.type == BDADDR_BREDR) { 2857 if (cp->addr.type == BDADDR_BREDR) {
2859 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, 2858 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
@@ -3351,6 +3350,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3351 3350
3352static void start_discovery_complete(struct hci_dev *hdev, u8 status) 3351static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3353{ 3352{
3353 unsigned long timeout = 0;
3354
3354 BT_DBG("status %d", status); 3355 BT_DBG("status %d", status);
3355 3356
3356 if (status) { 3357 if (status) {
@@ -3366,13 +3367,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3366 3367
3367 switch (hdev->discovery.type) { 3368 switch (hdev->discovery.type) {
3368 case DISCOV_TYPE_LE: 3369 case DISCOV_TYPE_LE:
3369 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, 3370 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3370 DISCOV_LE_TIMEOUT);
3371 break; 3371 break;
3372 3372
3373 case DISCOV_TYPE_INTERLEAVED: 3373 case DISCOV_TYPE_INTERLEAVED:
3374 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, 3374 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3375 DISCOV_INTERLEAVED_TIMEOUT);
3376 break; 3375 break;
3377 3376
3378 case DISCOV_TYPE_BREDR: 3377 case DISCOV_TYPE_BREDR:
@@ -3381,6 +3380,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3381 default: 3380 default:
3382 BT_ERR("Invalid discovery type %d", hdev->discovery.type); 3381 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3383 } 3382 }
3383
3384 if (!timeout)
3385 return;
3386
3387 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3384} 3388}
3385 3389
3386static int start_discovery(struct sock *sk, struct hci_dev *hdev, 3390static int start_discovery(struct sock *sk, struct hci_dev *hdev,
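
start_discovery_complete() is restructured so each discovery type only picks a timeout value and the delayed work is queued once at the end, with a zero timeout meaning nothing to schedule. The shape of that refactor, sketched outside the kernel with hypothetical helper names:

	enum discov_type { DISCOV_BREDR, DISCOV_LE, DISCOV_INTERLEAVED };

	/* Stand-in for queue_delayed_work(..., &le_scan_disable, timeout). */
	static void queue_le_scan_disable(unsigned long timeout) { (void)timeout; }

	static void discovery_complete(enum discov_type type,
				       unsigned long le_timeout,
				       unsigned long interleaved_timeout)
	{
		unsigned long timeout = 0;

		switch (type) {
		case DISCOV_LE:
			timeout = le_timeout;
			break;
		case DISCOV_INTERLEAVED:
			timeout = interleaved_timeout;
			break;
		case DISCOV_BREDR:
			break;	/* BR/EDR inquiry completes on its own */
		}

		if (!timeout)
			return;

		queue_le_scan_disable(timeout);
	}
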
@@ -4530,7 +4534,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4530 4534
4531 for (i = 0; i < key_count; i++) { 4535 for (i = 0; i < key_count; i++) {
4532 struct mgmt_ltk_info *key = &cp->keys[i]; 4536 struct mgmt_ltk_info *key = &cp->keys[i];
4533 u8 type, addr_type; 4537 u8 type, addr_type, authenticated;
4534 4538
4535 if (key->addr.type == BDADDR_LE_PUBLIC) 4539 if (key->addr.type == BDADDR_LE_PUBLIC)
4536 addr_type = ADDR_LE_DEV_PUBLIC; 4540 addr_type = ADDR_LE_DEV_PUBLIC;
@@ -4542,8 +4546,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4542 else 4546 else
4543 type = HCI_SMP_LTK_SLAVE; 4547 type = HCI_SMP_LTK_SLAVE;
4544 4548
4549 switch (key->type) {
4550 case MGMT_LTK_UNAUTHENTICATED:
4551 authenticated = 0x00;
4552 break;
4553 case MGMT_LTK_AUTHENTICATED:
4554 authenticated = 0x01;
4555 break;
4556 default:
4557 continue;
4558 }
4559
4545 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type, 4560 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4546 key->type, key->val, key->enc_size, key->ediv, 4561 authenticated, key->val, key->enc_size, key->ediv,
4547 key->rand); 4562 key->rand);
4548 } 4563 }
4549 4564
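
The LTK loader above now maps the mgmt key type onto the internal authenticated flag explicitly, skipping unknown types, and mgmt_ltk_type() (added further down) performs the reverse mapping when emitting events. A hedged sketch of both directions, assuming MGMT_LTK_UNAUTHENTICATED/MGMT_LTK_AUTHENTICATED are 0x00/0x01:

	#include <stdbool.h>
	#include <stdint.h>

	#define MGMT_LTK_UNAUTHENTICATED 0x00
	#define MGMT_LTK_AUTHENTICATED   0x01

	/* Userspace -> kernel: unknown types are rejected and the loader
	 * simply skips the key. */
	static bool ltk_type_to_authenticated(uint8_t type, uint8_t *authenticated)
	{
		switch (type) {
		case MGMT_LTK_UNAUTHENTICATED:
			*authenticated = 0x00;
			return true;
		case MGMT_LTK_AUTHENTICATED:
			*authenticated = 0x01;
			return true;
		default:
			return false;
		}
	}

	/* Kernel -> userspace, mirroring mgmt_ltk_type() in this patch. */
	static uint8_t authenticated_to_ltk_type(uint8_t authenticated)
	{
		return authenticated ? MGMT_LTK_AUTHENTICATED
				     : MGMT_LTK_UNAUTHENTICATED;
	}
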
@@ -4555,6 +4570,218 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4555 return err; 4570 return err;
4556} 4571}
4557 4572
4573struct cmd_conn_lookup {
4574 struct hci_conn *conn;
4575 bool valid_tx_power;
4576 u8 mgmt_status;
4577};
4578
4579static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4580{
4581 struct cmd_conn_lookup *match = data;
4582 struct mgmt_cp_get_conn_info *cp;
4583 struct mgmt_rp_get_conn_info rp;
4584 struct hci_conn *conn = cmd->user_data;
4585
4586 if (conn != match->conn)
4587 return;
4588
4589 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4590
4591 memset(&rp, 0, sizeof(rp));
4592 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4593 rp.addr.type = cp->addr.type;
4594
4595 if (!match->mgmt_status) {
4596 rp.rssi = conn->rssi;
4597
4598 if (match->valid_tx_power) {
4599 rp.tx_power = conn->tx_power;
4600 rp.max_tx_power = conn->max_tx_power;
4601 } else {
4602 rp.tx_power = HCI_TX_POWER_INVALID;
4603 rp.max_tx_power = HCI_TX_POWER_INVALID;
4604 }
4605 }
4606
4607 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4608 match->mgmt_status, &rp, sizeof(rp));
4609
4610 hci_conn_drop(conn);
4611
4612 mgmt_pending_remove(cmd);
4613}
4614
4615static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4616{
4617 struct hci_cp_read_rssi *cp;
4618 struct hci_conn *conn;
4619 struct cmd_conn_lookup match;
4620 u16 handle;
4621
4622 BT_DBG("status 0x%02x", status);
4623
4624 hci_dev_lock(hdev);
4625
4626 /* TX power data is valid if the request completed successfully,
4627 * otherwise we assume it's not valid. For now we assume that either
4628 * both or neither of the current and max values are valid, to keep
4629 * the code simple.
4630 */
4631 match.valid_tx_power = !status;
4632
4633 /* The commands sent in the request are either Read RSSI or Read
4634 * Transmit Power Level, so check which one was sent last to retrieve
4635 * the connection handle. Both commands take the handle as their first
4636 * parameter, so it's safe to cast the data to the same command struct.
4637 *
4638 * The first command sent is always Read RSSI, and we fail only if it
4639 * fails. Otherwise we simply override the error to indicate success,
4640 * since we already remembered whether the TX power value is valid.
4641 */
4642 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4643 if (!cp) {
4644 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4645 status = 0;
4646 }
4647
4648 if (!cp) {
4649 BT_ERR("invalid sent_cmd in response");
4650 goto unlock;
4651 }
4652
4653 handle = __le16_to_cpu(cp->handle);
4654 conn = hci_conn_hash_lookup_handle(hdev, handle);
4655 if (!conn) {
4656 BT_ERR("unknown handle (%d) in response", handle);
4657 goto unlock;
4658 }
4659
4660 match.conn = conn;
4661 match.mgmt_status = mgmt_status(status);
4662
4663 /* Cache refresh is complete, now reply for mgmt request for given
4664 * connection only.
4665 */
4666 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4667 get_conn_info_complete, &match);
4668
4669unlock:
4670 hci_dev_unlock(hdev);
4671}
4672
4673static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4674 u16 len)
4675{
4676 struct mgmt_cp_get_conn_info *cp = data;
4677 struct mgmt_rp_get_conn_info rp;
4678 struct hci_conn *conn;
4679 unsigned long conn_info_age;
4680 int err = 0;
4681
4682 BT_DBG("%s", hdev->name);
4683
4684 memset(&rp, 0, sizeof(rp));
4685 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4686 rp.addr.type = cp->addr.type;
4687
4688 if (!bdaddr_type_is_valid(cp->addr.type))
4689 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4690 MGMT_STATUS_INVALID_PARAMS,
4691 &rp, sizeof(rp));
4692
4693 hci_dev_lock(hdev);
4694
4695 if (!hdev_is_powered(hdev)) {
4696 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4697 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4698 goto unlock;
4699 }
4700
4701 if (cp->addr.type == BDADDR_BREDR)
4702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4703 &cp->addr.bdaddr);
4704 else
4705 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4706
4707 if (!conn || conn->state != BT_CONNECTED) {
4708 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4709 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4710 goto unlock;
4711 }
4712
4713 /* To keep clients from guessing when to poll again for information,
4714 * calculate the conn info age as a random value between the min/max set in hdev.
4715 */
4716 conn_info_age = hdev->conn_info_min_age +
4717 prandom_u32_max(hdev->conn_info_max_age -
4718 hdev->conn_info_min_age);
4719
4720 /* Query controller to refresh cached values if they are too old or were
4721 * never read.
4722 */
4723 if (time_after(jiffies, conn->conn_info_timestamp +
4724 msecs_to_jiffies(conn_info_age)) ||
4725 !conn->conn_info_timestamp) {
4726 struct hci_request req;
4727 struct hci_cp_read_tx_power req_txp_cp;
4728 struct hci_cp_read_rssi req_rssi_cp;
4729 struct pending_cmd *cmd;
4730
4731 hci_req_init(&req, hdev);
4732 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4733 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4734 &req_rssi_cp);
4735
4736 /* For LE links TX power does not change thus we don't need to
4737 * query for it once value is known.
4738 */
4739 if (!bdaddr_type_is_le(cp->addr.type) ||
4740 conn->tx_power == HCI_TX_POWER_INVALID) {
4741 req_txp_cp.handle = cpu_to_le16(conn->handle);
4742 req_txp_cp.type = 0x00;
4743 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4744 sizeof(req_txp_cp), &req_txp_cp);
4745 }
4746
4747 /* Max TX power needs to be read only once per connection */
4748 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4749 req_txp_cp.handle = cpu_to_le16(conn->handle);
4750 req_txp_cp.type = 0x01;
4751 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4752 sizeof(req_txp_cp), &req_txp_cp);
4753 }
4754
4755 err = hci_req_run(&req, conn_info_refresh_complete);
4756 if (err < 0)
4757 goto unlock;
4758
4759 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4760 data, len);
4761 if (!cmd) {
4762 err = -ENOMEM;
4763 goto unlock;
4764 }
4765
4766 hci_conn_hold(conn);
4767 cmd->user_data = conn;
4768
4769 conn->conn_info_timestamp = jiffies;
4770 } else {
4771 /* Cache is valid, just reply with values cached in hci_conn */
4772 rp.rssi = conn->rssi;
4773 rp.tx_power = conn->tx_power;
4774 rp.max_tx_power = conn->max_tx_power;
4775
4776 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4777 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4778 }
4779
4780unlock:
4781 hci_dev_unlock(hdev);
4782 return err;
4783}
4784
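
The refresh window used by get_conn_info() is deliberately randomized between hdev->conn_info_min_age and conn_info_max_age so userspace cannot learn a fixed polling interval. The same computation in a standalone sketch, with rand() standing in for the kernel's prandom_u32_max():

	#include <stdlib.h>

	/* Pick a cache lifetime in [min_age, max_age) milliseconds, mirroring
	 * min_age + prandom_u32_max(max_age - min_age) from the patch. */
	static unsigned long conn_info_age_ms(unsigned long min_age,
					      unsigned long max_age)
	{
		if (max_age <= min_age)
			return min_age;
		return min_age + (unsigned long)rand() % (max_age - min_age);
	}
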
4558static const struct mgmt_handler { 4785static const struct mgmt_handler {
4559 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 4786 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4560 u16 data_len); 4787 u16 data_len);
@@ -4610,6 +4837,7 @@ static const struct mgmt_handler {
4610 { set_debug_keys, false, MGMT_SETTING_SIZE }, 4837 { set_debug_keys, false, MGMT_SETTING_SIZE },
4611 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 4838 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4612 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 4839 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4840 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
4613}; 4841};
4614 4842
4615 4843
@@ -5005,6 +5233,14 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5005 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 5233 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5006} 5234}
5007 5235
5236static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5237{
5238 if (ltk->authenticated)
5239 return MGMT_LTK_AUTHENTICATED;
5240
5241 return MGMT_LTK_UNAUTHENTICATED;
5242}
5243
5008void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) 5244void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5009{ 5245{
5010 struct mgmt_ev_new_long_term_key ev; 5246 struct mgmt_ev_new_long_term_key ev;
@@ -5030,7 +5266,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5030 5266
5031 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 5267 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5032 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); 5268 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5033 ev.key.type = key->authenticated; 5269 ev.key.type = mgmt_ltk_type(key);
5034 ev.key.enc_size = key->enc_size; 5270 ev.key.enc_size = key->enc_size;
5035 ev.key.ediv = key->ediv; 5271 ev.key.ediv = key->ediv;
5036 ev.key.rand = key->rand; 5272 ev.key.rand = key->rand;
@@ -5668,8 +5904,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5668} 5904}
5669 5905
5670void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 5906void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5671 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8 5907 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5672 ssp, u8 *eir, u16 eir_len) 5908 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5909 u8 scan_rsp_len)
5673{ 5910{
5674 char buf[512]; 5911 char buf[512];
5675 struct mgmt_ev_device_found *ev = (void *) buf; 5912 struct mgmt_ev_device_found *ev = (void *) buf;
@@ -5679,8 +5916,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5679 if (!hci_discovery_active(hdev)) 5916 if (!hci_discovery_active(hdev))
5680 return; 5917 return;
5681 5918
5682 /* Leave 5 bytes for a potential CoD field */ 5919 /* Make sure that the buffer is big enough. The 5 extra bytes
5683 if (sizeof(*ev) + eir_len + 5 > sizeof(buf)) 5920 * are for the potential CoD field.
5921 */
5922 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5684 return; 5923 return;
5685 5924
5686 memset(buf, 0, sizeof(buf)); 5925 memset(buf, 0, sizeof(buf));
@@ -5707,8 +5946,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5707 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, 5946 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5708 dev_class, 3); 5947 dev_class, 3);
5709 5948
5710 ev->eir_len = cpu_to_le16(eir_len); 5949 if (scan_rsp_len > 0)
5711 ev_size = sizeof(*ev) + eir_len; 5950 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5951
5952 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5953 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5712 5954
5713 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); 5955 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5714} 5956}
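
mgmt_device_found() now appends the scan response after the EIR data in one variable-length event, and the size check reserves 5 extra bytes for a Class-of-Device field that may be synthesized. A simplified, self-contained sketch of the packing; the struct here is a hypothetical stand-in for mgmt_ev_device_found:

	#include <stdint.h>
	#include <string.h>

	struct found_ev {
		uint16_t eir_len;	/* hypothetical stand-in */
		uint8_t  eir[];
	};

	/* Returns the total event size, or 0 if the buffer would overflow.
	 * 5 bytes are reserved for a potential Class-of-Device EIR field. */
	static size_t pack_found_ev(uint8_t *buf, size_t buf_len,
				    const uint8_t *eir, uint16_t eir_len,
				    const uint8_t *scan_rsp, uint8_t scan_rsp_len)
	{
		struct found_ev *ev = (struct found_ev *)buf;

		if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > buf_len)
			return 0;

		memset(buf, 0, buf_len);
		memcpy(ev->eir, eir, eir_len);
		if (scan_rsp_len > 0)
			memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
		ev->eir_len = (uint16_t)(eir_len + scan_rsp_len);

		return sizeof(*ev) + eir_len + scan_rsp_len;
	}
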
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index cf620260affa..754b6fe4f742 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
307 setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d); 307 setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
308 308
309 skb_queue_head_init(&d->tx_queue); 309 skb_queue_head_init(&d->tx_queue);
310 spin_lock_init(&d->lock); 310 mutex_init(&d->lock);
311 atomic_set(&d->refcnt, 1); 311 atomic_set(&d->refcnt, 1);
312 312
313 rfcomm_dlc_clear_state(d); 313 rfcomm_dlc_clear_state(d);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 403ec09f480a..8e385a0ae60e 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -70,7 +70,7 @@ struct rfcomm_dev {
70}; 70};
71 71
72static LIST_HEAD(rfcomm_dev_list); 72static LIST_HEAD(rfcomm_dev_list);
73static DEFINE_SPINLOCK(rfcomm_dev_lock); 73static DEFINE_MUTEX(rfcomm_dev_lock);
74 74
75static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); 75static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
76static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); 76static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
@@ -96,9 +96,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
96 if (dev->tty_dev) 96 if (dev->tty_dev)
97 tty_unregister_device(rfcomm_tty_driver, dev->id); 97 tty_unregister_device(rfcomm_tty_driver, dev->id);
98 98
99 spin_lock(&rfcomm_dev_lock); 99 mutex_lock(&rfcomm_dev_lock);
100 list_del(&dev->list); 100 list_del(&dev->list);
101 spin_unlock(&rfcomm_dev_lock); 101 mutex_unlock(&rfcomm_dev_lock);
102 102
103 kfree(dev); 103 kfree(dev);
104 104
@@ -161,14 +161,14 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
161{ 161{
162 struct rfcomm_dev *dev; 162 struct rfcomm_dev *dev;
163 163
164 spin_lock(&rfcomm_dev_lock); 164 mutex_lock(&rfcomm_dev_lock);
165 165
166 dev = __rfcomm_dev_lookup(id); 166 dev = __rfcomm_dev_lookup(id);
167 167
168 if (dev && !tty_port_get(&dev->port)) 168 if (dev && !tty_port_get(&dev->port))
169 dev = NULL; 169 dev = NULL;
170 170
171 spin_unlock(&rfcomm_dev_lock); 171 mutex_unlock(&rfcomm_dev_lock);
172 172
173 return dev; 173 return dev;
174} 174}
@@ -224,7 +224,7 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
224 if (!dev) 224 if (!dev)
225 return ERR_PTR(-ENOMEM); 225 return ERR_PTR(-ENOMEM);
226 226
227 spin_lock(&rfcomm_dev_lock); 227 mutex_lock(&rfcomm_dev_lock);
228 228
229 if (req->dev_id < 0) { 229 if (req->dev_id < 0) {
230 dev->id = 0; 230 dev->id = 0;
@@ -305,11 +305,11 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
305 holds reference to this module. */ 305 holds reference to this module. */
306 __module_get(THIS_MODULE); 306 __module_get(THIS_MODULE);
307 307
308 spin_unlock(&rfcomm_dev_lock); 308 mutex_unlock(&rfcomm_dev_lock);
309 return dev; 309 return dev;
310 310
311out: 311out:
312 spin_unlock(&rfcomm_dev_lock); 312 mutex_unlock(&rfcomm_dev_lock);
313 kfree(dev); 313 kfree(dev);
314 return ERR_PTR(err); 314 return ERR_PTR(err);
315} 315}
@@ -524,7 +524,7 @@ static int rfcomm_get_dev_list(void __user *arg)
524 524
525 di = dl->dev_info; 525 di = dl->dev_info;
526 526
527 spin_lock(&rfcomm_dev_lock); 527 mutex_lock(&rfcomm_dev_lock);
528 528
529 list_for_each_entry(dev, &rfcomm_dev_list, list) { 529 list_for_each_entry(dev, &rfcomm_dev_list, list) {
530 if (!tty_port_get(&dev->port)) 530 if (!tty_port_get(&dev->port))
@@ -540,7 +540,7 @@ static int rfcomm_get_dev_list(void __user *arg)
540 break; 540 break;
541 } 541 }
542 542
543 spin_unlock(&rfcomm_dev_lock); 543 mutex_unlock(&rfcomm_dev_lock);
544 544
545 dl->dev_num = n; 545 dl->dev_num = n;
546 size = sizeof(*dl) + n * sizeof(*di); 546 size = sizeof(*dl) + n * sizeof(*di);
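
Both the per-dlc lock and rfcomm_dev_lock are converted from spinlocks to mutexes in the rfcomm changes above and below, so the critical sections are now allowed to sleep (for example across allocations or tty operations). A userspace analogy of the resulting pattern, using pthreads since a standalone example can't use the kernel's mutex API:

	#include <pthread.h>

	static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Under a spinlock the body must never block; under a mutex it may. */
	static void with_dev_list_locked(void (*fn)(void *), void *arg)
	{
		pthread_mutex_lock(&dev_list_lock);
		fn(arg);	/* may now sleep, e.g. allocate or do I/O */
		pthread_mutex_unlock(&dev_list_lock);
	}
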
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index dfb4e1161c10..3d1cc164557d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -35,6 +35,33 @@
35 35
36#define AUTH_REQ_MASK 0x07 36#define AUTH_REQ_MASK 0x07
37 37
38#define SMP_FLAG_TK_VALID 1
39#define SMP_FLAG_CFM_PENDING 2
40#define SMP_FLAG_MITM_AUTH 3
41#define SMP_FLAG_COMPLETE 4
42#define SMP_FLAG_INITIATOR 5
43
44struct smp_chan {
45 struct l2cap_conn *conn;
46 u8 preq[7]; /* SMP Pairing Request */
47 u8 prsp[7]; /* SMP Pairing Response */
48 u8 prnd[16]; /* SMP Pairing Random (local) */
49 u8 rrnd[16]; /* SMP Pairing Random (remote) */
50 u8 pcnf[16]; /* SMP Pairing Confirm */
51 u8 tk[16]; /* SMP Temporary Key */
52 u8 enc_key_size;
53 u8 remote_key_dist;
54 bdaddr_t id_addr;
55 u8 id_addr_type;
56 u8 irk[16];
57 struct smp_csrk *csrk;
58 struct smp_csrk *slave_csrk;
59 struct smp_ltk *ltk;
60 struct smp_ltk *slave_ltk;
61 struct smp_irk *remote_irk;
62 unsigned long flags;
63};
64
38static inline void swap128(const u8 src[16], u8 dst[16]) 65static inline void swap128(const u8 src[16], u8 dst[16])
39{ 66{
40 int i; 67 int i;
@@ -369,7 +396,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
369 396
370 /* Initialize key for JUST WORKS */ 397 /* Initialize key for JUST WORKS */
371 memset(smp->tk, 0, sizeof(smp->tk)); 398 memset(smp->tk, 0, sizeof(smp->tk));
372 clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 399 clear_bit(SMP_FLAG_TK_VALID, &smp->flags);
373 400
374 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
375 402
@@ -388,19 +415,18 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
388 method = JUST_WORKS; 415 method = JUST_WORKS;
389 416
390 /* Don't confirm locally initiated pairing attempts */ 417 /* Don't confirm locally initiated pairing attempts */
391 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, 418 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
392 &smp->smp_flags))
393 method = JUST_WORKS; 419 method = JUST_WORKS;
394 420
395 /* If Just Works, Continue with Zero TK */ 421 /* If Just Works, Continue with Zero TK */
396 if (method == JUST_WORKS) { 422 if (method == JUST_WORKS) {
397 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 423 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
398 return 0; 424 return 0;
399 } 425 }
400 426
401 /* Not Just Works/Confirm results in MITM Authentication */ 427 /* Not Just Works/Confirm results in MITM Authentication */
402 if (method != JUST_CFM) 428 if (method != JUST_CFM)
403 set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); 429 set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
404 430
405 /* If both devices have Keyboard-Display I/O, the master 431
406 * Confirms and the slave Enters the passkey. 432 * Confirms and the slave Enters the passkey.
@@ -419,7 +445,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
419 passkey %= 1000000; 445 passkey %= 1000000;
420 put_unaligned_le32(passkey, smp->tk); 446 put_unaligned_le32(passkey, smp->tk);
421 BT_DBG("PassKey: %d", passkey); 447 BT_DBG("PassKey: %d", passkey);
422 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 448 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
423 } 449 }
424 450
425 hci_dev_lock(hcon->hdev); 451 hci_dev_lock(hcon->hdev);
@@ -441,15 +467,13 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
441 return ret; 467 return ret;
442} 468}
443 469
444static void confirm_work(struct work_struct *work) 470static u8 smp_confirm(struct smp_chan *smp)
445{ 471{
446 struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
447 struct l2cap_conn *conn = smp->conn; 472 struct l2cap_conn *conn = smp->conn;
448 struct hci_dev *hdev = conn->hcon->hdev; 473 struct hci_dev *hdev = conn->hcon->hdev;
449 struct crypto_blkcipher *tfm = hdev->tfm_aes; 474 struct crypto_blkcipher *tfm = hdev->tfm_aes;
450 struct smp_cmd_pairing_confirm cp; 475 struct smp_cmd_pairing_confirm cp;
451 int ret; 476 int ret;
452 u8 reason;
453 477
454 BT_DBG("conn %p", conn); 478 BT_DBG("conn %p", conn);
455 479
@@ -463,35 +487,27 @@ static void confirm_work(struct work_struct *work)
463 487
464 hci_dev_unlock(hdev); 488 hci_dev_unlock(hdev);
465 489
466 if (ret) { 490 if (ret)
467 reason = SMP_UNSPECIFIED; 491 return SMP_UNSPECIFIED;
468 goto error;
469 }
470 492
471 clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 493 clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
472 494
473 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); 495 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
474 496
475 return; 497 return 0;
476
477error:
478 smp_failure(conn, reason);
479} 498}
480 499
481static void random_work(struct work_struct *work) 500static u8 smp_random(struct smp_chan *smp)
482{ 501{
483 struct smp_chan *smp = container_of(work, struct smp_chan, random);
484 struct l2cap_conn *conn = smp->conn; 502 struct l2cap_conn *conn = smp->conn;
485 struct hci_conn *hcon = conn->hcon; 503 struct hci_conn *hcon = conn->hcon;
486 struct hci_dev *hdev = hcon->hdev; 504 struct hci_dev *hdev = hcon->hdev;
487 struct crypto_blkcipher *tfm = hdev->tfm_aes; 505 struct crypto_blkcipher *tfm = hdev->tfm_aes;
488 u8 reason, confirm[16]; 506 u8 confirm[16];
489 int ret; 507 int ret;
490 508
491 if (IS_ERR_OR_NULL(tfm)) { 509 if (IS_ERR_OR_NULL(tfm))
492 reason = SMP_UNSPECIFIED; 510 return SMP_UNSPECIFIED;
493 goto error;
494 }
495 511
496 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 512 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
497 513
@@ -504,15 +520,12 @@ static void random_work(struct work_struct *work)
504 520
505 hci_dev_unlock(hdev); 521 hci_dev_unlock(hdev);
506 522
507 if (ret) { 523 if (ret)
508 reason = SMP_UNSPECIFIED; 524 return SMP_UNSPECIFIED;
509 goto error;
510 }
511 525
512 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) { 526 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
513 BT_ERR("Pairing failed (confirmation values mismatch)"); 527 BT_ERR("Pairing failed (confirmation values mismatch)");
514 reason = SMP_CONFIRM_FAILED; 528 return SMP_CONFIRM_FAILED;
515 goto error;
516 } 529 }
517 530
518 if (hcon->out) { 531 if (hcon->out) {
@@ -525,10 +538,8 @@ static void random_work(struct work_struct *work)
525 memset(stk + smp->enc_key_size, 0, 538 memset(stk + smp->enc_key_size, 0,
526 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 539 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
527 540
528 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) { 541 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
529 reason = SMP_UNSPECIFIED; 542 return SMP_UNSPECIFIED;
530 goto error;
531 }
532 543
533 hci_le_start_enc(hcon, ediv, rand, stk); 544 hci_le_start_enc(hcon, ediv, rand, stk);
534 hcon->enc_key_size = smp->enc_key_size; 545 hcon->enc_key_size = smp->enc_key_size;
@@ -550,10 +561,7 @@ static void random_work(struct work_struct *work)
550 ediv, rand); 561 ediv, rand);
551 } 562 }
552 563
553 return; 564 return 0;
554
555error:
556 smp_failure(conn, reason);
557} 565}
558 566
559static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) 567static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
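
With confirm_work()/random_work() turned into synchronous smp_confirm()/smp_random(), failure handling moves to the callers: the helpers return an SMP reason code and the caller invokes smp_failure() on a non-zero result. The convention, sketched with a hypothetical stand-in modeled on smp_random(); the reason values match smp.h:

	#include <stdint.h>

	#define SMP_CONFIRM_FAILED	0x04
	#define SMP_UNSPECIFIED		0x08

	/* Hypothetical stand-in for smp_random(); 0 means success. */
	static uint8_t do_pairing_step(int crypto_ok, int confirm_match)
	{
		if (!crypto_ok)
			return SMP_UNSPECIFIED;
		if (!confirm_match)
			return SMP_CONFIRM_FAILED;
		return 0;
	}

	static void handle_step(int crypto_ok, int confirm_match,
				void (*fail)(uint8_t reason))
	{
		uint8_t rsp = do_pairing_step(crypto_ok, confirm_match);

		/* Mirrors "rsp = smp_confirm(smp);
		 *          if (rsp) smp_failure(conn, rsp);" */
		if (rsp)
			fail(rsp);
	}
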
@@ -564,9 +572,6 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
564 if (!smp) 572 if (!smp)
565 return NULL; 573 return NULL;
566 574
567 INIT_WORK(&smp->confirm, confirm_work);
568 INIT_WORK(&smp->random, random_work);
569
570 smp->conn = conn; 575 smp->conn = conn;
571 conn->smp_chan = smp; 576 conn->smp_chan = smp;
572 conn->hcon->smp_conn = conn; 577 conn->hcon->smp_conn = conn;
@@ -583,7 +588,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
583 588
584 BUG_ON(!smp); 589 BUG_ON(!smp);
585 590
586 complete = test_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); 591 complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
587 mgmt_smp_complete(conn->hcon, complete); 592 mgmt_smp_complete(conn->hcon, complete);
588 593
589 kfree(smp->csrk); 594 kfree(smp->csrk);
@@ -634,7 +639,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
634 put_unaligned_le32(value, smp->tk); 639 put_unaligned_le32(value, smp->tk);
635 /* Fall Through */ 640 /* Fall Through */
636 case MGMT_OP_USER_CONFIRM_REPLY: 641 case MGMT_OP_USER_CONFIRM_REPLY:
637 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 642 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
638 break; 643 break;
639 case MGMT_OP_USER_PASSKEY_NEG_REPLY: 644 case MGMT_OP_USER_PASSKEY_NEG_REPLY:
640 case MGMT_OP_USER_CONFIRM_NEG_REPLY: 645 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
@@ -646,8 +651,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
646 } 651 }
647 652
648 /* If it is our turn to send Pairing Confirm, do so now */ 653 /* If it is our turn to send Pairing Confirm, do so now */
649 if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags)) 654 if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) {
650 queue_work(hcon->hdev->workqueue, &smp->confirm); 655 u8 rsp = smp_confirm(smp);
656 if (rsp)
657 smp_failure(conn, rsp);
658 }
651 659
652 return 0; 660 return 0;
653} 661}
@@ -656,14 +664,13 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
656{ 664{
657 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 665 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
658 struct smp_chan *smp; 666 struct smp_chan *smp;
659 u8 key_size; 667 u8 key_size, auth;
660 u8 auth = SMP_AUTH_NONE;
661 int ret; 668 int ret;
662 669
663 BT_DBG("conn %p", conn); 670 BT_DBG("conn %p", conn);
664 671
665 if (skb->len < sizeof(*req)) 672 if (skb->len < sizeof(*req))
666 return SMP_UNSPECIFIED; 673 return SMP_INVALID_PARAMS;
667 674
668 if (conn->hcon->link_mode & HCI_LM_MASTER) 675 if (conn->hcon->link_mode & HCI_LM_MASTER)
669 return SMP_CMD_NOTSUPP; 676 return SMP_CMD_NOTSUPP;
@@ -681,8 +688,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
681 skb_pull(skb, sizeof(*req)); 688 skb_pull(skb, sizeof(*req));
682 689
683 /* We didn't start the pairing, so match remote */ 690 /* We didn't start the pairing, so match remote */
684 if (req->auth_req & SMP_AUTH_BONDING) 691 auth = req->auth_req;
685 auth = req->auth_req;
686 692
687 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 693 conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
688 694
@@ -704,7 +710,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
704 if (ret) 710 if (ret)
705 return SMP_UNSPECIFIED; 711 return SMP_UNSPECIFIED;
706 712
707 clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 713 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
708 714
709 return 0; 715 return 0;
710} 716}
@@ -713,14 +719,13 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
713{ 719{
714 struct smp_cmd_pairing *req, *rsp = (void *) skb->data; 720 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
715 struct smp_chan *smp = conn->smp_chan; 721 struct smp_chan *smp = conn->smp_chan;
716 struct hci_dev *hdev = conn->hcon->hdev;
717 u8 key_size, auth = SMP_AUTH_NONE; 722 u8 key_size, auth = SMP_AUTH_NONE;
718 int ret; 723 int ret;
719 724
720 BT_DBG("conn %p", conn); 725 BT_DBG("conn %p", conn);
721 726
722 if (skb->len < sizeof(*rsp)) 727 if (skb->len < sizeof(*rsp))
723 return SMP_UNSPECIFIED; 728 return SMP_INVALID_PARAMS;
724 729
725 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 730 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
726 return SMP_CMD_NOTSUPP; 731 return SMP_CMD_NOTSUPP;
@@ -753,11 +758,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
753 if (ret) 758 if (ret)
754 return SMP_UNSPECIFIED; 759 return SMP_UNSPECIFIED;
755 760
756 set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 761 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
757 762
758 /* Can't compose response until we have been confirmed */ 763 /* Can't compose response until we have been confirmed */
759 if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) 764 if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
760 queue_work(hdev->workqueue, &smp->confirm); 765 return smp_confirm(smp);
761 766
762 return 0; 767 return 0;
763} 768}
@@ -765,12 +770,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
765static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) 770static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
766{ 771{
767 struct smp_chan *smp = conn->smp_chan; 772 struct smp_chan *smp = conn->smp_chan;
768 struct hci_dev *hdev = conn->hcon->hdev;
769 773
770 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 774 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
771 775
772 if (skb->len < sizeof(smp->pcnf)) 776 if (skb->len < sizeof(smp->pcnf))
773 return SMP_UNSPECIFIED; 777 return SMP_INVALID_PARAMS;
774 778
775 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); 779 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
776 skb_pull(skb, sizeof(smp->pcnf)); 780 skb_pull(skb, sizeof(smp->pcnf));
@@ -778,10 +782,10 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
778 if (conn->hcon->out) 782 if (conn->hcon->out)
779 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), 783 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
780 smp->prnd); 784 smp->prnd);
781 else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) 785 else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
782 queue_work(hdev->workqueue, &smp->confirm); 786 return smp_confirm(smp);
783 else 787 else
784 set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 788 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
785 789
786 return 0; 790 return 0;
787} 791}
@@ -789,19 +793,16 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
789static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) 793static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
790{ 794{
791 struct smp_chan *smp = conn->smp_chan; 795 struct smp_chan *smp = conn->smp_chan;
792 struct hci_dev *hdev = conn->hcon->hdev;
793 796
794 BT_DBG("conn %p", conn); 797 BT_DBG("conn %p", conn);
795 798
796 if (skb->len < sizeof(smp->rrnd)) 799 if (skb->len < sizeof(smp->rrnd))
797 return SMP_UNSPECIFIED; 800 return SMP_INVALID_PARAMS;
798 801
799 memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); 802 memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
800 skb_pull(skb, sizeof(smp->rrnd)); 803 skb_pull(skb, sizeof(smp->rrnd));
801 804
802 queue_work(hdev->workqueue, &smp->random); 805 return smp_random(smp);
803
804 return 0;
805} 806}
806 807
807static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) 808static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
@@ -836,7 +837,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
836 BT_DBG("conn %p", conn); 837 BT_DBG("conn %p", conn);
837 838
838 if (skb->len < sizeof(*rp)) 839 if (skb->len < sizeof(*rp))
839 return SMP_UNSPECIFIED; 840 return SMP_INVALID_PARAMS;
840 841
841 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 842 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
842 return SMP_CMD_NOTSUPP; 843 return SMP_CMD_NOTSUPP;
@@ -861,7 +862,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
861 862
862 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 863 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
863 864
864 clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 865 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
865 866
866 return 0; 867 return 0;
867} 868}
@@ -908,10 +909,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
908 909
909 authreq = seclevel_to_authreq(sec_level); 910 authreq = seclevel_to_authreq(sec_level);
910 911
911 /* hcon->auth_type is set by pair_device in mgmt.c. If the MITM 912 /* Require MITM if IO Capability allows or the security level
912 * flag is set we should also set it for the SMP request. 913 * requires it.
913 */ 914 */
914 if ((hcon->auth_type & 0x01)) 915 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
916 sec_level > BT_SECURITY_MEDIUM)
915 authreq |= SMP_AUTH_MITM; 917 authreq |= SMP_AUTH_MITM;
916 918
917 if (hcon->link_mode & HCI_LM_MASTER) { 919 if (hcon->link_mode & HCI_LM_MASTER) {
@@ -928,7 +930,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
928 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); 930 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
929 } 931 }
930 932
931 set_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 933 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
932 934
933done: 935done:
934 hcon->pending_sec_level = sec_level; 936 hcon->pending_sec_level = sec_level;
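
smp_conn_security() now derives the MITM bit from the local IO capability and the requested security level instead of copying it from hcon->auth_type. Distilled into a standalone helper, with constant values assumed from the kernel headers:

	#include <stdint.h>

	#define HCI_IO_NO_INPUT_OUTPUT	0x03
	#define BT_SECURITY_MEDIUM	2
	#define SMP_AUTH_MITM		0x04

	static uint8_t smp_authreq(uint8_t authreq, uint8_t io_capability,
				   int sec_level)
	{
		/* Require MITM if IO capability allows it or the security
		 * level requires it. */
		if (io_capability != HCI_IO_NO_INPUT_OUTPUT ||
		    sec_level > BT_SECURITY_MEDIUM)
			authreq |= SMP_AUTH_MITM;
		return authreq;
	}
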
@@ -944,7 +946,7 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
944 BT_DBG("conn %p", conn); 946 BT_DBG("conn %p", conn);
945 947
946 if (skb->len < sizeof(*rp)) 948 if (skb->len < sizeof(*rp))
947 return SMP_UNSPECIFIED; 949 return SMP_INVALID_PARAMS;
948 950
949 /* Ignore this PDU if it wasn't requested */ 951 /* Ignore this PDU if it wasn't requested */
950 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) 952 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -969,7 +971,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
969 BT_DBG("conn %p", conn); 971 BT_DBG("conn %p", conn);
970 972
971 if (skb->len < sizeof(*rp)) 973 if (skb->len < sizeof(*rp))
972 return SMP_UNSPECIFIED; 974 return SMP_INVALID_PARAMS;
973 975
974 /* Ignore this PDU if it wasn't requested */ 976 /* Ignore this PDU if it wasn't requested */
975 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) 977 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -1001,7 +1003,7 @@ static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
1001 BT_DBG(""); 1003 BT_DBG("");
1002 1004
1003 if (skb->len < sizeof(*info)) 1005 if (skb->len < sizeof(*info))
1004 return SMP_UNSPECIFIED; 1006 return SMP_INVALID_PARAMS;
1005 1007
1006 /* Ignore this PDU if it wasn't requested */ 1008 /* Ignore this PDU if it wasn't requested */
1007 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) 1009 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1025,7 +1027,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1025 BT_DBG(""); 1027 BT_DBG("");
1026 1028
1027 if (skb->len < sizeof(*info)) 1029 if (skb->len < sizeof(*info))
1028 return SMP_UNSPECIFIED; 1030 return SMP_INVALID_PARAMS;
1029 1031
1030 /* Ignore this PDU if it wasn't requested */ 1032 /* Ignore this PDU if it wasn't requested */
1031 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) 1033 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1075,7 +1077,7 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
1075 BT_DBG("conn %p", conn); 1077 BT_DBG("conn %p", conn);
1076 1078
1077 if (skb->len < sizeof(*rp)) 1079 if (skb->len < sizeof(*rp))
1078 return SMP_UNSPECIFIED; 1080 return SMP_INVALID_PARAMS;
1079 1081
1080 /* Ignore this PDU if it wasn't requested */ 1082 /* Ignore this PDU if it wasn't requested */
1081 if (!(smp->remote_key_dist & SMP_DIST_SIGN)) 1083 if (!(smp->remote_key_dist & SMP_DIST_SIGN))
@@ -1358,7 +1360,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
1358 1360
1359 clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); 1361 clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
1360 cancel_delayed_work_sync(&conn->security_timer); 1362 cancel_delayed_work_sync(&conn->security_timer);
1361 set_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); 1363 set_bit(SMP_FLAG_COMPLETE, &smp->flags);
1362 smp_notify_keys(conn); 1364 smp_notify_keys(conn);
1363 1365
1364 smp_chan_destroy(conn); 1366 smp_chan_destroy(conn);
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 1277147a9150..5a8dc36460a1 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -111,39 +111,11 @@ struct smp_cmd_security_req {
111#define SMP_CMD_NOTSUPP 0x07 111#define SMP_CMD_NOTSUPP 0x07
112#define SMP_UNSPECIFIED 0x08 112#define SMP_UNSPECIFIED 0x08
113#define SMP_REPEATED_ATTEMPTS 0x09 113#define SMP_REPEATED_ATTEMPTS 0x09
114#define SMP_INVALID_PARAMS 0x0a
114 115
115#define SMP_MIN_ENC_KEY_SIZE 7 116#define SMP_MIN_ENC_KEY_SIZE 7
116#define SMP_MAX_ENC_KEY_SIZE 16 117#define SMP_MAX_ENC_KEY_SIZE 16
117 118
118#define SMP_FLAG_TK_VALID 1
119#define SMP_FLAG_CFM_PENDING 2
120#define SMP_FLAG_MITM_AUTH 3
121#define SMP_FLAG_COMPLETE 4
122#define SMP_FLAG_INITIATOR 5
123
124struct smp_chan {
125 struct l2cap_conn *conn;
126 u8 preq[7]; /* SMP Pairing Request */
127 u8 prsp[7]; /* SMP Pairing Response */
128 u8 prnd[16]; /* SMP Pairing Random (local) */
129 u8 rrnd[16]; /* SMP Pairing Random (remote) */
130 u8 pcnf[16]; /* SMP Pairing Confirm */
131 u8 tk[16]; /* SMP Temporary Key */
132 u8 enc_key_size;
133 u8 remote_key_dist;
134 bdaddr_t id_addr;
135 u8 id_addr_type;
136 u8 irk[16];
137 struct smp_csrk *csrk;
138 struct smp_csrk *slave_csrk;
139 struct smp_ltk *ltk;
140 struct smp_ltk *slave_ltk;
141 struct smp_irk *remote_irk;
142 unsigned long smp_flags;
143 struct work_struct confirm;
144 struct work_struct random;
145};
146
147/* SMP Commands */ 119/* SMP Commands */
148bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); 120bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
149int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); 121int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index e85498b2f166..8590b942bffa 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -5,7 +5,7 @@
5obj-$(CONFIG_BRIDGE) += bridge.o 5obj-$(CONFIG_BRIDGE) += bridge.o
6 6
7bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \ 7bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
8 br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \ 8 br_ioctl.o br_stp.o br_stp_bpdu.o \
9 br_stp_if.o br_stp_timer.o br_netlink.o 9 br_stp_if.o br_stp_timer.o br_netlink.o
10 10
11bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o 11bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -16,4 +16,4 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
16 16
17bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o 17bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
18 18
19obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 19obj-$(CONFIG_NETFILTER) += netfilter/
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 19311aafcf5a..1a755a1e5410 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,6 +22,104 @@
22 22
23#include "br_private.h" 23#include "br_private.h"
24 24
25/*
26 * Handle changes in state of network devices enslaved to a bridge.
27 *
28 * Note: don't care about up/down if bridge itself is down, because
29 * port state is checked when bridge is brought up.
30 */
31static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
32{
33 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
34 struct net_bridge_port *p;
35 struct net_bridge *br;
36 bool changed_addr;
37 int err;
38
39 /* register of bridge completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
45 /* not a port of a bridge */
46 p = br_port_get_rtnl(dev);
47 if (!p)
48 return NOTIFY_DONE;
49
50 br = p->br;
51
52 switch (event) {
53 case NETDEV_CHANGEMTU:
54 dev_set_mtu(br->dev, br_min_mtu(br));
55 break;
56
57 case NETDEV_CHANGEADDR:
58 spin_lock_bh(&br->lock);
59 br_fdb_changeaddr(p, dev->dev_addr);
60 changed_addr = br_stp_recalculate_bridge_id(br);
61 spin_unlock_bh(&br->lock);
62
63 if (changed_addr)
64 call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
65
66 break;
67
68 case NETDEV_CHANGE:
69 br_port_carrier_check(p);
70 break;
71
72 case NETDEV_FEAT_CHANGE:
73 netdev_update_features(br->dev);
74 break;
75
76 case NETDEV_DOWN:
77 spin_lock_bh(&br->lock);
78 if (br->dev->flags & IFF_UP)
79 br_stp_disable_port(p);
80 spin_unlock_bh(&br->lock);
81 break;
82
83 case NETDEV_UP:
84 if (netif_running(br->dev) && netif_oper_up(dev)) {
85 spin_lock_bh(&br->lock);
86 br_stp_enable_port(p);
87 spin_unlock_bh(&br->lock);
88 }
89 break;
90
91 case NETDEV_UNREGISTER:
92 br_del_if(br, dev);
93 break;
94
95 case NETDEV_CHANGENAME:
96 err = br_sysfs_renameif(p);
97 if (err)
98 return notifier_from_errno(err);
99 break;
100
101 case NETDEV_PRE_TYPE_CHANGE:
102 /* Forbid the underlying device from changing its type. */
103 return NOTIFY_BAD;
104
105 case NETDEV_RESEND_IGMP:
106 /* Propagate to master device */
107 call_netdevice_notifiers(event, br->dev);
108 break;
109 }
110
111 /* Events that may cause spanning tree to refresh */
112 if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
113 event == NETDEV_CHANGE || event == NETDEV_DOWN)
114 br_ifinfo_notify(RTM_NEWLINK, p);
115
116 return NOTIFY_DONE;
117}
118
119static struct notifier_block br_device_notifier = {
120 .notifier_call = br_device_event
121};
122
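
With the notifier moved into br.c, it is still wired up through the standard netdevice notifier API. Kernel context is assumed here, so this fragment is illustrative rather than standalone; the function name is hypothetical:

	/* Typically called from the bridge module's init path, with a
	 * matching unregister_netdevice_notifier() on module exit. */
	static int __init br_notifier_init_sketch(void)
	{
		return register_netdevice_notifier(&br_device_notifier);
	}
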
25static void __net_exit br_net_exit(struct net *net) 123static void __net_exit br_net_exit(struct net *net)
26{ 124{
27 struct net_device *dev; 125 struct net_device *dev;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 3e2da2cb72db..568cccd39a3d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -112,6 +112,12 @@ static void br_dev_set_multicast_list(struct net_device *dev)
112{ 112{
113} 113}
114 114
115static void br_dev_change_rx_flags(struct net_device *dev, int change)
116{
117 if (change & IFF_PROMISC)
118 br_manage_promisc(netdev_priv(dev));
119}
120
115static int br_dev_stop(struct net_device *dev) 121static int br_dev_stop(struct net_device *dev)
116{ 122{
117 struct net_bridge *br = netdev_priv(dev); 123 struct net_bridge *br = netdev_priv(dev);
@@ -309,6 +315,7 @@ static const struct net_device_ops br_netdev_ops = {
309 .ndo_get_stats64 = br_get_stats64, 315 .ndo_get_stats64 = br_get_stats64,
310 .ndo_set_mac_address = br_set_mac_address, 316 .ndo_set_mac_address = br_set_mac_address,
311 .ndo_set_rx_mode = br_dev_set_multicast_list, 317 .ndo_set_rx_mode = br_dev_set_multicast_list,
318 .ndo_change_rx_flags = br_dev_change_rx_flags,
312 .ndo_change_mtu = br_change_mtu, 319 .ndo_change_mtu = br_change_mtu,
313 .ndo_do_ioctl = br_dev_ioctl, 320 .ndo_do_ioctl = br_dev_ioctl,
314#ifdef CONFIG_NET_POLL_CONTROLLER 321#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -348,14 +355,15 @@ void br_dev_setup(struct net_device *dev)
348 355
349 dev->netdev_ops = &br_netdev_ops; 356 dev->netdev_ops = &br_netdev_ops;
350 dev->destructor = br_dev_free; 357 dev->destructor = br_dev_free;
351 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 358 dev->ethtool_ops = &br_ethtool_ops;
352 SET_NETDEV_DEVTYPE(dev, &br_type); 359 SET_NETDEV_DEVTYPE(dev, &br_type);
353 dev->tx_queue_len = 0; 360 dev->tx_queue_len = 0;
354 dev->priv_flags = IFF_EBRIDGE; 361 dev->priv_flags = IFF_EBRIDGE;
355 362
356 dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | 363 dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
357 NETIF_F_HW_VLAN_CTAG_TX; 364 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
358 dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; 365 dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
366 NETIF_F_HW_VLAN_STAG_TX;
359 dev->vlan_features = COMMON_FEATURES; 367 dev->vlan_features = COMMON_FEATURES;
360 368
361 br->dev = dev; 369 br->dev = dev;
@@ -370,6 +378,7 @@ void br_dev_setup(struct net_device *dev)
370 378
371 br->stp_enabled = BR_NO_STP; 379 br->stp_enabled = BR_NO_STP;
372 br->group_fwd_mask = BR_GROUPFWD_DEFAULT; 380 br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
381 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
373 382
374 br->designated_root = br->bridge_id; 383 br->designated_root = br->bridge_id;
375 br->bridge_max_age = br->max_age = 20 * HZ; 384 br->bridge_max_age = br->max_age = 20 * HZ;
@@ -380,4 +389,5 @@ void br_dev_setup(struct net_device *dev)
380 br_netfilter_rtable_init(br); 389 br_netfilter_rtable_init(br);
381 br_stp_timer_init(br); 390 br_stp_timer_init(br);
382 br_multicast_init(br); 391 br_multicast_init(br);
392 br_vlan_init(br);
383} 393}
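
The new .ndo_change_rx_flags hook means any promiscuity change on the bridge device itself now reaches br_manage_promisc(). A sketch of an in-kernel caller, assuming RTNL is already held (example_force_promisc is a hypothetical name):

	static int example_force_promisc(struct net_device *brdev)
	{
		ASSERT_RTNL();
		/* Bumps the promisc refcount; the core then invokes
		 * ndo_change_rx_flags(brdev, IFF_PROMISC), i.e.
		 * br_dev_change_rx_flags() above.
		 */
		return dev_set_promiscuity(brdev, 1);
	}

Userspace reaches the same path when it toggles IFF_PROMISC on the bridge device, e.g. via ip link set br0 promisc on.
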
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 474d36f93342..b524c36c1273 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -85,8 +85,58 @@ static void fdb_rcu_free(struct rcu_head *head)
85 kmem_cache_free(br_fdb_cache, ent); 85 kmem_cache_free(br_fdb_cache, ent);
86} 86}
87 87
88/* When a static FDB entry is added, the mac address from the entry is
89 * added to the bridge private HW address list and all required ports
90 * are then updated with the new information.
91 * Called under RTNL.
92 */
93static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
94{
95 int err;
96 struct net_bridge_port *p, *tmp;
97
98 ASSERT_RTNL();
99
100 list_for_each_entry(p, &br->port_list, list) {
101 if (!br_promisc_port(p)) {
102 err = dev_uc_add(p->dev, addr);
103 if (err)
104 goto undo;
105 }
106 }
107
108 return;
109undo:
110 list_for_each_entry(tmp, &br->port_list, list) {
111 if (tmp == p)
112 break;
113 if (!br_promisc_port(tmp))
114 dev_uc_del(tmp->dev, addr);
115 }
116}
117
118/* When a static FDB entry is deleted, the HW address from that entry is
119 * also removed from the bridge private HW address list, and all the
120 * ports are updated with the needed information.
121 * Called under RTNL.
122 */
123static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
124{
125 struct net_bridge_port *p;
126
127 ASSERT_RTNL();
128
129 list_for_each_entry(p, &br->port_list, list) {
130 if (!br_promisc_port(p))
131 dev_uc_del(p->dev, addr);
132 }
133}
134
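
fdb_add_hw() above uses a common all-or-nothing idiom: on a mid-list failure, re-walk the same list and undo only the entries processed before the one that failed. Stripped to its shape (all example_* names are illustrative, with example_apply/example_undo assumed to be provided elsewhere):

	#include <linux/list.h>

	struct example_item {
		struct list_head list;
	};

	static int example_apply(struct example_item *it);
	static void example_undo(struct example_item *it);

	static int example_apply_all(struct list_head *head)
	{
		struct example_item *it, *tmp;
		int err;

		list_for_each_entry(it, head, list) {
			err = example_apply(it);
			if (err)
				goto undo;
		}
		return 0;

	undo:
		list_for_each_entry(tmp, head, list) {
			if (tmp == it)	/* stop at the entry that failed */
				break;
			example_undo(tmp);
		}
		return err;
	}
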
88static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) 135static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
89{ 136{
137 if (f->is_static)
138 fdb_del_hw(br, f->addr.addr);
139
90 hlist_del_rcu(&f->hlist); 140 hlist_del_rcu(&f->hlist);
91 fdb_notify(br, f, RTM_DELNEIGH); 141 fdb_notify(br, f, RTM_DELNEIGH);
92 call_rcu(&f->rcu, fdb_rcu_free); 142 call_rcu(&f->rcu, fdb_rcu_free);
@@ -466,6 +516,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
466 return -ENOMEM; 516 return -ENOMEM;
467 517
468 fdb->is_local = fdb->is_static = 1; 518 fdb->is_local = fdb->is_static = 1;
519 fdb_add_hw(br, addr);
469 fdb_notify(br, fdb, RTM_NEWNEIGH); 520 fdb_notify(br, fdb, RTM_NEWNEIGH);
470 return 0; 521 return 0;
471} 522}
@@ -571,6 +622,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
571 622
572 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) 623 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
573 goto nla_put_failure; 624 goto nla_put_failure;
625 if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
626 goto nla_put_failure;
574 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 627 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
575 ci.ndm_confirmed = 0; 628 ci.ndm_confirmed = 0;
576 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 629 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
@@ -592,6 +645,7 @@ static inline size_t fdb_nlmsg_size(void)
592{ 645{
593 return NLMSG_ALIGN(sizeof(struct ndmsg)) 646 return NLMSG_ALIGN(sizeof(struct ndmsg))
594 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 647 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
648 + nla_total_size(sizeof(u32)) /* NDA_MASTER */
595 + nla_total_size(sizeof(u16)) /* NDA_VLAN */ 649 + nla_total_size(sizeof(u16)) /* NDA_VLAN */
596 + nla_total_size(sizeof(struct nda_cacheinfo)); 650 + nla_total_size(sizeof(struct nda_cacheinfo));
597} 651}
@@ -684,13 +738,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
684 } 738 }
685 739
686 if (fdb_to_nud(fdb) != state) { 740 if (fdb_to_nud(fdb) != state) {
687 if (state & NUD_PERMANENT) 741 if (state & NUD_PERMANENT) {
688 fdb->is_local = fdb->is_static = 1; 742 fdb->is_local = 1;
689 else if (state & NUD_NOARP) { 743 if (!fdb->is_static) {
744 fdb->is_static = 1;
745 fdb_add_hw(br, addr);
746 }
747 } else if (state & NUD_NOARP) {
690 fdb->is_local = 0; 748 fdb->is_local = 0;
691 fdb->is_static = 1; 749 if (!fdb->is_static) {
692 } else 750 fdb->is_static = 1;
693 fdb->is_local = fdb->is_static = 0; 751 fdb_add_hw(br, addr);
752 }
753 } else {
754 fdb->is_local = 0;
755 if (fdb->is_static) {
756 fdb->is_static = 0;
757 fdb_del_hw(br, addr);
758 }
759 }
694 760
695 modified = true; 761 modified = true;
696 } 762 }
@@ -880,3 +946,59 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
880out: 946out:
881 return err; 947 return err;
882} 948}
949
950int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
951{
952 struct net_bridge_fdb_entry *fdb, *tmp;
953 int i;
954 int err;
955
956 ASSERT_RTNL();
957
958 for (i = 0; i < BR_HASH_SIZE; i++) {
959 hlist_for_each_entry(fdb, &br->hash[i], hlist) {
960 /* We only care for static entries */
961 if (!fdb->is_static)
962 continue;
963
964 err = dev_uc_add(p->dev, fdb->addr.addr);
965 if (err)
966 goto rollback;
967 }
968 }
969 return 0;
970
971rollback:
972 for (i = 0; i < BR_HASH_SIZE; i++) {
973 hlist_for_each_entry(tmp, &br->hash[i], hlist) {
974 /* If we reached the fdb that failed, we can stop */
975 if (tmp == fdb)
976 break;
977
978 /* We only care for static entries */
979 if (!tmp->is_static)
980 continue;
981
982 dev_uc_del(p->dev, tmp->addr.addr);
983 }
984 }
985 return err;
986}
987
988void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
989{
990 struct net_bridge_fdb_entry *fdb;
991 int i;
992
993 ASSERT_RTNL();
994
995 for (i = 0; i < BR_HASH_SIZE; i++) {
996 hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
997 /* We only care for static entries */
998 if (!fdb->is_static)
999 continue;
1000
1001 dev_uc_del(p->dev, fdb->addr.addr);
1002 }
1003 }
1004}
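
br_fdb_sync_static() and br_fdb_unsync_static() push the bridge's static FDB into a port's unicast filter and pull it back out. The intended ordering when taking a port out of promiscuous mode is mirrored by br_port_clear_promisc() in br_if.c below; a sketch, assuming RTNL is held:

	static int example_make_port_selective(struct net_bridge *br,
					       struct net_bridge_port *p)
	{
		/* Program the HW filter first; on failure the port simply
		 * stays promiscuous and no traffic is lost.
		 */
		int err = br_fdb_sync_static(br, p);

		if (err)
			return err;
		dev_set_promiscuity(p->dev, -1);
		return 0;
	}
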
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5262b8617eb9..3eca3fdf8fe1 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -85,6 +85,111 @@ void br_port_carrier_check(struct net_bridge_port *p)
85 spin_unlock_bh(&br->lock); 85 spin_unlock_bh(&br->lock);
86} 86}
87 87
88static void br_port_set_promisc(struct net_bridge_port *p)
89{
90 int err = 0;
91
92 if (br_promisc_port(p))
93 return;
94
95 err = dev_set_promiscuity(p->dev, 1);
96 if (err)
97 return;
98
99 br_fdb_unsync_static(p->br, p);
100 p->flags |= BR_PROMISC;
101}
102
103static void br_port_clear_promisc(struct net_bridge_port *p)
104{
105 int err;
106
107 /* Check if the port is already non-promisc or if it doesn't
108 * support UNICAST filtering. Without unicast filtering support
109 * we'll end up re-enabling promisc mode anyway, so just check for
110 * it here.
111 */
112 if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
113 return;
114
115 /* Since we'll be clearing the promisc mode, program the port
 116	 * first so that traffic is not interrupted.
117 */
118 err = br_fdb_sync_static(p->br, p);
119 if (err)
120 return;
121
122 dev_set_promiscuity(p->dev, -1);
123 p->flags &= ~BR_PROMISC;
124}
125
126/* When a port is added or removed or when certain port flags
127 * change, this function is called to automatically manage the
128 * promiscuity setting of all the bridge ports. We are always called
129 * under RTNL, so we can skip using RCU primitives.
130 */
131void br_manage_promisc(struct net_bridge *br)
132{
133 struct net_bridge_port *p;
134 bool set_all = false;
135
 136	/* If vlan filtering is disabled or the bridge interface is placed
137 * into promiscuous mode, place all ports in promiscuous mode.
138 */
139 if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
140 set_all = true;
141
142 list_for_each_entry(p, &br->port_list, list) {
143 if (set_all) {
144 br_port_set_promisc(p);
145 } else {
146 /* If the number of auto-ports is <= 1, then all other
147 * ports will have their output configuration
148 * statically specified through fdbs. Since ingress
149 * on the auto-port becomes forwarding/egress to other
150 * ports and egress configuration is statically known,
151 * we can say that ingress configuration of the
152 * auto-port is also statically known.
153 * This lets us disable promiscuous mode and write
154 * this config to hw.
155 */
156 if (br->auto_cnt == 0 ||
157 (br->auto_cnt == 1 && br_auto_port(p)))
158 br_port_clear_promisc(p);
159 else
160 br_port_set_promisc(p);
161 }
162 }
163}
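
The per-port decision above collapses to a single predicate: a port may leave promiscuous mode only when no *other* auto-port (one with BR_FLOOD or BR_LEARNING set, see BR_AUTO_MASK in br_private.h below) exists. A sketch of that predicate, with set_all standing for the bridge-level condition computed above:

	static bool example_port_needs_promisc(const struct net_bridge *br,
					       const struct net_bridge_port *p,
					       bool set_all)
	{
		if (set_all)
			return true;
		/* Non-promisc is safe only if this is the sole auto-port,
		 * or there are no auto-ports at all.
		 */
		return !(br->auto_cnt == 0 ||
			 (br->auto_cnt == 1 && br_auto_port(p)));
	}
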
164
165static void nbp_update_port_count(struct net_bridge *br)
166{
167 struct net_bridge_port *p;
168 u32 cnt = 0;
169
170 list_for_each_entry(p, &br->port_list, list) {
171 if (br_auto_port(p))
172 cnt++;
173 }
174 if (br->auto_cnt != cnt) {
175 br->auto_cnt = cnt;
176 br_manage_promisc(br);
177 }
178}
179
180static void nbp_delete_promisc(struct net_bridge_port *p)
181{
182 /* If port is currently promiscuous, unset promiscuity.
183 * Otherwise, it is a static port so remove all addresses
184 * from it.
185 */
186 dev_set_allmulti(p->dev, -1);
187 if (br_promisc_port(p))
188 dev_set_promiscuity(p->dev, -1);
189 else
190 br_fdb_unsync_static(p->br, p);
191}
192
88static void release_nbp(struct kobject *kobj) 193static void release_nbp(struct kobject *kobj)
89{ 194{
90 struct net_bridge_port *p 195 struct net_bridge_port *p
@@ -133,7 +238,7 @@ static void del_nbp(struct net_bridge_port *p)
133 238
134 sysfs_remove_link(br->ifobj, p->dev->name); 239 sysfs_remove_link(br->ifobj, p->dev->name);
135 240
136 dev_set_promiscuity(dev, -1); 241 nbp_delete_promisc(p);
137 242
138 spin_lock_bh(&br->lock); 243 spin_lock_bh(&br->lock);
139 br_stp_disable_port(p); 244 br_stp_disable_port(p);
@@ -141,10 +246,11 @@ static void del_nbp(struct net_bridge_port *p)
141 246
142 br_ifinfo_notify(RTM_DELLINK, p); 247 br_ifinfo_notify(RTM_DELLINK, p);
143 248
249 list_del_rcu(&p->list);
250
144 nbp_vlan_flush(p); 251 nbp_vlan_flush(p);
145 br_fdb_delete_by_port(br, p, 1); 252 br_fdb_delete_by_port(br, p, 1);
146 253 nbp_update_port_count(br);
147 list_del_rcu(&p->list);
148 254
149 dev->priv_flags &= ~IFF_BRIDGE_PORT; 255 dev->priv_flags &= ~IFF_BRIDGE_PORT;
150 256
@@ -353,7 +459,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
353 459
354 call_netdevice_notifiers(NETDEV_JOIN, dev); 460 call_netdevice_notifiers(NETDEV_JOIN, dev);
355 461
356 err = dev_set_promiscuity(dev, 1); 462 err = dev_set_allmulti(dev, 1);
357 if (err) 463 if (err)
358 goto put_back; 464 goto put_back;
359 465
@@ -384,6 +490,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
384 490
385 list_add_rcu(&p->list, &br->port_list); 491 list_add_rcu(&p->list, &br->port_list);
386 492
493 nbp_update_port_count(br);
494
387 netdev_update_features(br->dev); 495 netdev_update_features(br->dev);
388 496
389 if (br->dev->needed_headroom < dev->needed_headroom) 497 if (br->dev->needed_headroom < dev->needed_headroom)
@@ -421,7 +529,7 @@ err2:
421 kobject_put(&p->kobj); 529 kobject_put(&p->kobj);
422 p = NULL; /* kobject_put frees */ 530 p = NULL; /* kobject_put frees */
423err1: 531err1:
424 dev_set_promiscuity(dev, -1); 532 dev_set_allmulti(dev, -1);
425put_back: 533put_back:
426 dev_put(dev); 534 dev_put(dev);
427 kfree(p); 535 kfree(p);
@@ -455,3 +563,11 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
455 563
456 return 0; 564 return 0;
457} 565}
566
567void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
568{
569 struct net_bridge *br = p->br;
570
571 if (mask & BR_AUTO_MASK)
572 nbp_update_port_count(br);
573}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 04d6348fd530..366c43649079 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -177,6 +177,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
177 p = br_port_get_rcu(skb->dev); 177 p = br_port_get_rcu(skb->dev);
178 178
179 if (unlikely(is_link_local_ether_addr(dest))) { 179 if (unlikely(is_link_local_ether_addr(dest))) {
180 u16 fwd_mask = p->br->group_fwd_mask_required;
181
180 /* 182 /*
181 * See IEEE 802.1D Table 7-10 Reserved addresses 183 * See IEEE 802.1D Table 7-10 Reserved addresses
182 * 184 *
@@ -194,7 +196,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
194 case 0x00: /* Bridge Group Address */ 196 case 0x00: /* Bridge Group Address */
195 /* If STP is turned off, 197 /* If STP is turned off,
196 then must forward to keep loop detection */ 198 then must forward to keep loop detection */
197 if (p->br->stp_enabled == BR_NO_STP) 199 if (p->br->stp_enabled == BR_NO_STP ||
200 fwd_mask & (1u << dest[5]))
198 goto forward; 201 goto forward;
199 break; 202 break;
200 203
@@ -203,7 +206,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
203 206
204 default: 207 default:
205 /* Allow selective forwarding for most other protocols */ 208 /* Allow selective forwarding for most other protocols */
206 if (p->br->group_fwd_mask & (1u << dest[5])) 209 fwd_mask |= p->br->group_fwd_mask;
210 if (fwd_mask & (1u << dest[5]))
207 goto forward; 211 goto forward;
208 } 212 }
209 213
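
All of these reserved addresses share the prefix 01-80-C2-00-00-XX with XX < 16, so the last octet (dest[5]) indexes a bit in a 16-bit mask. A worked example of how BR_GROUPFWD_8021AD (0xB801, defined in br_private.h below) encodes the Nearest Customer Bridge set {00, 0B, 0C, 0D, 0F}:

	/* 1u << dest[5] for each address in the set:
	 * 0x00 -> 0x0001, 0x0B -> 0x0800, 0x0C -> 0x1000,
	 * 0x0D -> 0x2000, 0x0F -> 0x8000
	 * OR'ed together: 0x0001 | 0x0800 | 0x1000 | 0x2000 | 0x8000 == 0xB801
	 */
	#define EX_BIT(last_octet)	(1u << (last_octet))
	#define EX_GROUPFWD_8021AD \
		(EX_BIT(0x00) | EX_BIT(0x0B) | EX_BIT(0x0C) | \
		 EX_BIT(0x0D) | EX_BIT(0x0F))	/* == 0xB801 */
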
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b7b1914dfa25..5df05269d17a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -418,13 +418,13 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
418 418
419 ip.proto = entry->addr.proto; 419 ip.proto = entry->addr.proto;
420 if (ip.proto == htons(ETH_P_IP)) { 420 if (ip.proto == htons(ETH_P_IP)) {
421 if (timer_pending(&br->ip4_querier.timer)) 421 if (timer_pending(&br->ip4_other_query.timer))
422 return -EBUSY; 422 return -EBUSY;
423 423
424 ip.u.ip4 = entry->addr.u.ip4; 424 ip.u.ip4 = entry->addr.u.ip4;
425#if IS_ENABLED(CONFIG_IPV6) 425#if IS_ENABLED(CONFIG_IPV6)
426 } else { 426 } else {
427 if (timer_pending(&br->ip6_querier.timer)) 427 if (timer_pending(&br->ip6_other_query.timer))
428 return -EBUSY; 428 return -EBUSY;
429 429
430 ip.u.ip6 = entry->addr.u.ip6; 430 ip.u.ip6 = entry->addr.u.ip6;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7b757b5dc773..abfa0b65a111 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/export.h>
14#include <linux/if_ether.h> 15#include <linux/if_ether.h>
15#include <linux/igmp.h> 16#include <linux/igmp.h>
16#include <linux/jhash.h> 17#include <linux/jhash.h>
@@ -35,7 +36,7 @@
35#include "br_private.h" 36#include "br_private.h"
36 37
37static void br_multicast_start_querier(struct net_bridge *br, 38static void br_multicast_start_querier(struct net_bridge *br,
38 struct bridge_mcast_query *query); 39 struct bridge_mcast_own_query *query);
39unsigned int br_mdb_rehash_seq; 40unsigned int br_mdb_rehash_seq;
40 41
41static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 42static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -761,7 +762,7 @@ static void br_multicast_local_router_expired(unsigned long data)
761} 762}
762 763
763static void br_multicast_querier_expired(struct net_bridge *br, 764static void br_multicast_querier_expired(struct net_bridge *br,
764 struct bridge_mcast_query *query) 765 struct bridge_mcast_own_query *query)
765{ 766{
766 spin_lock(&br->multicast_lock); 767 spin_lock(&br->multicast_lock);
767 if (!netif_running(br->dev) || br->multicast_disabled) 768 if (!netif_running(br->dev) || br->multicast_disabled)
@@ -777,7 +778,7 @@ static void br_ip4_multicast_querier_expired(unsigned long data)
777{ 778{
778 struct net_bridge *br = (void *)data; 779 struct net_bridge *br = (void *)data;
779 780
780 br_multicast_querier_expired(br, &br->ip4_query); 781 br_multicast_querier_expired(br, &br->ip4_own_query);
781} 782}
782 783
783#if IS_ENABLED(CONFIG_IPV6) 784#if IS_ENABLED(CONFIG_IPV6)
@@ -785,10 +786,22 @@ static void br_ip6_multicast_querier_expired(unsigned long data)
785{ 786{
786 struct net_bridge *br = (void *)data; 787 struct net_bridge *br = (void *)data;
787 788
788 br_multicast_querier_expired(br, &br->ip6_query); 789 br_multicast_querier_expired(br, &br->ip6_own_query);
789} 790}
790#endif 791#endif
791 792
793static void br_multicast_select_own_querier(struct net_bridge *br,
794 struct br_ip *ip,
795 struct sk_buff *skb)
796{
797 if (ip->proto == htons(ETH_P_IP))
798 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
799#if IS_ENABLED(CONFIG_IPV6)
800 else
801 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
802#endif
803}
804
792static void __br_multicast_send_query(struct net_bridge *br, 805static void __br_multicast_send_query(struct net_bridge *br,
793 struct net_bridge_port *port, 806 struct net_bridge_port *port,
794 struct br_ip *ip) 807 struct br_ip *ip)
@@ -804,17 +817,19 @@ static void __br_multicast_send_query(struct net_bridge *br,
804 skb->dev = port->dev; 817 skb->dev = port->dev;
805 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 818 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
806 dev_queue_xmit); 819 dev_queue_xmit);
807 } else 820 } else {
821 br_multicast_select_own_querier(br, ip, skb);
808 netif_rx(skb); 822 netif_rx(skb);
823 }
809} 824}
810 825
811static void br_multicast_send_query(struct net_bridge *br, 826static void br_multicast_send_query(struct net_bridge *br,
812 struct net_bridge_port *port, 827 struct net_bridge_port *port,
813 struct bridge_mcast_query *query) 828 struct bridge_mcast_own_query *own_query)
814{ 829{
815 unsigned long time; 830 unsigned long time;
816 struct br_ip br_group; 831 struct br_ip br_group;
817 struct bridge_mcast_querier *querier = NULL; 832 struct bridge_mcast_other_query *other_query = NULL;
818 833
819 if (!netif_running(br->dev) || br->multicast_disabled || 834 if (!netif_running(br->dev) || br->multicast_disabled ||
820 !br->multicast_querier) 835 !br->multicast_querier)
@@ -822,31 +837,32 @@ static void br_multicast_send_query(struct net_bridge *br,
822 837
823 memset(&br_group.u, 0, sizeof(br_group.u)); 838 memset(&br_group.u, 0, sizeof(br_group.u));
824 839
825 if (port ? (query == &port->ip4_query) : 840 if (port ? (own_query == &port->ip4_own_query) :
826 (query == &br->ip4_query)) { 841 (own_query == &br->ip4_own_query)) {
827 querier = &br->ip4_querier; 842 other_query = &br->ip4_other_query;
828 br_group.proto = htons(ETH_P_IP); 843 br_group.proto = htons(ETH_P_IP);
829#if IS_ENABLED(CONFIG_IPV6) 844#if IS_ENABLED(CONFIG_IPV6)
830 } else { 845 } else {
831 querier = &br->ip6_querier; 846 other_query = &br->ip6_other_query;
832 br_group.proto = htons(ETH_P_IPV6); 847 br_group.proto = htons(ETH_P_IPV6);
833#endif 848#endif
834 } 849 }
835 850
836 if (!querier || timer_pending(&querier->timer)) 851 if (!other_query || timer_pending(&other_query->timer))
837 return; 852 return;
838 853
839 __br_multicast_send_query(br, port, &br_group); 854 __br_multicast_send_query(br, port, &br_group);
840 855
841 time = jiffies; 856 time = jiffies;
842 time += query->startup_sent < br->multicast_startup_query_count ? 857 time += own_query->startup_sent < br->multicast_startup_query_count ?
843 br->multicast_startup_query_interval : 858 br->multicast_startup_query_interval :
844 br->multicast_query_interval; 859 br->multicast_query_interval;
845 mod_timer(&query->timer, time); 860 mod_timer(&own_query->timer, time);
846} 861}
847 862
848static void br_multicast_port_query_expired(struct net_bridge_port *port, 863static void
849 struct bridge_mcast_query *query) 864br_multicast_port_query_expired(struct net_bridge_port *port,
865 struct bridge_mcast_own_query *query)
850{ 866{
851 struct net_bridge *br = port->br; 867 struct net_bridge *br = port->br;
852 868
@@ -868,7 +884,7 @@ static void br_ip4_multicast_port_query_expired(unsigned long data)
868{ 884{
869 struct net_bridge_port *port = (void *)data; 885 struct net_bridge_port *port = (void *)data;
870 886
871 br_multicast_port_query_expired(port, &port->ip4_query); 887 br_multicast_port_query_expired(port, &port->ip4_own_query);
872} 888}
873 889
874#if IS_ENABLED(CONFIG_IPV6) 890#if IS_ENABLED(CONFIG_IPV6)
@@ -876,7 +892,7 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
876{ 892{
877 struct net_bridge_port *port = (void *)data; 893 struct net_bridge_port *port = (void *)data;
878 894
879 br_multicast_port_query_expired(port, &port->ip6_query); 895 br_multicast_port_query_expired(port, &port->ip6_own_query);
880} 896}
881#endif 897#endif
882 898
@@ -886,11 +902,11 @@ void br_multicast_add_port(struct net_bridge_port *port)
886 902
887 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 903 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
888 (unsigned long)port); 904 (unsigned long)port);
889 setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired, 905 setup_timer(&port->ip4_own_query.timer,
890 (unsigned long)port); 906 br_ip4_multicast_port_query_expired, (unsigned long)port);
891#if IS_ENABLED(CONFIG_IPV6) 907#if IS_ENABLED(CONFIG_IPV6)
892 setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired, 908 setup_timer(&port->ip6_own_query.timer,
893 (unsigned long)port); 909 br_ip6_multicast_port_query_expired, (unsigned long)port);
894#endif 910#endif
895} 911}
896 912
@@ -899,7 +915,7 @@ void br_multicast_del_port(struct net_bridge_port *port)
899 del_timer_sync(&port->multicast_router_timer); 915 del_timer_sync(&port->multicast_router_timer);
900} 916}
901 917
902static void br_multicast_enable(struct bridge_mcast_query *query) 918static void br_multicast_enable(struct bridge_mcast_own_query *query)
903{ 919{
904 query->startup_sent = 0; 920 query->startup_sent = 0;
905 921
@@ -916,9 +932,9 @@ void br_multicast_enable_port(struct net_bridge_port *port)
916 if (br->multicast_disabled || !netif_running(br->dev)) 932 if (br->multicast_disabled || !netif_running(br->dev))
917 goto out; 933 goto out;
918 934
919 br_multicast_enable(&port->ip4_query); 935 br_multicast_enable(&port->ip4_own_query);
920#if IS_ENABLED(CONFIG_IPV6) 936#if IS_ENABLED(CONFIG_IPV6)
921 br_multicast_enable(&port->ip6_query); 937 br_multicast_enable(&port->ip6_own_query);
922#endif 938#endif
923 939
924out: 940out:
@@ -938,9 +954,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
938 if (!hlist_unhashed(&port->rlist)) 954 if (!hlist_unhashed(&port->rlist))
939 hlist_del_init_rcu(&port->rlist); 955 hlist_del_init_rcu(&port->rlist);
940 del_timer(&port->multicast_router_timer); 956 del_timer(&port->multicast_router_timer);
941 del_timer(&port->ip4_query.timer); 957 del_timer(&port->ip4_own_query.timer);
942#if IS_ENABLED(CONFIG_IPV6) 958#if IS_ENABLED(CONFIG_IPV6)
943 del_timer(&port->ip6_query.timer); 959 del_timer(&port->ip6_own_query.timer);
944#endif 960#endif
945 spin_unlock(&br->multicast_lock); 961 spin_unlock(&br->multicast_lock);
946} 962}
@@ -1064,15 +1080,80 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1064} 1080}
1065#endif 1081#endif
1066 1082
1083static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1084 struct net_bridge_port *port,
1085 __be32 saddr)
1086{
1087 if (!timer_pending(&br->ip4_own_query.timer) &&
1088 !timer_pending(&br->ip4_other_query.timer))
1089 goto update;
1090
1091 if (!br->ip4_querier.addr.u.ip4)
1092 goto update;
1093
1094 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1095 goto update;
1096
1097 return false;
1098
1099update:
1100 br->ip4_querier.addr.u.ip4 = saddr;
1101
1102 /* update protected by general multicast_lock by caller */
1103 rcu_assign_pointer(br->ip4_querier.port, port);
1104
1105 return true;
1106}
1107
1108#if IS_ENABLED(CONFIG_IPV6)
1109static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1110 struct net_bridge_port *port,
1111 struct in6_addr *saddr)
1112{
1113 if (!timer_pending(&br->ip6_own_query.timer) &&
1114 !timer_pending(&br->ip6_other_query.timer))
1115 goto update;
1116
1117 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1118 goto update;
1119
1120 return false;
1121
1122update:
1123 br->ip6_querier.addr.u.ip6 = *saddr;
1124
1125 /* update protected by general multicast_lock by caller */
1126 rcu_assign_pointer(br->ip6_querier.port, port);
1127
1128 return true;
1129}
1130#endif
1131
1132static bool br_multicast_select_querier(struct net_bridge *br,
1133 struct net_bridge_port *port,
1134 struct br_ip *saddr)
1135{
1136 switch (saddr->proto) {
1137 case htons(ETH_P_IP):
1138 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1139#if IS_ENABLED(CONFIG_IPV6)
1140 case htons(ETH_P_IPV6):
1141 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1142#endif
1143 }
1144
1145 return false;
1146}
1147
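
Both select_querier helpers implement the usual IGMP/MLD election: the querier with the numerically lowest source address wins, with ties going to the challenger, and the ntohl() conversion keeps the IPv4 comparison byte-order independent. A worked example:

	/* With a stored querier of 192.0.2.10 (host order 0xC000020A):
	 * a query from 192.0.2.1 (0xC0000201) wins and replaces it,
	 * a query from 192.0.2.20 (0xC0000214) loses and is ignored.
	 */
	static bool example_ip4_challenger_wins(__be32 challenger, __be32 best)
	{
		return ntohl(challenger) <= ntohl(best);
	}
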
1067static void 1148static void
1068br_multicast_update_querier_timer(struct net_bridge *br, 1149br_multicast_update_query_timer(struct net_bridge *br,
1069 struct bridge_mcast_querier *querier, 1150 struct bridge_mcast_other_query *query,
1070 unsigned long max_delay) 1151 unsigned long max_delay)
1071{ 1152{
1072 if (!timer_pending(&querier->timer)) 1153 if (!timer_pending(&query->timer))
1073 querier->delay_time = jiffies + max_delay; 1154 query->delay_time = jiffies + max_delay;
1074 1155
1075 mod_timer(&querier->timer, jiffies + br->multicast_querier_interval); 1156 mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
1076} 1157}
1077 1158
1078/* 1159/*
@@ -1125,16 +1206,14 @@ timer:
1125 1206
1126static void br_multicast_query_received(struct net_bridge *br, 1207static void br_multicast_query_received(struct net_bridge *br,
1127 struct net_bridge_port *port, 1208 struct net_bridge_port *port,
1128 struct bridge_mcast_querier *querier, 1209 struct bridge_mcast_other_query *query,
1129 int saddr, 1210 struct br_ip *saddr,
1130 bool is_general_query,
1131 unsigned long max_delay) 1211 unsigned long max_delay)
1132{ 1212{
1133 if (saddr && is_general_query) 1213 if (!br_multicast_select_querier(br, port, saddr))
1134 br_multicast_update_querier_timer(br, querier, max_delay);
1135 else if (timer_pending(&querier->timer))
1136 return; 1214 return;
1137 1215
1216 br_multicast_update_query_timer(br, query, max_delay);
1138 br_multicast_mark_router(br, port); 1217 br_multicast_mark_router(br, port);
1139} 1218}
1140 1219
@@ -1149,6 +1228,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1149 struct igmpv3_query *ih3; 1228 struct igmpv3_query *ih3;
1150 struct net_bridge_port_group *p; 1229 struct net_bridge_port_group *p;
1151 struct net_bridge_port_group __rcu **pp; 1230 struct net_bridge_port_group __rcu **pp;
1231 struct br_ip saddr;
1152 unsigned long max_delay; 1232 unsigned long max_delay;
1153 unsigned long now = jiffies; 1233 unsigned long now = jiffies;
1154 __be32 group; 1234 __be32 group;
@@ -1190,11 +1270,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1190 goto out; 1270 goto out;
1191 } 1271 }
1192 1272
1193 br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, 1273 if (!group) {
1194 !group, max_delay); 1274 saddr.proto = htons(ETH_P_IP);
1275 saddr.u.ip4 = iph->saddr;
1195 1276
1196 if (!group) 1277 br_multicast_query_received(br, port, &br->ip4_other_query,
1278 &saddr, max_delay);
1197 goto out; 1279 goto out;
1280 }
1198 1281
1199 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); 1282 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
1200 if (!mp) 1283 if (!mp)
@@ -1234,6 +1317,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1234 struct mld2_query *mld2q; 1317 struct mld2_query *mld2q;
1235 struct net_bridge_port_group *p; 1318 struct net_bridge_port_group *p;
1236 struct net_bridge_port_group __rcu **pp; 1319 struct net_bridge_port_group __rcu **pp;
1320 struct br_ip saddr;
1237 unsigned long max_delay; 1321 unsigned long max_delay;
1238 unsigned long now = jiffies; 1322 unsigned long now = jiffies;
1239 const struct in6_addr *group = NULL; 1323 const struct in6_addr *group = NULL;
@@ -1282,12 +1366,16 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1282 goto out; 1366 goto out;
1283 } 1367 }
1284 1368
1285 br_multicast_query_received(br, port, &br->ip6_querier, 1369 if (is_general_query) {
1286 !ipv6_addr_any(&ip6h->saddr), 1370 saddr.proto = htons(ETH_P_IPV6);
1287 is_general_query, max_delay); 1371 saddr.u.ip6 = ip6h->saddr;
1288 1372
1289 if (!group) 1373 br_multicast_query_received(br, port, &br->ip6_other_query,
1374 &saddr, max_delay);
1375 goto out;
1376 } else if (!group) {
1290 goto out; 1377 goto out;
1378 }
1291 1379
1292 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); 1380 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1293 if (!mp) 1381 if (!mp)
@@ -1315,11 +1403,12 @@ out:
1315} 1403}
1316#endif 1404#endif
1317 1405
1318static void br_multicast_leave_group(struct net_bridge *br, 1406static void
1319 struct net_bridge_port *port, 1407br_multicast_leave_group(struct net_bridge *br,
1320 struct br_ip *group, 1408 struct net_bridge_port *port,
1321 struct bridge_mcast_querier *querier, 1409 struct br_ip *group,
1322 struct bridge_mcast_query *query) 1410 struct bridge_mcast_other_query *other_query,
1411 struct bridge_mcast_own_query *own_query)
1323{ 1412{
1324 struct net_bridge_mdb_htable *mdb; 1413 struct net_bridge_mdb_htable *mdb;
1325 struct net_bridge_mdb_entry *mp; 1414 struct net_bridge_mdb_entry *mp;
@@ -1330,7 +1419,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1330 spin_lock(&br->multicast_lock); 1419 spin_lock(&br->multicast_lock);
1331 if (!netif_running(br->dev) || 1420 if (!netif_running(br->dev) ||
1332 (port && port->state == BR_STATE_DISABLED) || 1421 (port && port->state == BR_STATE_DISABLED) ||
1333 timer_pending(&querier->timer)) 1422 timer_pending(&other_query->timer))
1334 goto out; 1423 goto out;
1335 1424
1336 mdb = mlock_dereference(br->mdb, br); 1425 mdb = mlock_dereference(br->mdb, br);
@@ -1344,7 +1433,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1344 time = jiffies + br->multicast_last_member_count * 1433 time = jiffies + br->multicast_last_member_count *
1345 br->multicast_last_member_interval; 1434 br->multicast_last_member_interval;
1346 1435
1347 mod_timer(&query->timer, time); 1436 mod_timer(&own_query->timer, time);
1348 1437
1349 for (p = mlock_dereference(mp->ports, br); 1438 for (p = mlock_dereference(mp->ports, br);
1350 p != NULL; 1439 p != NULL;
@@ -1425,17 +1514,19 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1425 __u16 vid) 1514 __u16 vid)
1426{ 1515{
1427 struct br_ip br_group; 1516 struct br_ip br_group;
1428 struct bridge_mcast_query *query = port ? &port->ip4_query : 1517 struct bridge_mcast_own_query *own_query;
1429 &br->ip4_query;
1430 1518
1431 if (ipv4_is_local_multicast(group)) 1519 if (ipv4_is_local_multicast(group))
1432 return; 1520 return;
1433 1521
1522 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1523
1434 br_group.u.ip4 = group; 1524 br_group.u.ip4 = group;
1435 br_group.proto = htons(ETH_P_IP); 1525 br_group.proto = htons(ETH_P_IP);
1436 br_group.vid = vid; 1526 br_group.vid = vid;
1437 1527
1438 br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query); 1528 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1529 own_query);
1439} 1530}
1440 1531
1441#if IS_ENABLED(CONFIG_IPV6) 1532#if IS_ENABLED(CONFIG_IPV6)
@@ -1445,18 +1536,19 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1445 __u16 vid) 1536 __u16 vid)
1446{ 1537{
1447 struct br_ip br_group; 1538 struct br_ip br_group;
1448 struct bridge_mcast_query *query = port ? &port->ip6_query : 1539 struct bridge_mcast_own_query *own_query;
1449 &br->ip6_query;
1450
1451 1540
1452 if (ipv6_addr_is_ll_all_nodes(group)) 1541 if (ipv6_addr_is_ll_all_nodes(group))
1453 return; 1542 return;
1454 1543
1544 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1545
1455 br_group.u.ip6 = *group; 1546 br_group.u.ip6 = *group;
1456 br_group.proto = htons(ETH_P_IPV6); 1547 br_group.proto = htons(ETH_P_IPV6);
1457 br_group.vid = vid; 1548 br_group.vid = vid;
1458 1549
1459 br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query); 1550 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1551 own_query);
1460} 1552}
1461#endif 1553#endif
1462 1554
@@ -1723,12 +1815,14 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1723} 1815}
1724 1816
1725static void br_multicast_query_expired(struct net_bridge *br, 1817static void br_multicast_query_expired(struct net_bridge *br,
1726 struct bridge_mcast_query *query) 1818 struct bridge_mcast_own_query *query,
1819 struct bridge_mcast_querier *querier)
1727{ 1820{
1728 spin_lock(&br->multicast_lock); 1821 spin_lock(&br->multicast_lock);
1729 if (query->startup_sent < br->multicast_startup_query_count) 1822 if (query->startup_sent < br->multicast_startup_query_count)
1730 query->startup_sent++; 1823 query->startup_sent++;
1731 1824
1825 rcu_assign_pointer(querier, NULL);
1732 br_multicast_send_query(br, NULL, query); 1826 br_multicast_send_query(br, NULL, query);
1733 spin_unlock(&br->multicast_lock); 1827 spin_unlock(&br->multicast_lock);
1734} 1828}
@@ -1737,7 +1831,7 @@ static void br_ip4_multicast_query_expired(unsigned long data)
1737{ 1831{
1738 struct net_bridge *br = (void *)data; 1832 struct net_bridge *br = (void *)data;
1739 1833
1740 br_multicast_query_expired(br, &br->ip4_query); 1834 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1741} 1835}
1742 1836
1743#if IS_ENABLED(CONFIG_IPV6) 1837#if IS_ENABLED(CONFIG_IPV6)
@@ -1745,7 +1839,7 @@ static void br_ip6_multicast_query_expired(unsigned long data)
1745{ 1839{
1746 struct net_bridge *br = (void *)data; 1840 struct net_bridge *br = (void *)data;
1747 1841
1748 br_multicast_query_expired(br, &br->ip6_query); 1842 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1749} 1843}
1750#endif 1844#endif
1751 1845
@@ -1767,28 +1861,30 @@ void br_multicast_init(struct net_bridge *br)
1767 br->multicast_querier_interval = 255 * HZ; 1861 br->multicast_querier_interval = 255 * HZ;
1768 br->multicast_membership_interval = 260 * HZ; 1862 br->multicast_membership_interval = 260 * HZ;
1769 1863
1770 br->ip4_querier.delay_time = 0; 1864 br->ip4_other_query.delay_time = 0;
1865 br->ip4_querier.port = NULL;
1771#if IS_ENABLED(CONFIG_IPV6) 1866#if IS_ENABLED(CONFIG_IPV6)
1772 br->ip6_querier.delay_time = 0; 1867 br->ip6_other_query.delay_time = 0;
1868 br->ip6_querier.port = NULL;
1773#endif 1869#endif
1774 1870
1775 spin_lock_init(&br->multicast_lock); 1871 spin_lock_init(&br->multicast_lock);
1776 setup_timer(&br->multicast_router_timer, 1872 setup_timer(&br->multicast_router_timer,
1777 br_multicast_local_router_expired, 0); 1873 br_multicast_local_router_expired, 0);
1778 setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired, 1874 setup_timer(&br->ip4_other_query.timer,
1779 (unsigned long)br); 1875 br_ip4_multicast_querier_expired, (unsigned long)br);
1780 setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired, 1876 setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
1781 (unsigned long)br); 1877 (unsigned long)br);
1782#if IS_ENABLED(CONFIG_IPV6) 1878#if IS_ENABLED(CONFIG_IPV6)
1783 setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired, 1879 setup_timer(&br->ip6_other_query.timer,
1784 (unsigned long)br); 1880 br_ip6_multicast_querier_expired, (unsigned long)br);
1785 setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired, 1881 setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
1786 (unsigned long)br); 1882 (unsigned long)br);
1787#endif 1883#endif
1788} 1884}
1789 1885
1790static void __br_multicast_open(struct net_bridge *br, 1886static void __br_multicast_open(struct net_bridge *br,
1791 struct bridge_mcast_query *query) 1887 struct bridge_mcast_own_query *query)
1792{ 1888{
1793 query->startup_sent = 0; 1889 query->startup_sent = 0;
1794 1890
@@ -1800,9 +1896,9 @@ static void __br_multicast_open(struct net_bridge *br,
1800 1896
1801void br_multicast_open(struct net_bridge *br) 1897void br_multicast_open(struct net_bridge *br)
1802{ 1898{
1803 __br_multicast_open(br, &br->ip4_query); 1899 __br_multicast_open(br, &br->ip4_own_query);
1804#if IS_ENABLED(CONFIG_IPV6) 1900#if IS_ENABLED(CONFIG_IPV6)
1805 __br_multicast_open(br, &br->ip6_query); 1901 __br_multicast_open(br, &br->ip6_own_query);
1806#endif 1902#endif
1807} 1903}
1808 1904
@@ -1815,11 +1911,11 @@ void br_multicast_stop(struct net_bridge *br)
1815 int i; 1911 int i;
1816 1912
1817 del_timer_sync(&br->multicast_router_timer); 1913 del_timer_sync(&br->multicast_router_timer);
1818 del_timer_sync(&br->ip4_querier.timer); 1914 del_timer_sync(&br->ip4_other_query.timer);
1819 del_timer_sync(&br->ip4_query.timer); 1915 del_timer_sync(&br->ip4_own_query.timer);
1820#if IS_ENABLED(CONFIG_IPV6) 1916#if IS_ENABLED(CONFIG_IPV6)
1821 del_timer_sync(&br->ip6_querier.timer); 1917 del_timer_sync(&br->ip6_other_query.timer);
1822 del_timer_sync(&br->ip6_query.timer); 1918 del_timer_sync(&br->ip6_own_query.timer);
1823#endif 1919#endif
1824 1920
1825 spin_lock_bh(&br->multicast_lock); 1921 spin_lock_bh(&br->multicast_lock);
@@ -1923,7 +2019,7 @@ unlock:
1923} 2019}
1924 2020
1925static void br_multicast_start_querier(struct net_bridge *br, 2021static void br_multicast_start_querier(struct net_bridge *br,
1926 struct bridge_mcast_query *query) 2022 struct bridge_mcast_own_query *query)
1927{ 2023{
1928 struct net_bridge_port *port; 2024 struct net_bridge_port *port;
1929 2025
@@ -1934,11 +2030,11 @@ static void br_multicast_start_querier(struct net_bridge *br,
1934 port->state == BR_STATE_BLOCKING) 2030 port->state == BR_STATE_BLOCKING)
1935 continue; 2031 continue;
1936 2032
1937 if (query == &br->ip4_query) 2033 if (query == &br->ip4_own_query)
1938 br_multicast_enable(&port->ip4_query); 2034 br_multicast_enable(&port->ip4_own_query);
1939#if IS_ENABLED(CONFIG_IPV6) 2035#if IS_ENABLED(CONFIG_IPV6)
1940 else 2036 else
1941 br_multicast_enable(&port->ip6_query); 2037 br_multicast_enable(&port->ip6_own_query);
1942#endif 2038#endif
1943 } 2039 }
1944} 2040}
@@ -1974,9 +2070,9 @@ rollback:
1974 goto rollback; 2070 goto rollback;
1975 } 2071 }
1976 2072
1977 br_multicast_start_querier(br, &br->ip4_query); 2073 br_multicast_start_querier(br, &br->ip4_own_query);
1978#if IS_ENABLED(CONFIG_IPV6) 2074#if IS_ENABLED(CONFIG_IPV6)
1979 br_multicast_start_querier(br, &br->ip6_query); 2075 br_multicast_start_querier(br, &br->ip6_own_query);
1980#endif 2076#endif
1981 2077
1982unlock: 2078unlock:
@@ -2001,16 +2097,16 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2001 2097
2002 max_delay = br->multicast_query_response_interval; 2098 max_delay = br->multicast_query_response_interval;
2003 2099
2004 if (!timer_pending(&br->ip4_querier.timer)) 2100 if (!timer_pending(&br->ip4_other_query.timer))
2005 br->ip4_querier.delay_time = jiffies + max_delay; 2101 br->ip4_other_query.delay_time = jiffies + max_delay;
2006 2102
2007 br_multicast_start_querier(br, &br->ip4_query); 2103 br_multicast_start_querier(br, &br->ip4_own_query);
2008 2104
2009#if IS_ENABLED(CONFIG_IPV6) 2105#if IS_ENABLED(CONFIG_IPV6)
2010 if (!timer_pending(&br->ip6_querier.timer)) 2106 if (!timer_pending(&br->ip6_other_query.timer))
2011 br->ip6_querier.delay_time = jiffies + max_delay; 2107 br->ip6_other_query.delay_time = jiffies + max_delay;
2012 2108
2013 br_multicast_start_querier(br, &br->ip6_query); 2109 br_multicast_start_querier(br, &br->ip6_own_query);
2014#endif 2110#endif
2015 2111
2016unlock: 2112unlock:
@@ -2061,3 +2157,109 @@ unlock:
2061 2157
2062 return err; 2158 return err;
2063} 2159}
2160
2161/**
2162 * br_multicast_list_adjacent - Returns snooped multicast addresses
2163 * @dev: The bridge port adjacent to which to retrieve addresses
2164 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2165 *
2166 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2167 * snooping feature on all bridge ports of dev's bridge device, excluding
2168 * the addresses from dev itself.
2169 *
2170 * Returns the number of items added to br_ip_list.
2171 *
2172 * Notes:
2173 * - br_ip_list needs to be initialized by caller
2174 * - br_ip_list may contain duplicates in the end
2175 * (the caller must take care of them)
2176 * - br_ip_list needs to be freed by caller
2177 */
2178int br_multicast_list_adjacent(struct net_device *dev,
2179 struct list_head *br_ip_list)
2180{
2181 struct net_bridge *br;
2182 struct net_bridge_port *port;
2183 struct net_bridge_port_group *group;
2184 struct br_ip_list *entry;
2185 int count = 0;
2186
2187 rcu_read_lock();
2188 if (!br_ip_list || !br_port_exists(dev))
2189 goto unlock;
2190
2191 port = br_port_get_rcu(dev);
2192 if (!port || !port->br)
2193 goto unlock;
2194
2195 br = port->br;
2196
2197 list_for_each_entry_rcu(port, &br->port_list, list) {
2198 if (!port->dev || port->dev == dev)
2199 continue;
2200
2201 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2202 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2203 if (!entry)
2204 goto unlock;
2205
2206 entry->addr = group->addr;
2207 list_add(&entry->list, br_ip_list);
2208 count++;
2209 }
2210 }
2211
2212unlock:
2213 rcu_read_unlock();
2214 return count;
2215}
2216EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2217
2218/**
2219 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2220 * @dev: The bridge port adjacent to which to check for a querier
2221 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2222 *
2223 * Checks whether the given interface has a bridge on top and if so returns
2224 * true if a selected querier is behind one of the other ports of this
2225 * bridge. Otherwise returns false.
2226 */
2227bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2228{
2229 struct net_bridge *br;
2230 struct net_bridge_port *port;
2231 bool ret = false;
2232
2233 rcu_read_lock();
2234 if (!br_port_exists(dev))
2235 goto unlock;
2236
2237 port = br_port_get_rcu(dev);
2238 if (!port || !port->br)
2239 goto unlock;
2240
2241 br = port->br;
2242
2243 switch (proto) {
2244 case ETH_P_IP:
2245 if (!timer_pending(&br->ip4_other_query.timer) ||
2246 rcu_dereference(br->ip4_querier.port) == port)
2247 goto unlock;
2248 break;
2249#if IS_ENABLED(CONFIG_IPV6)
2250 case ETH_P_IPV6:
2251 if (!timer_pending(&br->ip6_other_query.timer) ||
2252 rcu_dereference(br->ip6_querier.port) == port)
2253 goto unlock;
2254 break;
2255#endif
2256 default:
2257 goto unlock;
2258 }
2259
2260 ret = true;
2261unlock:
2262 rcu_read_unlock();
2263 return ret;
2264}
2265EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
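
These two exports give a module stacked on top of a bridge port a read-only view of the snooping state. A sketch of a hypothetical consumer honouring the kernel-doc contract above (caller-initialized list, caller frees the entries, duplicates possible):

	static void example_dump_adjacent(struct net_device *port_dev)
	{
		struct br_ip_list *entry, *tmp;
		LIST_HEAD(ip_list);
		int n;

		n = br_multicast_list_adjacent(port_dev, &ip_list);
		pr_info("%s: %d snooped group(s)\n", port_dev->name, n);

		/* The entries now belong to us; free them when done. */
		list_for_each_entry_safe(entry, tmp, &ip_list, list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
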
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2acf7fa1fec6..a615264cf01a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
535 if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) 535 if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
536 return br; 536 return br;
537 537
538 vlan = __vlan_find_dev_deep(br, skb->vlan_proto, 538 vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
539 vlan_tx_tag_get(skb) & VLAN_VID_MASK); 539 vlan_tx_tag_get(skb) & VLAN_VID_MASK);
540 540
541 return vlan ? vlan : br; 541 return vlan ? vlan : br;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e8844d975b32..26edb518b839 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -328,6 +328,7 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
328static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) 328static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
329{ 329{
330 int err; 330 int err;
331 unsigned long old_flags = p->flags;
331 332
332 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); 333 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
333 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); 334 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -353,6 +354,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
353 if (err) 354 if (err)
354 return err; 355 return err;
355 } 356 }
357
358 br_port_flags_change(p, old_flags ^ p->flags);
356 return 0; 359 return 0;
357} 360}
358 361
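
br_setport() snapshots the flags before applying the netlink attributes and hands the XOR of old and new to br_port_flags_change(), so only bits that actually flipped are acted on. A worked example:

	static void example_flags_change(void)
	{
		unsigned long old_flags = BR_LEARNING | BR_FLOOD; /* before */
		unsigned long new_flags = BR_FLOOD;   /* BR_LEARNING cleared */
		unsigned long mask = old_flags ^ new_flags; /* == BR_LEARNING */

		if (mask & BR_AUTO_MASK)	/* true here */
			/* nbp_update_port_count() re-counts auto-ports */ ;
	}
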
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
deleted file mode 100644
index 2998dd1769a0..000000000000
--- a/net/bridge/br_notify.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Device event handling
3 * Linux ethernet bridge
4 *
5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/rtnetlink.h>
16#include <net/net_namespace.h>
17
18#include "br_private.h"
19
20static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
21
22struct notifier_block br_device_notifier = {
23 .notifier_call = br_device_event
24};
25
26/*
27 * Handle changes in state of network devices enslaved to a bridge.
28 *
29 * Note: don't care about up/down if bridge itself is down, because
30 * port state is checked when bridge is brought up.
31 */
32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
33{
34 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
35 struct net_bridge_port *p;
36 struct net_bridge *br;
37 bool changed_addr;
38 int err;
39
40 /* register of bridge completed, add sysfs entries */
41 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
42 br_sysfs_addbr(dev);
43 return NOTIFY_DONE;
44 }
45
46 /* not a port of a bridge */
47 p = br_port_get_rtnl(dev);
48 if (!p)
49 return NOTIFY_DONE;
50
51 br = p->br;
52
53 switch (event) {
54 case NETDEV_CHANGEMTU:
55 dev_set_mtu(br->dev, br_min_mtu(br));
56 break;
57
58 case NETDEV_CHANGEADDR:
59 spin_lock_bh(&br->lock);
60 br_fdb_changeaddr(p, dev->dev_addr);
61 changed_addr = br_stp_recalculate_bridge_id(br);
62 spin_unlock_bh(&br->lock);
63
64 if (changed_addr)
65 call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
66
67 break;
68
69 case NETDEV_CHANGE:
70 br_port_carrier_check(p);
71 break;
72
73 case NETDEV_FEAT_CHANGE:
74 netdev_update_features(br->dev);
75 break;
76
77 case NETDEV_DOWN:
78 spin_lock_bh(&br->lock);
79 if (br->dev->flags & IFF_UP)
80 br_stp_disable_port(p);
81 spin_unlock_bh(&br->lock);
82 break;
83
84 case NETDEV_UP:
85 if (netif_running(br->dev) && netif_oper_up(dev)) {
86 spin_lock_bh(&br->lock);
87 br_stp_enable_port(p);
88 spin_unlock_bh(&br->lock);
89 }
90 break;
91
92 case NETDEV_UNREGISTER:
93 br_del_if(br, dev);
94 break;
95
96 case NETDEV_CHANGENAME:
97 err = br_sysfs_renameif(p);
98 if (err)
99 return notifier_from_errno(err);
100 break;
101
102 case NETDEV_PRE_TYPE_CHANGE:
103 /* Forbid underlaying device to change its type. */
104 return NOTIFY_BAD;
105
106 case NETDEV_RESEND_IGMP:
107 /* Propagate to master device */
108 call_netdevice_notifiers(event, br->dev);
109 break;
110 }
111
112 /* Events that may cause spanning tree to refresh */
113 if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
114 event == NETDEV_CHANGE || event == NETDEV_DOWN)
115 br_ifinfo_notify(RTM_NEWLINK, p);
116
117 return NOTIFY_DONE;
118}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 59d3a85c5873..23caf5b0309e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -35,6 +35,8 @@
35#define BR_GROUPFWD_DEFAULT 0 35#define BR_GROUPFWD_DEFAULT 0
36/* Don't allow forwarding control protocols like STP and LLDP */ 36/* Don't allow forwarding control protocols like STP and LLDP */
37#define BR_GROUPFWD_RESTRICTED 0x4007u 37#define BR_GROUPFWD_RESTRICTED 0x4007u
38/* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
39#define BR_GROUPFWD_8021AD 0xB801u
38 40
39/* Path to usermode spanning tree program */ 41/* Path to usermode spanning tree program */
40#define BR_STP_PROG "/sbin/bridge-stp" 42#define BR_STP_PROG "/sbin/bridge-stp"
@@ -54,30 +56,24 @@ struct mac_addr
54 unsigned char addr[ETH_ALEN]; 56 unsigned char addr[ETH_ALEN];
55}; 57};
56 58
57struct br_ip
58{
59 union {
60 __be32 ip4;
61#if IS_ENABLED(CONFIG_IPV6)
62 struct in6_addr ip6;
63#endif
64 } u;
65 __be16 proto;
66 __u16 vid;
67};
68
69#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 59#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
70/* our own querier */ 60/* our own querier */
71struct bridge_mcast_query { 61struct bridge_mcast_own_query {
72 struct timer_list timer; 62 struct timer_list timer;
73 u32 startup_sent; 63 u32 startup_sent;
74}; 64};
75 65
76/* other querier */ 66/* other querier */
77struct bridge_mcast_querier { 67struct bridge_mcast_other_query {
78 struct timer_list timer; 68 struct timer_list timer;
79 unsigned long delay_time; 69 unsigned long delay_time;
80}; 70};
71
72/* selected querier */
73struct bridge_mcast_querier {
74 struct br_ip addr;
75 struct net_bridge_port __rcu *port;
76};
81#endif 77#endif
82 78
83struct net_port_vlans { 79struct net_port_vlans {
@@ -174,11 +170,13 @@ struct net_bridge_port
174#define BR_ADMIN_COST 0x00000010 170#define BR_ADMIN_COST 0x00000010
175#define BR_LEARNING 0x00000020 171#define BR_LEARNING 0x00000020
176#define BR_FLOOD 0x00000040 172#define BR_FLOOD 0x00000040
173#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
174#define BR_PROMISC 0x00000080
177 175
178#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 176#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
179 struct bridge_mcast_query ip4_query; 177 struct bridge_mcast_own_query ip4_own_query;
180#if IS_ENABLED(CONFIG_IPV6) 178#if IS_ENABLED(CONFIG_IPV6)
181 struct bridge_mcast_query ip6_query; 179 struct bridge_mcast_own_query ip6_own_query;
182#endif /* IS_ENABLED(CONFIG_IPV6) */ 180#endif /* IS_ENABLED(CONFIG_IPV6) */
183 unsigned char multicast_router; 181 unsigned char multicast_router;
184 struct timer_list multicast_router_timer; 182 struct timer_list multicast_router_timer;
@@ -198,6 +196,9 @@ struct net_bridge_port
198#endif 196#endif
199}; 197};
200 198
199#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
200#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
201
201#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 202#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
202 203
203static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) 204static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
@@ -227,6 +228,7 @@ struct net_bridge
227 bool nf_call_arptables; 228 bool nf_call_arptables;
228#endif 229#endif
229 u16 group_fwd_mask; 230 u16 group_fwd_mask;
231 u16 group_fwd_mask_required;
230 232
231 /* STP */ 233 /* STP */
232 bridge_id designated_root; 234 bridge_id designated_root;
@@ -241,6 +243,7 @@ struct net_bridge
241 unsigned long bridge_forward_delay; 243 unsigned long bridge_forward_delay;
242 244
243 u8 group_addr[ETH_ALEN]; 245 u8 group_addr[ETH_ALEN];
246 bool group_addr_set;
244 u16 root_port; 247 u16 root_port;
245 248
246 enum { 249 enum {
@@ -277,11 +280,13 @@ struct net_bridge
277 struct hlist_head router_list; 280 struct hlist_head router_list;
278 281
279 struct timer_list multicast_router_timer; 282 struct timer_list multicast_router_timer;
283 struct bridge_mcast_other_query ip4_other_query;
284 struct bridge_mcast_own_query ip4_own_query;
280 struct bridge_mcast_querier ip4_querier; 285 struct bridge_mcast_querier ip4_querier;
281 struct bridge_mcast_query ip4_query;
282#if IS_ENABLED(CONFIG_IPV6) 286#if IS_ENABLED(CONFIG_IPV6)
287 struct bridge_mcast_other_query ip6_other_query;
288 struct bridge_mcast_own_query ip6_own_query;
283 struct bridge_mcast_querier ip6_querier; 289 struct bridge_mcast_querier ip6_querier;
284 struct bridge_mcast_query ip6_query;
285#endif /* IS_ENABLED(CONFIG_IPV6) */ 290#endif /* IS_ENABLED(CONFIG_IPV6) */
286#endif 291#endif
287 292
@@ -290,8 +295,10 @@ struct net_bridge
290 struct timer_list topology_change_timer; 295 struct timer_list topology_change_timer;
291 struct timer_list gc_timer; 296 struct timer_list gc_timer;
292 struct kobject *ifobj; 297 struct kobject *ifobj;
298 u32 auto_cnt;
293#ifdef CONFIG_BRIDGE_VLAN_FILTERING 299#ifdef CONFIG_BRIDGE_VLAN_FILTERING
294 u8 vlan_enabled; 300 u8 vlan_enabled;
301 __be16 vlan_proto;
295 struct net_port_vlans __rcu *vlan_info; 302 struct net_port_vlans __rcu *vlan_info;
296#endif 303#endif
297}; 304};
@@ -327,8 +334,6 @@ struct br_input_skb_cb {
327#define br_debug(br, format, args...) \ 334#define br_debug(br, format, args...) \
328 pr_debug("%s: " format, (br)->dev->name, ##args) 335 pr_debug("%s: " format, (br)->dev->name, ##args)
329 336
330extern struct notifier_block br_device_notifier;
331
332/* called under bridge lock */ 337/* called under bridge lock */
333static inline int br_is_root_bridge(const struct net_bridge *br) 338static inline int br_is_root_bridge(const struct net_bridge *br)
334{ 339{
@@ -395,6 +400,8 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
395 const unsigned char *addr, u16 nlh_flags); 400 const unsigned char *addr, u16 nlh_flags);
396int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 401int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
397 struct net_device *dev, int idx); 402 struct net_device *dev, int idx);
403int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
404void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
398 405
399/* br_forward.c */ 406/* br_forward.c */
400void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); 407void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -415,6 +422,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev);
415int br_min_mtu(const struct net_bridge *br); 422int br_min_mtu(const struct net_bridge *br);
416netdev_features_t br_features_recompute(struct net_bridge *br, 423netdev_features_t br_features_recompute(struct net_bridge *br,
417 netdev_features_t features); 424 netdev_features_t features);
425void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
426void br_manage_promisc(struct net_bridge *br);
418 427
419/* br_input.c */ 428/* br_input.c */
420int br_handle_frame_finish(struct sk_buff *skb); 429int br_handle_frame_finish(struct sk_buff *skb);
@@ -485,7 +494,7 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
485 494
486static inline bool 495static inline bool
487__br_multicast_querier_exists(struct net_bridge *br, 496__br_multicast_querier_exists(struct net_bridge *br,
488 struct bridge_mcast_querier *querier) 497 struct bridge_mcast_other_query *querier)
489{ 498{
490 return time_is_before_jiffies(querier->delay_time) && 499 return time_is_before_jiffies(querier->delay_time) &&
491 (br->multicast_querier || timer_pending(&querier->timer)); 500 (br->multicast_querier || timer_pending(&querier->timer));
@@ -496,10 +505,10 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
496{ 505{
497 switch (eth->h_proto) { 506 switch (eth->h_proto) {
498 case (htons(ETH_P_IP)): 507 case (htons(ETH_P_IP)):
499 return __br_multicast_querier_exists(br, &br->ip4_querier); 508 return __br_multicast_querier_exists(br, &br->ip4_other_query);
500#if IS_ENABLED(CONFIG_IPV6) 509#if IS_ENABLED(CONFIG_IPV6)
501 case (htons(ETH_P_IPV6)): 510 case (htons(ETH_P_IPV6)):
502 return __br_multicast_querier_exists(br, &br->ip6_querier); 511 return __br_multicast_querier_exists(br, &br->ip6_other_query);
503#endif 512#endif
504 default: 513 default:
505 return false; 514 return false;
@@ -589,7 +598,10 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
589int br_vlan_delete(struct net_bridge *br, u16 vid); 598int br_vlan_delete(struct net_bridge *br, u16 vid);
590void br_vlan_flush(struct net_bridge *br); 599void br_vlan_flush(struct net_bridge *br);
591bool br_vlan_find(struct net_bridge *br, u16 vid); 600bool br_vlan_find(struct net_bridge *br, u16 vid);
601void br_recalculate_fwd_mask(struct net_bridge *br);
592int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); 602int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
603int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
604void br_vlan_init(struct net_bridge *br);
593int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); 605int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
594int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); 606int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
595void nbp_vlan_flush(struct net_bridge_port *port); 607void nbp_vlan_flush(struct net_bridge_port *port);
@@ -633,6 +645,10 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
633 return v->pvid ?: VLAN_N_VID; 645 return v->pvid ?: VLAN_N_VID;
634} 646}
635 647
648static inline int br_vlan_enabled(struct net_bridge *br)
649{
650 return br->vlan_enabled;
651}
636#else 652#else
637static inline bool br_allowed_ingress(struct net_bridge *br, 653static inline bool br_allowed_ingress(struct net_bridge *br,
638 struct net_port_vlans *v, 654 struct net_port_vlans *v,
@@ -681,6 +697,14 @@ static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
681 return false; 697 return false;
682} 698}
683 699
700static inline void br_recalculate_fwd_mask(struct net_bridge *br)
701{
702}
703
704static inline void br_vlan_init(struct net_bridge *br)
705{
706}
707
684static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 708static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
685{ 709{
686 return -EOPNOTSUPP; 710 return -EOPNOTSUPP;
@@ -719,6 +743,11 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
719{ 743{
720 return VLAN_N_VID; /* Returns invalid vid */ 744 return VLAN_N_VID; /* Returns invalid vid */
721} 745}
746
747static inline int br_vlan_enabled(struct net_bridge *br)
748{
749 return 0;
750}
722#endif 751#endif
723 752
724/* br_netfilter.c */ 753/* br_netfilter.c */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 8dac65552f19..c9e2572b15f4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -312,10 +312,19 @@ static ssize_t group_addr_store(struct device *d,
312 new_addr[5] == 3) /* 802.1X PAE address */ 312 new_addr[5] == 3) /* 802.1X PAE address */
313 return -EINVAL; 313 return -EINVAL;
314 314
315 if (!rtnl_trylock())
316 return restart_syscall();
317
315 spin_lock_bh(&br->lock); 318 spin_lock_bh(&br->lock);
316 for (i = 0; i < 6; i++) 319 for (i = 0; i < 6; i++)
317 br->group_addr[i] = new_addr[i]; 320 br->group_addr[i] = new_addr[i];
318 spin_unlock_bh(&br->lock); 321 spin_unlock_bh(&br->lock);
322
323 br->group_addr_set = true;
324 br_recalculate_fwd_mask(br);
325
326 rtnl_unlock();
327
319 return len; 328 return len;
320} 329}
321 330
@@ -700,6 +709,22 @@ static ssize_t vlan_filtering_store(struct device *d,
700 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle); 709 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
701} 710}
702static DEVICE_ATTR_RW(vlan_filtering); 711static DEVICE_ATTR_RW(vlan_filtering);
712
713static ssize_t vlan_protocol_show(struct device *d,
714 struct device_attribute *attr,
715 char *buf)
716{
717 struct net_bridge *br = to_bridge(d);
718 return sprintf(buf, "%#06x\n", ntohs(br->vlan_proto));
719}
720
721static ssize_t vlan_protocol_store(struct device *d,
722 struct device_attribute *attr,
723 const char *buf, size_t len)
724{
725 return store_bridge_parm(d, buf, len, br_vlan_set_proto);
726}
727static DEVICE_ATTR_RW(vlan_protocol);
703#endif 728#endif
704 729
705static struct attribute *bridge_attrs[] = { 730static struct attribute *bridge_attrs[] = {
@@ -745,6 +770,7 @@ static struct attribute *bridge_attrs[] = {
745#endif 770#endif
746#ifdef CONFIG_BRIDGE_VLAN_FILTERING 771#ifdef CONFIG_BRIDGE_VLAN_FILTERING
747 &dev_attr_vlan_filtering.attr, 772 &dev_attr_vlan_filtering.attr,
773 &dev_attr_vlan_protocol.attr,
748#endif 774#endif
749 NULL 775 NULL
750}; 776};
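The new vlan_protocol attribute is read/write: reads print the configured ethertype through the "%#06x" format above (0x8100 or 0x88a8), and writes are parsed by store_bridge_parm() and handed to br_vlan_set_proto(), which accepts only ETH_P_8021Q and ETH_P_8021AD and returns -EPROTONOSUPPORT for anything else. Assuming the usual bridge sysfs layout, switching a bridge to 802.1ad filtering would look like: echo 0x88a8 > /sys/class/net/br0/bridge/vlan_protocol.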
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index dd595bd7fa82..e561cd59b8a6 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -41,20 +41,30 @@ static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
41} \ 41} \
42static int store_##_name(struct net_bridge_port *p, unsigned long v) \ 42static int store_##_name(struct net_bridge_port *p, unsigned long v) \
43{ \ 43{ \
44 unsigned long flags = p->flags; \ 44 return store_flag(p, v, _mask); \
45 if (v) \
46 flags |= _mask; \
47 else \
48 flags &= ~_mask; \
49 if (flags != p->flags) { \
50 p->flags = flags; \
51 br_ifinfo_notify(RTM_NEWLINK, p); \
52 } \
53 return 0; \
54} \ 45} \
55static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \ 46static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
56 show_##_name, store_##_name) 47 show_##_name, store_##_name)
57 48
49static int store_flag(struct net_bridge_port *p, unsigned long v,
50 unsigned long mask)
51{
52 unsigned long flags;
53
54 flags = p->flags;
55
56 if (v)
57 flags |= mask;
58 else
59 flags &= ~mask;
60
61 if (flags != p->flags) {
62 p->flags = flags;
63 br_port_flags_change(p, mask);
64 br_ifinfo_notify(RTM_NEWLINK, p);
65 }
66 return 0;
67}
58 68
59static ssize_t show_path_cost(struct net_bridge_port *p, char *buf) 69static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
60{ 70{
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5fee2feaf292..2b2774fe0703 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -60,7 +60,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
60 * that ever changes this code will allow tagged 60 * that ever changes this code will allow tagged
61 * traffic to enter the bridge. 61 * traffic to enter the bridge.
62 */ 62 */
63 err = vlan_vid_add(dev, htons(ETH_P_8021Q), vid); 63 err = vlan_vid_add(dev, br->vlan_proto, vid);
64 if (err) 64 if (err)
65 return err; 65 return err;
66 } 66 }
@@ -80,7 +80,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
80 80
81out_filt: 81out_filt:
82 if (p) 82 if (p)
83 vlan_vid_del(dev, htons(ETH_P_8021Q), vid); 83 vlan_vid_del(dev, br->vlan_proto, vid);
84 return err; 84 return err;
85} 85}
86 86
@@ -92,8 +92,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
92 __vlan_delete_pvid(v, vid); 92 __vlan_delete_pvid(v, vid);
93 clear_bit(vid, v->untagged_bitmap); 93 clear_bit(vid, v->untagged_bitmap);
94 94
95 if (v->port_idx) 95 if (v->port_idx) {
96 vlan_vid_del(v->parent.port->dev, htons(ETH_P_8021Q), vid); 96 struct net_bridge_port *p = v->parent.port;
97 vlan_vid_del(p->dev, p->br->vlan_proto, vid);
98 }
97 99
98 clear_bit(vid, v->vlan_bitmap); 100 clear_bit(vid, v->vlan_bitmap);
99 v->num_vlans--; 101 v->num_vlans--;
@@ -158,7 +160,8 @@ out:
158bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, 160bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
159 struct sk_buff *skb, u16 *vid) 161 struct sk_buff *skb, u16 *vid)
160{ 162{
161 int err; 163 bool tagged;
164 __be16 proto;
162 165
163 /* If VLAN filtering is disabled on the bridge, all packets are 166 /* If VLAN filtering is disabled on the bridge, all packets are
164 * permitted. 167 * permitted.
@@ -172,19 +175,41 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
172 if (!v) 175 if (!v)
173 goto drop; 176 goto drop;
174 177
178 proto = br->vlan_proto;
179
175 /* If vlan tx offload is disabled on bridge device and frame was 180 /* If vlan tx offload is disabled on bridge device and frame was
176 * sent from vlan device on the bridge device, it does not have 181 * sent from vlan device on the bridge device, it does not have
177 * HW accelerated vlan tag. 182 * HW accelerated vlan tag.
178 */ 183 */
179 if (unlikely(!vlan_tx_tag_present(skb) && 184 if (unlikely(!vlan_tx_tag_present(skb) &&
180 (skb->protocol == htons(ETH_P_8021Q) || 185 skb->protocol == proto)) {
181 skb->protocol == htons(ETH_P_8021AD)))) {
182 skb = vlan_untag(skb); 186 skb = vlan_untag(skb);
183 if (unlikely(!skb)) 187 if (unlikely(!skb))
184 return false; 188 return false;
185 } 189 }
186 190
187 err = br_vlan_get_tag(skb, vid); 191 if (!br_vlan_get_tag(skb, vid)) {
192 /* Tagged frame */
193 if (skb->vlan_proto != proto) {
194 /* Protocol-mismatch, empty out vlan_tci for new tag */
195 skb_push(skb, ETH_HLEN);
196 skb = __vlan_put_tag(skb, skb->vlan_proto,
197 vlan_tx_tag_get(skb));
198 if (unlikely(!skb))
199 return false;
200
201 skb_pull(skb, ETH_HLEN);
202 skb_reset_mac_len(skb);
203 *vid = 0;
204 tagged = false;
205 } else {
206 tagged = true;
207 }
208 } else {
209 /* Untagged frame */
210 tagged = false;
211 }
212
188 if (!*vid) { 213 if (!*vid) {
189 u16 pvid = br_get_pvid(v); 214 u16 pvid = br_get_pvid(v);
190 215
@@ -199,9 +224,9 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
199 * ingress frame is considered to belong to this vlan. 224 * ingress frame is considered to belong to this vlan.
200 */ 225 */
201 *vid = pvid; 226 *vid = pvid;
202 if (likely(err)) 227 if (likely(!tagged))
203 /* Untagged Frame. */ 228 /* Untagged Frame. */
204 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 229 __vlan_hwaccel_put_tag(skb, proto, pvid);
 205 else 230 else
 206 /* Priority-tagged Frame. 231 /* Priority-tagged Frame.
 207 * At this point, We know that skb->vlan_tci had 232 * At this point, We know that skb->vlan_tci had
@@ -254,7 +279,9 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
254 if (!v) 279 if (!v)
255 return false; 280 return false;
256 281
257 br_vlan_get_tag(skb, vid); 282 if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
283 *vid = 0;
284
258 if (!*vid) { 285 if (!*vid) {
259 *vid = br_get_pvid(v); 286 *vid = br_get_pvid(v);
260 if (*vid == VLAN_N_VID) 287 if (*vid == VLAN_N_VID)
@@ -351,6 +378,33 @@ out:
351 return found; 378 return found;
352} 379}
353 380
381/* Must be protected by RTNL. */
382static void recalculate_group_addr(struct net_bridge *br)
383{
384 if (br->group_addr_set)
385 return;
386
387 spin_lock_bh(&br->lock);
388 if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
389 /* Bridge Group Address */
390 br->group_addr[5] = 0x00;
391 } else { /* vlan_enabled && ETH_P_8021AD */
392 /* Provider Bridge Group Address */
393 br->group_addr[5] = 0x08;
394 }
395 spin_unlock_bh(&br->lock);
396}
397
398/* Must be protected by RTNL. */
399void br_recalculate_fwd_mask(struct net_bridge *br)
400{
401 if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
402 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
403 else /* vlan_enabled && ETH_P_8021AD */
404 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
405 ~(1u << br->group_addr[5]);
406}
407
354int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) 408int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
355{ 409{
356 if (!rtnl_trylock()) 410 if (!rtnl_trylock())
@@ -360,12 +414,88 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
360 goto unlock; 414 goto unlock;
361 415
362 br->vlan_enabled = val; 416 br->vlan_enabled = val;
417 br_manage_promisc(br);
418 recalculate_group_addr(br);
419 br_recalculate_fwd_mask(br);
363 420
364unlock: 421unlock:
365 rtnl_unlock(); 422 rtnl_unlock();
366 return 0; 423 return 0;
367} 424}
368 425
426int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
427{
428 int err = 0;
429 struct net_bridge_port *p;
430 struct net_port_vlans *pv;
431 __be16 proto, oldproto;
432 u16 vid, errvid;
433
434 if (val != ETH_P_8021Q && val != ETH_P_8021AD)
435 return -EPROTONOSUPPORT;
436
437 if (!rtnl_trylock())
438 return restart_syscall();
439
440 proto = htons(val);
441 if (br->vlan_proto == proto)
442 goto unlock;
443
444 /* Add VLANs for the new proto to the device filter. */
445 list_for_each_entry(p, &br->port_list, list) {
446 pv = rtnl_dereference(p->vlan_info);
447 if (!pv)
448 continue;
449
450 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
451 err = vlan_vid_add(p->dev, proto, vid);
452 if (err)
453 goto err_filt;
454 }
455 }
456
457 oldproto = br->vlan_proto;
458 br->vlan_proto = proto;
459
460 recalculate_group_addr(br);
461 br_recalculate_fwd_mask(br);
462
463 /* Delete VLANs for the old proto from the device filter. */
464 list_for_each_entry(p, &br->port_list, list) {
465 pv = rtnl_dereference(p->vlan_info);
466 if (!pv)
467 continue;
468
469 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
470 vlan_vid_del(p->dev, oldproto, vid);
471 }
472
473unlock:
474 rtnl_unlock();
475 return err;
476
477err_filt:
478 errvid = vid;
479 for_each_set_bit(vid, pv->vlan_bitmap, errvid)
480 vlan_vid_del(p->dev, proto, vid);
481
482 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
483 pv = rtnl_dereference(p->vlan_info);
484 if (!pv)
485 continue;
486
487 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
488 vlan_vid_del(p->dev, proto, vid);
489 }
490
491 goto unlock;
492}
493
494void br_vlan_init(struct net_bridge *br)
495{
496 br->vlan_proto = htons(ETH_P_8021Q);
497}
498
369/* Must be protected by RTNL. 499/* Must be protected by RTNL.
370 * Must be called with vid in range from 1 to 4094 inclusive. 500 * Must be called with vid in range from 1 to 4094 inclusive.
371 */ 501 */
@@ -432,7 +562,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
432 return; 562 return;
433 563
434 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) 564 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
435 vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid); 565 vlan_vid_del(port->dev, port->br->vlan_proto, vid);
436 566
437 __vlan_flush(pv); 567 __vlan_flush(pv);
438} 568}
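br_vlan_set_proto() above orders its work as a two-phase update: every port's VID filter is first programmed under the new ethertype, the bridge then commits by assigning br->vlan_proto, and only afterwards are the old-protocol entries removed, so a configured VID is never left programmed under neither protocol. At the commit point, recalculate_group_addr() and br_recalculate_fwd_mask() are re-run so the 802.1ad group address (last octet 0x08) and the forwarding mask stay consistent with the active protocol. On failure, err_filt unwinds the partially programmed port up to the failing vid, then walks the already-completed ports in reverse. A minimal standalone sketch of the same idiom, with hypothetical add_entry()/del_entry() helpers standing in for vlan_vid_add()/vlan_vid_del():

	/* Standalone sketch of the two-phase protocol switch; add_entry()
	 * and del_entry() are hypothetical stand-ins for vlan_vid_add()
	 * and vlan_vid_del(). */
	struct port { int hw[2]; };	/* one filter slot per protocol */
	struct bridge { struct port *ports; int nports; int proto; /* 0 or 1 */ };

	static int add_entry(struct port *p, int proto) { p->hw[proto] = 1; return 0; }
	static void del_entry(struct port *p, int proto) { p->hw[proto] = 0; }

	static int switch_proto(struct bridge *br, int new_proto)
	{
		int old_proto = br->proto;
		int i, j, err;

		/* Phase 1: program every port under the new protocol first. */
		for (i = 0; i < br->nports; i++) {
			err = add_entry(&br->ports[i], new_proto);
			if (err)
				goto unwind;
		}

		br->proto = new_proto;		/* commit */

		/* Phase 2: only now drop the old-protocol entries. */
		for (i = 0; i < br->nports; i++)
			del_entry(&br->ports[i], old_proto);
		return 0;

	unwind:
		/* Roll back only the adds that already succeeded. */
		for (j = 0; j < i; j++)
			del_entry(&br->ports[j], new_proto);
		return err;
	}

	int main(void)
	{
		struct port ports[2] = { { { 0 } }, { { 0 } } };
		struct bridge br = { ports, 2, 0 };

		return switch_proto(&br, 1);
	}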
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 5ca74a0e595f..629dc77874a9 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -2,14 +2,23 @@
2# Bridge netfilter configuration 2# Bridge netfilter configuration
3# 3#
4# 4#
5config NF_TABLES_BRIDGE 5menuconfig NF_TABLES_BRIDGE
6 depends on NF_TABLES 6 depends on BRIDGE && NETFILTER && NF_TABLES
7 tristate "Ethernet Bridge nf_tables support" 7 tristate "Ethernet Bridge nf_tables support"
8 8
9if NF_TABLES_BRIDGE
10
11config NFT_BRIDGE_META
12 tristate "Netfilter nf_table bridge meta support"
13 depends on NFT_META
14 help
15 Add support for bridge dedicated meta key.
16
17endif # NF_TABLES_BRIDGE
18
9menuconfig BRIDGE_NF_EBTABLES 19menuconfig BRIDGE_NF_EBTABLES
10 tristate "Ethernet Bridge tables (ebtables) support" 20 tristate "Ethernet Bridge tables (ebtables) support"
11 depends on BRIDGE && NETFILTER 21 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
12 select NETFILTER_XTABLES
13 help 22 help
14 ebtables is a general, extensible frame/packet identification 23 ebtables is a general, extensible frame/packet identification
15 framework. Say 'Y' or 'M' here if you want to do Ethernet 24 framework. Say 'Y' or 'M' here if you want to do Ethernet
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index ea7629f58b3d..6f2f3943d66f 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o 5obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
6obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
6 7
7obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o 8obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
8 9
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
new file mode 100644
index 000000000000..4f02109d708f
--- /dev/null
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2014 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/netlink.h>
14#include <linux/netfilter.h>
15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables.h>
17#include <net/netfilter/nft_meta.h>
18
19#include "../br_private.h"
20
21static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
22 struct nft_data data[NFT_REG_MAX + 1],
23 const struct nft_pktinfo *pkt)
24{
25 const struct nft_meta *priv = nft_expr_priv(expr);
26 const struct net_device *in = pkt->in, *out = pkt->out;
27 struct nft_data *dest = &data[priv->dreg];
28 const struct net_bridge_port *p;
29
30 switch (priv->key) {
31 case NFT_META_BRI_IIFNAME:
32 if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
33 goto err;
34 break;
35 case NFT_META_BRI_OIFNAME:
36 if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
37 goto err;
38 break;
39 default:
40 goto out;
41 }
42
43 strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
44 return;
45out:
46 return nft_meta_get_eval(expr, data, pkt);
47err:
48 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
49}
50
51static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
52 const struct nft_expr *expr,
53 const struct nlattr * const tb[])
54{
55 struct nft_meta *priv = nft_expr_priv(expr);
56 int err;
57
58 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
59 switch (priv->key) {
60 case NFT_META_BRI_IIFNAME:
61 case NFT_META_BRI_OIFNAME:
62 break;
63 default:
64 return nft_meta_get_init(ctx, expr, tb);
65 }
66
67 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
68 err = nft_validate_output_register(priv->dreg);
69 if (err < 0)
70 return err;
71
72 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
73 if (err < 0)
74 return err;
75
76 return 0;
77}
78
79static struct nft_expr_type nft_meta_bridge_type;
80static const struct nft_expr_ops nft_meta_bridge_get_ops = {
81 .type = &nft_meta_bridge_type,
82 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
83 .eval = nft_meta_bridge_get_eval,
84 .init = nft_meta_bridge_get_init,
85 .dump = nft_meta_get_dump,
86};
87
88static const struct nft_expr_ops nft_meta_bridge_set_ops = {
89 .type = &nft_meta_bridge_type,
90 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
91 .eval = nft_meta_set_eval,
92 .init = nft_meta_set_init,
93 .dump = nft_meta_set_dump,
94};
95
96static const struct nft_expr_ops *
97nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
98 const struct nlattr * const tb[])
99{
100 if (tb[NFTA_META_KEY] == NULL)
101 return ERR_PTR(-EINVAL);
102
103 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
104 return ERR_PTR(-EINVAL);
105
106 if (tb[NFTA_META_DREG])
107 return &nft_meta_bridge_get_ops;
108
109 if (tb[NFTA_META_SREG])
110 return &nft_meta_bridge_set_ops;
111
112 return ERR_PTR(-EINVAL);
113}
114
115static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
116 .family = NFPROTO_BRIDGE,
117 .name = "meta",
118 .select_ops = &nft_meta_bridge_select_ops,
119 .policy = nft_meta_policy,
120 .maxattr = NFTA_META_MAX,
121 .owner = THIS_MODULE,
122};
123
124static int __init nft_meta_bridge_module_init(void)
125{
126 return nft_register_expr(&nft_meta_bridge_type);
127}
128
129static void __exit nft_meta_bridge_module_exit(void)
130{
131 nft_unregister_expr(&nft_meta_bridge_type);
132}
133
134module_init(nft_meta_bridge_module_init);
135module_exit(nft_meta_bridge_module_exit);
136
137MODULE_LICENSE("GPL");
138MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
139MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
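The module registers a second "meta" expression type for NFPROTO_BRIDGE that handles just the two bridge-specific keys: the in/out device is resolved to its net_bridge_port and the owning bridge's name is copied into the destination register, while every other key falls through to the generic nft_meta handlers via nft_meta_get_init() and the shared set ops. With matching nftables userspace support this backs bridge-family rules that match on the bridge name, e.g. something along the lines of 'nft add rule bridge filter forward meta ibrname br0 accept' (the ibrname/obrname keyword spelling is an assumption about the userspace side, not part of this patch).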
diff --git a/net/can/af_can.c b/net/can/af_can.c
index a27f8aad9e99..ce82337521f6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -338,6 +338,29 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
338} 338}
339 339
340/** 340/**
341 * effhash - hash function for 29 bit CAN identifier reduction
342 * @can_id: 29 bit CAN identifier
343 *
344 * Description:
345 * To reduce the linear traversal in one linked list of _single_ EFF CAN
346 * frame subscriptions the 29 bit identifier is mapped to 10 bits.
347 * (see CAN_EFF_RCV_HASH_BITS definition)
348 *
349 * Return:
350 * Hash value from 0x000 - 0x3FF (enforced by CAN_EFF_RCV_HASH_BITS mask)
351 */
352static unsigned int effhash(canid_t can_id)
353{
354 unsigned int hash;
355
356 hash = can_id;
357 hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
358 hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
359
360 return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
361}
362
363/**
341 * find_rcv_list - determine optimal filterlist inside device filter struct 364 * find_rcv_list - determine optimal filterlist inside device filter struct
342 * @can_id: pointer to CAN identifier of a given can_filter 365 * @can_id: pointer to CAN identifier of a given can_filter
343 * @mask: pointer to CAN mask of a given can_filter 366 * @mask: pointer to CAN mask of a given can_filter
@@ -400,10 +423,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
400 !(*can_id & CAN_RTR_FLAG)) { 423 !(*can_id & CAN_RTR_FLAG)) {
401 424
402 if (*can_id & CAN_EFF_FLAG) { 425 if (*can_id & CAN_EFF_FLAG) {
403 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { 426 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
404 /* RFC: a future use-case for hash-tables? */ 427 return &d->rx_eff[effhash(*can_id)];
405 return &d->rx[RX_EFF];
406 }
407 } else { 428 } else {
408 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) 429 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
409 return &d->rx_sff[*can_id]; 430 return &d->rx_sff[*can_id];
@@ -632,7 +653,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
632 return matches; 653 return matches;
633 654
634 if (can_id & CAN_EFF_FLAG) { 655 if (can_id & CAN_EFF_FLAG) {
635 hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) { 656 hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
636 if (r->can_id == can_id) { 657 if (r->can_id == can_id) {
637 deliver(skb, r); 658 deliver(skb, r);
638 matches++; 659 matches++;
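effhash() folds the 29 bit identifier by XORing its three 10-bit slices, so find_rcv_list() at registration time and can_rcv_filter() at delivery time land in the same one of 1024 buckets. A small host-side sketch of the same folding (plain userspace C, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define CAN_EFF_RCV_HASH_BITS 10

	/* Same slicing as the kernel's effhash(): XOR the three 10-bit
	 * slices of a 29 bit EFF identifier, then mask to 10 bits. */
	static unsigned int effhash(uint32_t can_id)
	{
		unsigned int hash = can_id;

		hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
		hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

		return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
	}

	int main(void)
	{
		printf("%#x\n", effhash(0x1fffffff));	/* 0x1ff */
		printf("%#x %#x\n", effhash(0x400), effhash(0x800));	/* 0x1 0x2 */
		return 0;
	}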
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 6de58b40535c..fca0fe9fc45a 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -59,12 +59,17 @@ struct receiver {
59 char *ident; 59 char *ident;
60}; 60};
61 61
62enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
63#define CAN_EFF_RCV_HASH_BITS 10
64#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
65
66enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
63 67
64/* per device receive filters linked at dev->ml_priv */ 68/* per device receive filters linked at dev->ml_priv */
65struct dev_rcv_lists { 69struct dev_rcv_lists {
66 struct hlist_head rx[RX_MAX]; 70 struct hlist_head rx[RX_MAX];
67 struct hlist_head rx_sff[0x800]; 71 struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
72 struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
68 int remove_on_zero_entries; 73 int remove_on_zero_entries;
69 int entries; 74 int entries;
70}; 75};
diff --git a/net/can/proc.c b/net/can/proc.c
index b543470c8f8b..1a19b985a868 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -80,7 +80,6 @@ static const char rx_list_name[][8] = {
80 [RX_ALL] = "rx_all", 80 [RX_ALL] = "rx_all",
81 [RX_FIL] = "rx_fil", 81 [RX_FIL] = "rx_fil",
82 [RX_INV] = "rx_inv", 82 [RX_INV] = "rx_inv",
83 [RX_EFF] = "rx_eff",
84}; 83};
85 84
86/* 85/*
@@ -389,25 +388,26 @@ static const struct file_operations can_rcvlist_proc_fops = {
389 .release = single_release, 388 .release = single_release,
390}; 389};
391 390
392static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m, 391static inline void can_rcvlist_proc_show_array(struct seq_file *m,
393 struct net_device *dev, 392 struct net_device *dev,
394 struct dev_rcv_lists *d) 393 struct hlist_head *rcv_array,
394 unsigned int rcv_array_sz)
395{ 395{
396 int i; 396 unsigned int i;
397 int all_empty = 1; 397 int all_empty = 1;
398 398
399 /* check whether at least one list is non-empty */ 399 /* check whether at least one list is non-empty */
400 for (i = 0; i < 0x800; i++) 400 for (i = 0; i < rcv_array_sz; i++)
401 if (!hlist_empty(&d->rx_sff[i])) { 401 if (!hlist_empty(&rcv_array[i])) {
402 all_empty = 0; 402 all_empty = 0;
403 break; 403 break;
404 } 404 }
405 405
406 if (!all_empty) { 406 if (!all_empty) {
407 can_print_recv_banner(m); 407 can_print_recv_banner(m);
408 for (i = 0; i < 0x800; i++) { 408 for (i = 0; i < rcv_array_sz; i++) {
409 if (!hlist_empty(&d->rx_sff[i])) 409 if (!hlist_empty(&rcv_array[i]))
410 can_print_rcvlist(m, &d->rx_sff[i], dev); 410 can_print_rcvlist(m, &rcv_array[i], dev);
411 } 411 }
412 } else 412 } else
413 seq_printf(m, " (%s: no entry)\n", DNAME(dev)); 413 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
@@ -425,12 +425,15 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
425 425
426 /* sff receive list for 'all' CAN devices (dev == NULL) */ 426 /* sff receive list for 'all' CAN devices (dev == NULL) */
427 d = &can_rx_alldev_list; 427 d = &can_rx_alldev_list;
428 can_rcvlist_sff_proc_show_one(m, NULL, d); 428 can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
429 429
430 /* sff receive list for registered CAN devices */ 430 /* sff receive list for registered CAN devices */
431 for_each_netdev_rcu(&init_net, dev) { 431 for_each_netdev_rcu(&init_net, dev) {
432 if (dev->type == ARPHRD_CAN && dev->ml_priv) 432 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
433 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv); 433 d = dev->ml_priv;
434 can_rcvlist_proc_show_array(m, dev, d->rx_sff,
435 ARRAY_SIZE(d->rx_sff));
436 }
434 } 437 }
435 438
436 rcu_read_unlock(); 439 rcu_read_unlock();
@@ -452,6 +455,49 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
452 .release = single_release, 455 .release = single_release,
453}; 456};
454 457
458
459static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
460{
461 struct net_device *dev;
462 struct dev_rcv_lists *d;
463
464 /* RX_EFF */
465 seq_puts(m, "\nreceive list 'rx_eff':\n");
466
467 rcu_read_lock();
468
469 /* eff receive list for 'all' CAN devices (dev == NULL) */
470 d = &can_rx_alldev_list;
471 can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
472
473 /* eff receive list for registered CAN devices */
474 for_each_netdev_rcu(&init_net, dev) {
475 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
476 d = dev->ml_priv;
477 can_rcvlist_proc_show_array(m, dev, d->rx_eff,
478 ARRAY_SIZE(d->rx_eff));
479 }
480 }
481
482 rcu_read_unlock();
483
484 seq_putc(m, '\n');
485 return 0;
486}
487
488static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
489{
490 return single_open(file, can_rcvlist_eff_proc_show, NULL);
491}
492
493static const struct file_operations can_rcvlist_eff_proc_fops = {
494 .owner = THIS_MODULE,
495 .open = can_rcvlist_eff_proc_open,
496 .read = seq_read,
497 .llseek = seq_lseek,
498 .release = single_release,
499};
500
455/* 501/*
456 * proc utility functions 502 * proc utility functions
457 */ 503 */
@@ -491,8 +537,8 @@ void can_init_proc(void)
491 &can_rcvlist_proc_fops, (void *)RX_FIL); 537 &can_rcvlist_proc_fops, (void *)RX_FIL);
492 pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir, 538 pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
493 &can_rcvlist_proc_fops, (void *)RX_INV); 539 &can_rcvlist_proc_fops, (void *)RX_INV);
494 pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir, 540 pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
495 &can_rcvlist_proc_fops, (void *)RX_EFF); 541 &can_rcvlist_eff_proc_fops);
496 pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir, 542 pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
497 &can_rcvlist_sff_proc_fops); 543 &can_rcvlist_sff_proc_fops);
498} 544}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b0dfce77656a..05be0c181695 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL(ceph_osdc_sync);
2491 * Call all pending notify callbacks - for use after a watch is 2491 * Call all pending notify callbacks - for use after a watch is
2492 * unregistered, to make sure no more callbacks for it will be invoked 2492 * unregistered, to make sure no more callbacks for it will be invoked
2493 */ 2493 */
2494extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 2494void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
2495{ 2495{
2496 flush_workqueue(osdc->notify_wq); 2496 flush_workqueue(osdc->notify_wq);
2497} 2497}
diff --git a/net/core/Makefile b/net/core/Makefile
index 826b925aa453..71093d94ad2b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9
10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ 10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
12 sock_diag.o dev_ioctl.o 12 sock_diag.o dev_ioctl.o tso.o
13 13
14obj-$(CONFIG_XFRM) += flow.o 14obj-$(CONFIG_XFRM) += flow.o
15obj-y += net-sysfs.o 15obj-y += net-sysfs.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a16ed7bbe376..6b1c04ca1d50 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -739,11 +739,15 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
739 __sum16 sum; 739 __sum16 sum;
740 740
741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
742 if (likely(!sum)) { 742 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
743 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) 743 !skb->csum_complete_sw)
744 netdev_rx_csum_fault(skb->dev); 744 netdev_rx_csum_fault(skb->dev);
745 skb->ip_summed = CHECKSUM_UNNECESSARY; 745
746 } 746 /* Save checksum complete for later use */
747 skb->csum = sum;
748 skb->ip_summed = CHECKSUM_COMPLETE;
749 skb->csum_complete_sw = 1;
750
747 return sum; 751 return sum;
748} 752}
749EXPORT_SYMBOL(__skb_checksum_complete_head); 753EXPORT_SYMBOL(__skb_checksum_complete_head);
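The reworked helper now always caches the software result: the folded sum is stored back in skb->csum, ip_summed is set to CHECKSUM_COMPLETE and the new csum_complete_sw bit records that the value came from software, so a later validation pass over the same skb can reuse it. The same bit gates netdev_rx_csum_fault(), so the driver is only blamed for a bad CHECKSUM_COMPLETE value that the hardware, not an earlier software pass, produced.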
diff --git a/net/core/dev.c b/net/core/dev.c
index 8908a68db449..30eedf677913 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1661} 1661}
1662EXPORT_SYMBOL_GPL(is_skb_forwardable); 1662EXPORT_SYMBOL_GPL(is_skb_forwardable);
1663 1663
1664int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1665{
1666 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1667 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1668 atomic_long_inc(&dev->rx_dropped);
1669 kfree_skb(skb);
1670 return NET_RX_DROP;
1671 }
1672 }
1673
1674 if (unlikely(!is_skb_forwardable(dev, skb))) {
1675 atomic_long_inc(&dev->rx_dropped);
1676 kfree_skb(skb);
1677 return NET_RX_DROP;
1678 }
1679
1680 skb_scrub_packet(skb, true);
1681 skb->protocol = eth_type_trans(skb, dev);
1682
1683 return 0;
1684}
1685EXPORT_SYMBOL_GPL(__dev_forward_skb);
1686
1664/** 1687/**
1665 * dev_forward_skb - loopback an skb to another netif 1688 * dev_forward_skb - loopback an skb to another netif
1666 * 1689 *
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
1681 */ 1704 */
1682int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1705int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1683{ 1706{
1684 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1707 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1685 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1686 atomic_long_inc(&dev->rx_dropped);
1687 kfree_skb(skb);
1688 return NET_RX_DROP;
1689 }
1690 }
1691
1692 if (unlikely(!is_skb_forwardable(dev, skb))) {
1693 atomic_long_inc(&dev->rx_dropped);
1694 kfree_skb(skb);
1695 return NET_RX_DROP;
1696 }
1697
1698 skb_scrub_packet(skb, true);
1699 skb->protocol = eth_type_trans(skb, dev);
1700
1701 return netif_rx_internal(skb);
1702} 1708}
1703EXPORT_SYMBOL_GPL(dev_forward_skb); 1709EXPORT_SYMBOL_GPL(dev_forward_skb);
1704 1710
@@ -2507,13 +2513,39 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2507 return 0; 2513 return 0;
2508} 2514}
2509 2515
2516/* If MPLS offload request, verify we are testing hardware MPLS features
2517 * instead of standard features for the netdev.
2518 */
2519#ifdef CONFIG_NET_MPLS_GSO
2520static netdev_features_t net_mpls_features(struct sk_buff *skb,
2521 netdev_features_t features,
2522 __be16 type)
2523{
2524 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2525 features &= skb->dev->mpls_features;
2526
2527 return features;
2528}
2529#else
2530static netdev_features_t net_mpls_features(struct sk_buff *skb,
2531 netdev_features_t features,
2532 __be16 type)
2533{
2534 return features;
2535}
2536#endif
2537
2510static netdev_features_t harmonize_features(struct sk_buff *skb, 2538static netdev_features_t harmonize_features(struct sk_buff *skb,
2511 netdev_features_t features) 2539 netdev_features_t features)
2512{ 2540{
2513 int tmp; 2541 int tmp;
2542 __be16 type;
2543
2544 type = skb_network_protocol(skb, &tmp);
2545 features = net_mpls_features(skb, features, type);
2514 2546
2515 if (skb->ip_summed != CHECKSUM_NONE && 2547 if (skb->ip_summed != CHECKSUM_NONE &&
2516 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { 2548 !can_checksum_protocol(features, type)) {
2517 features &= ~NETIF_F_ALL_CSUM; 2549 features &= ~NETIF_F_ALL_CSUM;
2518 } else if (illegal_highdma(skb->dev, skb)) { 2550 } else if (illegal_highdma(skb->dev, skb)) {
2519 features &= ~NETIF_F_SG; 2551 features &= ~NETIF_F_SG;
@@ -5689,10 +5721,6 @@ static void rollback_registered_many(struct list_head *head)
5689 */ 5721 */
5690 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5722 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5691 5723
5692 if (!dev->rtnl_link_ops ||
5693 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5694 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5695
5696 /* 5724 /*
5697 * Flush the unicast and multicast chains 5725 * Flush the unicast and multicast chains
5698 */ 5726 */
@@ -5702,6 +5730,10 @@ static void rollback_registered_many(struct list_head *head)
5702 if (dev->netdev_ops->ndo_uninit) 5730 if (dev->netdev_ops->ndo_uninit)
5703 dev->netdev_ops->ndo_uninit(dev); 5731 dev->netdev_ops->ndo_uninit(dev);
5704 5732
5733 if (!dev->rtnl_link_ops ||
5734 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5735 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5736
5705 /* Notifier chain MUST detach us all upper devices. */ 5737 /* Notifier chain MUST detach us all upper devices. */
5706 WARN_ON(netdev_has_any_upper_dev(dev)); 5738 WARN_ON(netdev_has_any_upper_dev(dev));
5707 5739
@@ -5927,10 +5959,7 @@ static void netdev_init_one_queue(struct net_device *dev,
5927 5959
5928static void netif_free_tx_queues(struct net_device *dev) 5960static void netif_free_tx_queues(struct net_device *dev)
5929{ 5961{
5930 if (is_vmalloc_addr(dev->_tx)) 5962 kvfree(dev->_tx);
5931 vfree(dev->_tx);
5932 else
5933 kfree(dev->_tx);
5934} 5963}
5935 5964
5936static int netif_alloc_netdev_queues(struct net_device *dev) 5965static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -6404,10 +6433,7 @@ void netdev_freemem(struct net_device *dev)
6404{ 6433{
6405 char *addr = (char *)dev - dev->padded; 6434 char *addr = (char *)dev - dev->padded;
6406 6435
6407 if (is_vmalloc_addr(addr)) 6436 kvfree(addr);
6408 vfree(addr);
6409 else
6410 kfree(addr);
6411} 6437}
6412 6438
6413/** 6439/**
@@ -6512,11 +6538,6 @@ free_all:
6512 6538
6513free_pcpu: 6539free_pcpu:
6514 free_percpu(dev->pcpu_refcnt); 6540 free_percpu(dev->pcpu_refcnt);
6515 netif_free_tx_queues(dev);
6516#ifdef CONFIG_SYSFS
6517 kfree(dev->_rx);
6518#endif
6519
6520free_dev: 6541free_dev:
6521 netdev_freemem(dev); 6542 netdev_freemem(dev);
6522 return NULL; 6543 return NULL;
@@ -6613,6 +6634,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
6613/** 6634/**
6614 * unregister_netdevice_many - unregister many devices 6635 * unregister_netdevice_many - unregister many devices
6615 * @head: list of devices 6636 * @head: list of devices
6637 *
6638 * Note: As most callers use a stack allocated list_head,
6639 * we force a list_del() to make sure stack wont be corrupted later.
6616 */ 6640 */
6617void unregister_netdevice_many(struct list_head *head) 6641void unregister_netdevice_many(struct list_head *head)
6618{ 6642{
@@ -6622,6 +6646,7 @@ void unregister_netdevice_many(struct list_head *head)
6622 rollback_registered_many(head); 6646 rollback_registered_many(head);
6623 list_for_each_entry(dev, head, unreg_list) 6647 list_for_each_entry(dev, head, unreg_list)
6624 net_set_todo(dev); 6648 net_set_todo(dev);
6649 list_del(head);
6625 } 6650 }
6626} 6651}
6627EXPORT_SYMBOL(unregister_netdevice_many); 6652EXPORT_SYMBOL(unregister_netdevice_many);
@@ -7077,7 +7102,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
7077 } 7102 }
7078 } 7103 }
7079 unregister_netdevice_many(&dev_kill_list); 7104 unregister_netdevice_many(&dev_kill_list);
7080 list_del(&dev_kill_list);
7081 rtnl_unlock(); 7105 rtnl_unlock();
7082} 7106}
7083 7107
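The forced list_del() makes the batched API safe for its common calling pattern, where the kill list is a stack-allocated list_head that would otherwise remain reachable from the devices' unreg_list entries after the caller's frame is gone; default_device_exit_batch() above drops its now-redundant list_del() accordingly. The expected caller shape, sketched with a hypothetical should_remove() predicate:

	/* Fragment: inside some cleanup path, rtnl_lock() held. */
	LIST_HEAD(kill_list);		/* stack-allocated list head */
	struct net_device *dev, *tmp;

	/* Queue every device we want gone (should_remove() is hypothetical). */
	for_each_netdev_safe(net, dev, tmp)
		if (should_remove(dev))
			unregister_netdevice_queue(dev, &kill_list);

	/* Detaches the head from the entries; no trailing list_del() needed. */
	unregister_netdevice_many(&kill_list);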
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 329d5794e7dc..b6b230600b97 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -225,6 +225,91 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
225} 225}
226EXPORT_SYMBOL(__hw_addr_unsync); 226EXPORT_SYMBOL(__hw_addr_unsync);
227 227
228/**
229 * __hw_addr_sync_dev - Synchonize device's multicast list
230 * @list: address list to syncronize
231 * @dev: device to sync
232 * @sync: function to call if address should be added
233 * @unsync: function to call if address should be removed
234 *
235 * This function is intended to be called from the ndo_set_rx_mode
236 * function of devices that require explicit address add/remove
237 * notifications. The unsync function may be NULL in which case
238 * the addresses requiring removal will simply be removed without
239 * any notification to the device.
240 **/
241int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
242 struct net_device *dev,
243 int (*sync)(struct net_device *, const unsigned char *),
244 int (*unsync)(struct net_device *,
245 const unsigned char *))
246{
247 struct netdev_hw_addr *ha, *tmp;
248 int err;
249
250 /* first go through and flush out any stale entries */
251 list_for_each_entry_safe(ha, tmp, &list->list, list) {
252 if (!ha->sync_cnt || ha->refcount != 1)
253 continue;
254
255 /* if unsync is defined and fails defer unsyncing address */
256 if (unsync && unsync(dev, ha->addr))
257 continue;
258
259 ha->sync_cnt--;
260 __hw_addr_del_entry(list, ha, false, false);
261 }
262
263 /* go through and sync new entries to the list */
264 list_for_each_entry_safe(ha, tmp, &list->list, list) {
265 if (ha->sync_cnt)
266 continue;
267
268 err = sync(dev, ha->addr);
269 if (err)
270 return err;
271
272 ha->sync_cnt++;
273 ha->refcount++;
274 }
275
276 return 0;
277}
278EXPORT_SYMBOL(__hw_addr_sync_dev);
279
280/**
281 * __hw_addr_unsync_dev - Remove synchronized addresses from device
282 * @list: address list to remove synchronized addresses from
283 * @dev: device to sync
284 * @unsync: function to call if address should be removed
285 *
286 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
287 * This function is intended to be called from the ndo_stop or ndo_open
288 * functions on devices that require explicit address add/remove
289 * notifications. If the unsync function pointer is NULL then this function
290 * can be used to just reset the sync_cnt for the addresses in the list.
291 **/
292void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
293 struct net_device *dev,
294 int (*unsync)(struct net_device *,
295 const unsigned char *))
296{
297 struct netdev_hw_addr *ha, *tmp;
298
299 list_for_each_entry_safe(ha, tmp, &list->list, list) {
300 if (!ha->sync_cnt)
301 continue;
302
303 /* if unsync is defined and fails defer unsyncing address */
304 if (unsync && unsync(dev, ha->addr))
305 continue;
306
307 ha->sync_cnt--;
308 __hw_addr_del_entry(list, ha, false, false);
309 }
310}
311EXPORT_SYMBOL(__hw_addr_unsync_dev);
312
228static void __hw_addr_flush(struct netdev_hw_addr_list *list) 313static void __hw_addr_flush(struct netdev_hw_addr_list *list)
229{ 314{
230 struct netdev_hw_addr *ha, *tmp; 315 struct netdev_hw_addr *ha, *tmp;
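Together the pair gives drivers a reference-counted diff of the address list: __hw_addr_sync_dev() hits the callbacks at most once per address via sync_cnt, and __hw_addr_unsync_dev() tears everything down again. A sketch of the intended call sites, with hypothetical foo_mc_sync()/foo_mc_unsync() callbacks and foo_hw_mc_add()/foo_hw_mc_del() driver helpers:

	#include <linux/netdevice.h>

	static int foo_mc_sync(struct net_device *dev, const unsigned char *addr)
	{
		/* Tell the hardware about one new multicast address. */
		return foo_hw_mc_add(netdev_priv(dev), addr);	/* hypothetical */
	}

	static int foo_mc_unsync(struct net_device *dev, const unsigned char *addr)
	{
		foo_hw_mc_del(netdev_priv(dev), addr);		/* hypothetical */
		return 0;
	}

	static void foo_set_rx_mode(struct net_device *dev)
	{
		/* Only entries that changed since the last call hit the callbacks. */
		__hw_addr_sync_dev(&dev->mc, dev, foo_mc_sync, foo_mc_unsync);
	}

	static int foo_stop(struct net_device *dev)
	{
		/* Forget everything foo_set_rx_mode() programmed. */
		__hw_addr_unsync_dev(&dev->mc, dev, foo_mc_unsync);
		return 0;
	}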
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 640ba0e5831c..17cb912793fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -557,6 +557,23 @@ err_out:
557 return ret; 557 return ret;
558} 558}
559 559
560static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
561 struct ethtool_rxnfc *rx_rings,
562 u32 size)
563{
564 int i;
565
566 if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
567 return -EFAULT;
568
569 /* Validate ring indices */
570 for (i = 0; i < size; i++)
571 if (indir[i] >= rx_rings->data)
572 return -EINVAL;
573
574 return 0;
575}
576
560static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, 577static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
561 void __user *useraddr) 578 void __user *useraddr)
562{ 579{
@@ -565,7 +582,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
565 int ret; 582 int ret;
566 583
567 if (!dev->ethtool_ops->get_rxfh_indir_size || 584 if (!dev->ethtool_ops->get_rxfh_indir_size ||
568 !dev->ethtool_ops->get_rxfh_indir) 585 !dev->ethtool_ops->get_rxfh)
569 return -EOPNOTSUPP; 586 return -EOPNOTSUPP;
570 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 587 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
571 if (dev_size == 0) 588 if (dev_size == 0)
@@ -591,7 +608,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
591 if (!indir) 608 if (!indir)
592 return -ENOMEM; 609 return -ENOMEM;
593 610
594 ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); 611 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL);
595 if (ret) 612 if (ret)
596 goto out; 613 goto out;
597 614
@@ -613,8 +630,9 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
613 u32 *indir; 630 u32 *indir;
614 const struct ethtool_ops *ops = dev->ethtool_ops; 631 const struct ethtool_ops *ops = dev->ethtool_ops;
615 int ret; 632 int ret;
633 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
616 634
617 if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir || 635 if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
618 !ops->get_rxnfc) 636 !ops->get_rxnfc)
619 return -EOPNOTSUPP; 637 return -EOPNOTSUPP;
620 638
@@ -643,28 +661,184 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
643 for (i = 0; i < dev_size; i++) 661 for (i = 0; i < dev_size; i++)
644 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 662 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
645 } else { 663 } else {
646 if (copy_from_user(indir, 664 ret = ethtool_copy_validate_indir(indir,
647 useraddr + 665 useraddr + ringidx_offset,
648 offsetof(struct ethtool_rxfh_indir, 666 &rx_rings,
649 ring_index[0]), 667 dev_size);
650 dev_size * sizeof(indir[0]))) { 668 if (ret)
669 goto out;
670 }
671
672 ret = ops->set_rxfh(dev, indir, NULL);
673
674out:
675 kfree(indir);
676 return ret;
677}
678
679static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
680 void __user *useraddr)
681{
682 int ret;
683 const struct ethtool_ops *ops = dev->ethtool_ops;
684 u32 user_indir_size, user_key_size;
685 u32 dev_indir_size = 0, dev_key_size = 0;
686 struct ethtool_rxfh rxfh;
687 u32 total_size;
688 u32 indir_bytes;
689 u32 *indir = NULL;
690 u8 *hkey = NULL;
691 u8 *rss_config;
692
693 if (!(dev->ethtool_ops->get_rxfh_indir_size ||
694 dev->ethtool_ops->get_rxfh_key_size) ||
695 !dev->ethtool_ops->get_rxfh)
696 return -EOPNOTSUPP;
697
698 if (ops->get_rxfh_indir_size)
699 dev_indir_size = ops->get_rxfh_indir_size(dev);
700 if (ops->get_rxfh_key_size)
701 dev_key_size = ops->get_rxfh_key_size(dev);
702
703 if ((dev_key_size + dev_indir_size) == 0)
704 return -EOPNOTSUPP;
705
706 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
707 return -EFAULT;
708 user_indir_size = rxfh.indir_size;
709 user_key_size = rxfh.key_size;
710
711 /* Check that reserved fields are 0 for now */
712 if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
713 return -EINVAL;
714
715 rxfh.indir_size = dev_indir_size;
716 rxfh.key_size = dev_key_size;
717 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
718 return -EFAULT;
719
720 /* If the user buffer size is 0, this is just a query for the
721 * device table size and key size. Otherwise, if the user size is
722 * not equal to the device table size or key size, it's an error.
723 */
724 if (!user_indir_size && !user_key_size)
725 return 0;
726
727 if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
728 (user_key_size && (user_key_size != dev_key_size)))
729 return -EINVAL;
730
731 indir_bytes = user_indir_size * sizeof(indir[0]);
732 total_size = indir_bytes + user_key_size;
733 rss_config = kzalloc(total_size, GFP_USER);
734 if (!rss_config)
735 return -ENOMEM;
736
737 if (user_indir_size)
738 indir = (u32 *)rss_config;
739
740 if (user_key_size)
741 hkey = rss_config + indir_bytes;
742
743 ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
744 if (!ret) {
745 if (copy_to_user(useraddr +
746 offsetof(struct ethtool_rxfh, rss_config[0]),
747 rss_config, total_size))
651 ret = -EFAULT; 748 ret = -EFAULT;
749 }
750
751 kfree(rss_config);
752
753 return ret;
754}
755
756static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
757 void __user *useraddr)
758{
759 int ret;
760 const struct ethtool_ops *ops = dev->ethtool_ops;
761 struct ethtool_rxnfc rx_rings;
762 struct ethtool_rxfh rxfh;
763 u32 dev_indir_size = 0, dev_key_size = 0, i;
764 u32 *indir = NULL, indir_bytes = 0;
765 u8 *hkey = NULL;
766 u8 *rss_config;
767 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
768
769 if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
770 !ops->get_rxnfc || !ops->set_rxfh)
771 return -EOPNOTSUPP;
772
773 if (ops->get_rxfh_indir_size)
774 dev_indir_size = ops->get_rxfh_indir_size(dev);
775 if (ops->get_rxfh_key_size)
776 dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
777 if ((dev_key_size + dev_indir_size) == 0)
778 return -EOPNOTSUPP;
779
780 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
781 return -EFAULT;
782
783 /* Check that reserved fields are 0 for now */
784 if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
785 return -EINVAL;
786
787 /* If either indir or hash key is valid, proceed further.
788 * It is not valid to request that both be unchanged.
789 */
790 if ((rxfh.indir_size &&
791 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
792 rxfh.indir_size != dev_indir_size) ||
793 (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
794 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
795 rxfh.key_size == 0))
796 return -EINVAL;
797
798 if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
799 indir_bytes = dev_indir_size * sizeof(indir[0]);
800
801 rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
802 if (!rss_config)
803 return -ENOMEM;
804
805 rx_rings.cmd = ETHTOOL_GRXRINGS;
806 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
807 if (ret)
808 goto out;
809
810 /* rxfh.indir_size == 0 means reset the indir table to default.
811 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
812 */
813 if (rxfh.indir_size &&
814 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
815 indir = (u32 *)rss_config;
816 ret = ethtool_copy_validate_indir(indir,
817 useraddr + rss_cfg_offset,
818 &rx_rings,
819 rxfh.indir_size);
820 if (ret)
652 goto out; 821 goto out;
653 } 822 } else if (rxfh.indir_size == 0) {
823 indir = (u32 *)rss_config;
824 for (i = 0; i < dev_indir_size; i++)
825 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
826 }
654 827
655 /* Validate ring indices */ 828 if (rxfh.key_size) {
656 for (i = 0; i < dev_size; i++) { 829 hkey = rss_config + indir_bytes;
657 if (indir[i] >= rx_rings.data) { 830 if (copy_from_user(hkey,
658 ret = -EINVAL; 831 useraddr + rss_cfg_offset + indir_bytes,
659 goto out; 832 rxfh.key_size)) {
660 } 833 ret = -EFAULT;
834 goto out;
661 } 835 }
662 } 836 }
663 837
664 ret = ops->set_rxfh_indir(dev, indir); 838 ret = ops->set_rxfh(dev, indir, hkey);
665 839
666out: 840out:
667 kfree(indir); 841 kfree(rss_config);
668 return ret; 842 return ret;
669} 843}
670 844
@@ -1491,6 +1665,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1491 case ETHTOOL_GRXCLSRULE: 1665 case ETHTOOL_GRXCLSRULE:
1492 case ETHTOOL_GRXCLSRLALL: 1666 case ETHTOOL_GRXCLSRLALL:
1493 case ETHTOOL_GRXFHINDIR: 1667 case ETHTOOL_GRXFHINDIR:
1668 case ETHTOOL_GRSSH:
1494 case ETHTOOL_GFEATURES: 1669 case ETHTOOL_GFEATURES:
1495 case ETHTOOL_GCHANNELS: 1670 case ETHTOOL_GCHANNELS:
1496 case ETHTOOL_GET_TS_INFO: 1671 case ETHTOOL_GET_TS_INFO:
@@ -1628,6 +1803,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1628 case ETHTOOL_SRXFHINDIR: 1803 case ETHTOOL_SRXFHINDIR:
1629 rc = ethtool_set_rxfh_indir(dev, useraddr); 1804 rc = ethtool_set_rxfh_indir(dev, useraddr);
1630 break; 1805 break;
1806 case ETHTOOL_GRSSH:
1807 rc = ethtool_get_rxfh(dev, useraddr);
1808 break;
1809 case ETHTOOL_SRSSH:
1810 rc = ethtool_set_rxfh(dev, useraddr);
1811 break;
1631 case ETHTOOL_GFEATURES: 1812 case ETHTOOL_GFEATURES:
1632 rc = ethtool_get_features(dev, useraddr); 1813 rc = ethtool_get_features(dev, useraddr);
1633 break; 1814 break;
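ETHTOOL_GRSSH is designed as a two-step ioctl: a first call with indir_size and key_size of 0 only reports the device's table and key sizes (the kernel writes them back into the header), after which userspace allocates the variable-length rss_config[] tail and calls again with the exact sizes. A sketch of that handshake (interface name hard-coded, error handling trimmed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_rxfh head = { .cmd = ETHTOOL_GRSSH };
		struct ethtool_rxfh *rxfh;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		/* Step 1: sizes only -- the kernel fills in indir_size/key_size. */
		ifr.ifr_data = (void *)&head;
		ioctl(fd, SIOCETHTOOL, &ifr);

		/* Step 2: allocate the rss_config[] tail and fetch both tables. */
		rxfh = calloc(1, sizeof(*rxfh) +
				 head.indir_size * sizeof(__u32) + head.key_size);
		rxfh->cmd = ETHTOOL_GRSSH;
		rxfh->indir_size = head.indir_size;
		rxfh->key_size = head.key_size;
		ifr.ifr_data = (void *)rxfh;
		ioctl(fd, SIOCETHTOOL, &ifr);

		printf("indir entries: %u, key bytes: %u\n",
		       rxfh->indir_size, rxfh->key_size);
		free(rxfh);
		return 0;
	}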
diff --git a/net/core/filter.c b/net/core/filter.c
index 4aec7b93f1a9..735fad897496 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -45,6 +45,27 @@
45#include <linux/seccomp.h> 45#include <linux/seccomp.h>
46#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
47 47
48/* Registers */
49#define BPF_R0 regs[BPF_REG_0]
50#define BPF_R1 regs[BPF_REG_1]
51#define BPF_R2 regs[BPF_REG_2]
52#define BPF_R3 regs[BPF_REG_3]
53#define BPF_R4 regs[BPF_REG_4]
54#define BPF_R5 regs[BPF_REG_5]
55#define BPF_R6 regs[BPF_REG_6]
56#define BPF_R7 regs[BPF_REG_7]
57#define BPF_R8 regs[BPF_REG_8]
58#define BPF_R9 regs[BPF_REG_9]
59#define BPF_R10 regs[BPF_REG_10]
60
61/* Named registers */
62#define DST regs[insn->dst_reg]
63#define SRC regs[insn->src_reg]
64#define FP regs[BPF_REG_FP]
65#define ARG1 regs[BPF_REG_ARG1]
66#define CTX regs[BPF_REG_CTX]
67#define IMM insn->imm
68
48/* No hurry in this branch 69/* No hurry in this branch
49 * 70 *
50 * Exported for the bpf jit load helper. 71 * Exported for the bpf jit load helper.
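These named-register macros let the interpreter body read like assembly (DST = SRC and so on) instead of indexing regs[] by hand. Dispatch itself uses GCC's computed-goto extension: the jumptable introduced below maps each opcode to a &&label, and every handler ends by jumping straight to the next one rather than looping back through a switch. A minimal standalone sketch of that direct-threaded technique on a toy two-opcode machine (not the BPF instruction set):

	#include <stdio.h>

	/* Direct-threaded dispatch with GCC computed goto, the technique
	 * behind __sk_run_filter()'s jumptable. */
	enum { OP_INC, OP_DEC, OP_HALT };

	static int run(const unsigned char *insn)
	{
		static const void *jumptable[] = {
			[OP_INC]  = &&do_inc,
			[OP_DEC]  = &&do_dec,
			[OP_HALT] = &&do_halt,
		};
		int acc = 0;

	#define CONT ({ insn++; goto *jumptable[*insn]; })

		goto *jumptable[*insn];	/* first dispatch */
	do_inc:
		acc++;
		CONT;
	do_dec:
		acc--;
		CONT;
	do_halt:
		return acc;
	#undef CONT
	}

	int main(void)
	{
		const unsigned char prog[] = { OP_INC, OP_INC, OP_DEC, OP_HALT };

		printf("%d\n", run(prog));	/* prints 1 */
		return 0;
	}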
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
57 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 78 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
58 else if (k >= SKF_LL_OFF) 79 else if (k >= SKF_LL_OFF)
59 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 80 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
60
61 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 81 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
62 return ptr; 82 return ptr;
83
63 return NULL; 84 return NULL;
64} 85}
65 86
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
68{ 89{
69 if (k >= 0) 90 if (k >= 0)
70 return skb_header_pointer(skb, k, size, buffer); 91 return skb_header_pointer(skb, k, size, buffer);
92
71 return bpf_internal_load_pointer_neg_helper(skb, k, size); 93 return bpf_internal_load_pointer_neg_helper(skb, k, size);
72} 94}
73 95
@@ -122,13 +144,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
122 return 0; 144 return 0;
123} 145}
124 146
125/* Register mappings for user programs. */
126#define A_REG 0
127#define X_REG 7
128#define TMP_REG 8
129#define ARG2_REG 2
130#define ARG3_REG 3
131
132/** 147/**
133 * __sk_run_filter - run a filter on a given context 148 * __sk_run_filter - run a filter on a given context
134 * @ctx: buffer to run the filter on 149 * @ctx: buffer to run the filter on
@@ -138,447 +153,442 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
138 * keep, 0 for none. @ctx is the data we are operating on, @insn is the 153 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
139 * array of filter instructions. 154 * array of filter instructions.
140 */ 155 */
141unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) 156static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
142{ 157{
143 u64 stack[MAX_BPF_STACK / sizeof(u64)]; 158 u64 stack[MAX_BPF_STACK / sizeof(u64)];
144 u64 regs[MAX_BPF_REG], tmp; 159 u64 regs[MAX_BPF_REG], tmp;
145 void *ptr;
146 int off;
147
148#define K insn->imm
149#define A regs[insn->a_reg]
150#define X regs[insn->x_reg]
151#define R0 regs[0]
152
153#define CONT ({insn++; goto select_insn; })
154#define CONT_JMP ({insn++; goto select_insn; })
155
156 static const void *jumptable[256] = { 160 static const void *jumptable[256] = {
157 [0 ... 255] = &&default_label, 161 [0 ... 255] = &&default_label,
158 /* Now overwrite non-defaults ... */ 162 /* Now overwrite non-defaults ... */
159#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C 163 /* 32 bit ALU operations */
160 DL(BPF_ALU, BPF_ADD, BPF_X), 164 [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
161 DL(BPF_ALU, BPF_ADD, BPF_K), 165 [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
162 DL(BPF_ALU, BPF_SUB, BPF_X), 166 [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
163 DL(BPF_ALU, BPF_SUB, BPF_K), 167 [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
164 DL(BPF_ALU, BPF_AND, BPF_X), 168 [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
165 DL(BPF_ALU, BPF_AND, BPF_K), 169 [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
166 DL(BPF_ALU, BPF_OR, BPF_X), 170 [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
167 DL(BPF_ALU, BPF_OR, BPF_K), 171 [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
168 DL(BPF_ALU, BPF_LSH, BPF_X), 172 [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
169 DL(BPF_ALU, BPF_LSH, BPF_K), 173 [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
170 DL(BPF_ALU, BPF_RSH, BPF_X), 174 [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
171 DL(BPF_ALU, BPF_RSH, BPF_K), 175 [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
172 DL(BPF_ALU, BPF_XOR, BPF_X), 176 [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
173 DL(BPF_ALU, BPF_XOR, BPF_K), 177 [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
174 DL(BPF_ALU, BPF_MUL, BPF_X), 178 [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
175 DL(BPF_ALU, BPF_MUL, BPF_K), 179 [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
176 DL(BPF_ALU, BPF_MOV, BPF_X), 180 [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
177 DL(BPF_ALU, BPF_MOV, BPF_K), 181 [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
178 DL(BPF_ALU, BPF_DIV, BPF_X), 182 [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
179 DL(BPF_ALU, BPF_DIV, BPF_K), 183 [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
180 DL(BPF_ALU, BPF_MOD, BPF_X), 184 [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
181 DL(BPF_ALU, BPF_MOD, BPF_K), 185 [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
182 DL(BPF_ALU, BPF_NEG, 0), 186 [BPF_ALU | BPF_NEG] = &&ALU_NEG,
183 DL(BPF_ALU, BPF_END, BPF_TO_BE), 187 [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
184 DL(BPF_ALU, BPF_END, BPF_TO_LE), 188 [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
185 DL(BPF_ALU64, BPF_ADD, BPF_X), 189 /* 64 bit ALU operations */
186 DL(BPF_ALU64, BPF_ADD, BPF_K), 190 [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
187 DL(BPF_ALU64, BPF_SUB, BPF_X), 191 [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
188 DL(BPF_ALU64, BPF_SUB, BPF_K), 192 [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
189 DL(BPF_ALU64, BPF_AND, BPF_X), 193 [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
190 DL(BPF_ALU64, BPF_AND, BPF_K), 194 [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
191 DL(BPF_ALU64, BPF_OR, BPF_X), 195 [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
192 DL(BPF_ALU64, BPF_OR, BPF_K), 196 [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
193 DL(BPF_ALU64, BPF_LSH, BPF_X), 197 [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
194 DL(BPF_ALU64, BPF_LSH, BPF_K), 198 [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
195 DL(BPF_ALU64, BPF_RSH, BPF_X), 199 [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
196 DL(BPF_ALU64, BPF_RSH, BPF_K), 200 [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
197 DL(BPF_ALU64, BPF_XOR, BPF_X), 201 [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
198 DL(BPF_ALU64, BPF_XOR, BPF_K), 202 [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
199 DL(BPF_ALU64, BPF_MUL, BPF_X), 203 [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
200 DL(BPF_ALU64, BPF_MUL, BPF_K), 204 [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
201 DL(BPF_ALU64, BPF_MOV, BPF_X), 205 [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
202 DL(BPF_ALU64, BPF_MOV, BPF_K), 206 [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
203 DL(BPF_ALU64, BPF_ARSH, BPF_X), 207 [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
204 DL(BPF_ALU64, BPF_ARSH, BPF_K), 208 [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
205 DL(BPF_ALU64, BPF_DIV, BPF_X), 209 [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
206 DL(BPF_ALU64, BPF_DIV, BPF_K), 210 [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
207 DL(BPF_ALU64, BPF_MOD, BPF_X), 211 [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
208 DL(BPF_ALU64, BPF_MOD, BPF_K), 212 [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
209 DL(BPF_ALU64, BPF_NEG, 0), 213 [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
210 DL(BPF_JMP, BPF_CALL, 0), 214 [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
211 DL(BPF_JMP, BPF_JA, 0), 215 /* Call instruction */
212 DL(BPF_JMP, BPF_JEQ, BPF_X), 216 [BPF_JMP | BPF_CALL] = &&JMP_CALL,
213 DL(BPF_JMP, BPF_JEQ, BPF_K), 217 /* Jumps */
214 DL(BPF_JMP, BPF_JNE, BPF_X), 218 [BPF_JMP | BPF_JA] = &&JMP_JA,
215 DL(BPF_JMP, BPF_JNE, BPF_K), 219 [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
216 DL(BPF_JMP, BPF_JGT, BPF_X), 220 [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
217 DL(BPF_JMP, BPF_JGT, BPF_K), 221 [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
218 DL(BPF_JMP, BPF_JGE, BPF_X), 222 [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
219 DL(BPF_JMP, BPF_JGE, BPF_K), 223 [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
220 DL(BPF_JMP, BPF_JSGT, BPF_X), 224 [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
221 DL(BPF_JMP, BPF_JSGT, BPF_K), 225 [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
222 DL(BPF_JMP, BPF_JSGE, BPF_X), 226 [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
223 DL(BPF_JMP, BPF_JSGE, BPF_K), 227 [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
224 DL(BPF_JMP, BPF_JSET, BPF_X), 228 [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
225 DL(BPF_JMP, BPF_JSET, BPF_K), 229 [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
226 DL(BPF_JMP, BPF_EXIT, 0), 230 [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
227 DL(BPF_STX, BPF_MEM, BPF_B), 231 [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
228 DL(BPF_STX, BPF_MEM, BPF_H), 232 [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
229 DL(BPF_STX, BPF_MEM, BPF_W), 233 /* Program return */
230 DL(BPF_STX, BPF_MEM, BPF_DW), 234 [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
231 DL(BPF_STX, BPF_XADD, BPF_W), 235 /* Store instructions */
232 DL(BPF_STX, BPF_XADD, BPF_DW), 236 [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
233 DL(BPF_ST, BPF_MEM, BPF_B), 237 [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
234 DL(BPF_ST, BPF_MEM, BPF_H), 238 [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
235 DL(BPF_ST, BPF_MEM, BPF_W), 239 [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
236 DL(BPF_ST, BPF_MEM, BPF_DW), 240 [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
237 DL(BPF_LDX, BPF_MEM, BPF_B), 241 [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
238 DL(BPF_LDX, BPF_MEM, BPF_H), 242 [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
239 DL(BPF_LDX, BPF_MEM, BPF_W), 243 [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
240 DL(BPF_LDX, BPF_MEM, BPF_DW), 244 [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
241 DL(BPF_LD, BPF_ABS, BPF_W), 245 [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
242 DL(BPF_LD, BPF_ABS, BPF_H), 246 /* Load instructions */
243 DL(BPF_LD, BPF_ABS, BPF_B), 247 [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
244 DL(BPF_LD, BPF_IND, BPF_W), 248 [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
245 DL(BPF_LD, BPF_IND, BPF_H), 249 [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
246 DL(BPF_LD, BPF_IND, BPF_B), 250 [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
247#undef DL 251 [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
252 [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
253 [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
254 [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
255 [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
256 [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
248 }; 257 };
258 void *ptr;
259 int off;
260
261#define CONT ({ insn++; goto select_insn; })
262#define CONT_JMP ({ insn++; goto select_insn; })
249 263
250 regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; 264 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
251 regs[ARG1_REG] = (u64) (unsigned long) ctx; 265 ARG1 = (u64) (unsigned long) ctx;
252 regs[A_REG] = 0; 266
253 regs[X_REG] = 0; 267 /* Registers used in classic BPF programs need to be reset first. */
268 regs[BPF_REG_A] = 0;
269 regs[BPF_REG_X] = 0;
254 270
255select_insn: 271select_insn:
256 goto *jumptable[insn->code]; 272 goto *jumptable[insn->code];
257 273
258 /* ALU */ 274 /* ALU */
259#define ALU(OPCODE, OP) \ 275#define ALU(OPCODE, OP) \
260 BPF_ALU64_##OPCODE##_BPF_X: \ 276 ALU64_##OPCODE##_X: \
261 A = A OP X; \ 277 DST = DST OP SRC; \
262 CONT; \ 278 CONT; \
263 BPF_ALU_##OPCODE##_BPF_X: \ 279 ALU_##OPCODE##_X: \
264 A = (u32) A OP (u32) X; \ 280 DST = (u32) DST OP (u32) SRC; \
265 CONT; \ 281 CONT; \
266 BPF_ALU64_##OPCODE##_BPF_K: \ 282 ALU64_##OPCODE##_K: \
267 A = A OP K; \ 283 DST = DST OP IMM; \
268 CONT; \ 284 CONT; \
269 BPF_ALU_##OPCODE##_BPF_K: \ 285 ALU_##OPCODE##_K: \
270 A = (u32) A OP (u32) K; \ 286 DST = (u32) DST OP (u32) IMM; \
271 CONT; 287 CONT;
272 288
273 ALU(BPF_ADD, +) 289 ALU(ADD, +)
274 ALU(BPF_SUB, -) 290 ALU(SUB, -)
275 ALU(BPF_AND, &) 291 ALU(AND, &)
276 ALU(BPF_OR, |) 292 ALU(OR, |)
277 ALU(BPF_LSH, <<) 293 ALU(LSH, <<)
278 ALU(BPF_RSH, >>) 294 ALU(RSH, >>)
279 ALU(BPF_XOR, ^) 295 ALU(XOR, ^)
280 ALU(BPF_MUL, *) 296 ALU(MUL, *)
281#undef ALU 297#undef ALU
282 BPF_ALU_BPF_NEG_0: 298 ALU_NEG:
283 A = (u32) -A; 299 DST = (u32) -DST;
284 CONT; 300 CONT;
285 BPF_ALU64_BPF_NEG_0: 301 ALU64_NEG:
286 A = -A; 302 DST = -DST;
287 CONT; 303 CONT;
288 BPF_ALU_BPF_MOV_BPF_X: 304 ALU_MOV_X:
289 A = (u32) X; 305 DST = (u32) SRC;
290 CONT; 306 CONT;
291 BPF_ALU_BPF_MOV_BPF_K: 307 ALU_MOV_K:
292 A = (u32) K; 308 DST = (u32) IMM;
293 CONT; 309 CONT;
294 BPF_ALU64_BPF_MOV_BPF_X: 310 ALU64_MOV_X:
295 A = X; 311 DST = SRC;
296 CONT; 312 CONT;
297 BPF_ALU64_BPF_MOV_BPF_K: 313 ALU64_MOV_K:
298 A = K; 314 DST = IMM;
299 CONT; 315 CONT;
300 BPF_ALU64_BPF_ARSH_BPF_X: 316 ALU64_ARSH_X:
301 (*(s64 *) &A) >>= X; 317 (*(s64 *) &DST) >>= SRC;
302 CONT; 318 CONT;
303 BPF_ALU64_BPF_ARSH_BPF_K: 319 ALU64_ARSH_K:
304 (*(s64 *) &A) >>= K; 320 (*(s64 *) &DST) >>= IMM;
305 CONT; 321 CONT;
306 BPF_ALU64_BPF_MOD_BPF_X: 322 ALU64_MOD_X:
307 if (unlikely(X == 0)) 323 if (unlikely(SRC == 0))
308 return 0; 324 return 0;
309 tmp = A; 325 tmp = DST;
310 A = do_div(tmp, X); 326 DST = do_div(tmp, SRC);
311 CONT; 327 CONT;
312 BPF_ALU_BPF_MOD_BPF_X: 328 ALU_MOD_X:
313 if (unlikely(X == 0)) 329 if (unlikely(SRC == 0))
314 return 0; 330 return 0;
315 tmp = (u32) A; 331 tmp = (u32) DST;
316 A = do_div(tmp, (u32) X); 332 DST = do_div(tmp, (u32) SRC);
317 CONT; 333 CONT;
318 BPF_ALU64_BPF_MOD_BPF_K: 334 ALU64_MOD_K:
319 tmp = A; 335 tmp = DST;
320 A = do_div(tmp, K); 336 DST = do_div(tmp, IMM);
321 CONT; 337 CONT;
322 BPF_ALU_BPF_MOD_BPF_K: 338 ALU_MOD_K:
323 tmp = (u32) A; 339 tmp = (u32) DST;
324 A = do_div(tmp, (u32) K); 340 DST = do_div(tmp, (u32) IMM);
325 CONT; 341 CONT;
326 BPF_ALU64_BPF_DIV_BPF_X: 342 ALU64_DIV_X:
327 if (unlikely(X == 0)) 343 if (unlikely(SRC == 0))
328 return 0; 344 return 0;
329 do_div(A, X); 345 do_div(DST, SRC);
330 CONT; 346 CONT;
331 BPF_ALU_BPF_DIV_BPF_X: 347 ALU_DIV_X:
332 if (unlikely(X == 0)) 348 if (unlikely(SRC == 0))
333 return 0; 349 return 0;
334 tmp = (u32) A; 350 tmp = (u32) DST;
335 do_div(tmp, (u32) X); 351 do_div(tmp, (u32) SRC);
336 A = (u32) tmp; 352 DST = (u32) tmp;
337 CONT; 353 CONT;
338 BPF_ALU64_BPF_DIV_BPF_K: 354 ALU64_DIV_K:
339 do_div(A, K); 355 do_div(DST, IMM);
340 CONT; 356 CONT;
341 BPF_ALU_BPF_DIV_BPF_K: 357 ALU_DIV_K:
342 tmp = (u32) A; 358 tmp = (u32) DST;
343 do_div(tmp, (u32) K); 359 do_div(tmp, (u32) IMM);
344 A = (u32) tmp; 360 DST = (u32) tmp;
345 CONT; 361 CONT;
346 BPF_ALU_BPF_END_BPF_TO_BE: 362 ALU_END_TO_BE:
347 switch (K) { 363 switch (IMM) {
348 case 16: 364 case 16:
349 A = (__force u16) cpu_to_be16(A); 365 DST = (__force u16) cpu_to_be16(DST);
350 break; 366 break;
351 case 32: 367 case 32:
352 A = (__force u32) cpu_to_be32(A); 368 DST = (__force u32) cpu_to_be32(DST);
353 break; 369 break;
354 case 64: 370 case 64:
355 A = (__force u64) cpu_to_be64(A); 371 DST = (__force u64) cpu_to_be64(DST);
356 break; 372 break;
357 } 373 }
358 CONT; 374 CONT;
359 BPF_ALU_BPF_END_BPF_TO_LE: 375 ALU_END_TO_LE:
360 switch (K) { 376 switch (IMM) {
361 case 16: 377 case 16:
362 A = (__force u16) cpu_to_le16(A); 378 DST = (__force u16) cpu_to_le16(DST);
363 break; 379 break;
364 case 32: 380 case 32:
365 A = (__force u32) cpu_to_le32(A); 381 DST = (__force u32) cpu_to_le32(DST);
366 break; 382 break;
367 case 64: 383 case 64:
368 A = (__force u64) cpu_to_le64(A); 384 DST = (__force u64) cpu_to_le64(DST);
369 break; 385 break;
370 } 386 }
371 CONT; 387 CONT;
372 388
373 /* CALL */ 389 /* CALL */
374 BPF_JMP_BPF_CALL_0: 390 JMP_CALL:
375 /* Function call scratches R1-R5 registers, preserves R6-R9, 391 /* Function call scratches BPF_R1-BPF_R5 registers,
376 * and stores return value into R0. 392 * preserves BPF_R6-BPF_R9, and stores return value
393 * into BPF_R0.
377 */ 394 */
378 R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3], 395 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
379 regs[4], regs[5]); 396 BPF_R4, BPF_R5);
380 CONT; 397 CONT;
381 398
382 /* JMP */ 399 /* JMP */
383 BPF_JMP_BPF_JA_0: 400 JMP_JA:
384 insn += insn->off; 401 insn += insn->off;
385 CONT; 402 CONT;
386 BPF_JMP_BPF_JEQ_BPF_X: 403 JMP_JEQ_X:
387 if (A == X) { 404 if (DST == SRC) {
388 insn += insn->off; 405 insn += insn->off;
389 CONT_JMP; 406 CONT_JMP;
390 } 407 }
391 CONT; 408 CONT;
392 BPF_JMP_BPF_JEQ_BPF_K: 409 JMP_JEQ_K:
393 if (A == K) { 410 if (DST == IMM) {
394 insn += insn->off; 411 insn += insn->off;
395 CONT_JMP; 412 CONT_JMP;
396 } 413 }
397 CONT; 414 CONT;
398 BPF_JMP_BPF_JNE_BPF_X: 415 JMP_JNE_X:
399 if (A != X) { 416 if (DST != SRC) {
400 insn += insn->off; 417 insn += insn->off;
401 CONT_JMP; 418 CONT_JMP;
402 } 419 }
403 CONT; 420 CONT;
404 BPF_JMP_BPF_JNE_BPF_K: 421 JMP_JNE_K:
405 if (A != K) { 422 if (DST != IMM) {
406 insn += insn->off; 423 insn += insn->off;
407 CONT_JMP; 424 CONT_JMP;
408 } 425 }
409 CONT; 426 CONT;
410 BPF_JMP_BPF_JGT_BPF_X: 427 JMP_JGT_X:
411 if (A > X) { 428 if (DST > SRC) {
412 insn += insn->off; 429 insn += insn->off;
413 CONT_JMP; 430 CONT_JMP;
414 } 431 }
415 CONT; 432 CONT;
416 BPF_JMP_BPF_JGT_BPF_K: 433 JMP_JGT_K:
417 if (A > K) { 434 if (DST > IMM) {
418 insn += insn->off; 435 insn += insn->off;
419 CONT_JMP; 436 CONT_JMP;
420 } 437 }
421 CONT; 438 CONT;
422 BPF_JMP_BPF_JGE_BPF_X: 439 JMP_JGE_X:
423 if (A >= X) { 440 if (DST >= SRC) {
424 insn += insn->off; 441 insn += insn->off;
425 CONT_JMP; 442 CONT_JMP;
426 } 443 }
427 CONT; 444 CONT;
428 BPF_JMP_BPF_JGE_BPF_K: 445 JMP_JGE_K:
429 if (A >= K) { 446 if (DST >= IMM) {
430 insn += insn->off; 447 insn += insn->off;
431 CONT_JMP; 448 CONT_JMP;
432 } 449 }
433 CONT; 450 CONT;
434 BPF_JMP_BPF_JSGT_BPF_X: 451 JMP_JSGT_X:
435 if (((s64)A) > ((s64)X)) { 452 if (((s64) DST) > ((s64) SRC)) {
436 insn += insn->off; 453 insn += insn->off;
437 CONT_JMP; 454 CONT_JMP;
438 } 455 }
439 CONT; 456 CONT;
440 BPF_JMP_BPF_JSGT_BPF_K: 457 JMP_JSGT_K:
441 if (((s64)A) > ((s64)K)) { 458 if (((s64) DST) > ((s64) IMM)) {
442 insn += insn->off; 459 insn += insn->off;
443 CONT_JMP; 460 CONT_JMP;
444 } 461 }
445 CONT; 462 CONT;
446 BPF_JMP_BPF_JSGE_BPF_X: 463 JMP_JSGE_X:
447 if (((s64)A) >= ((s64)X)) { 464 if (((s64) DST) >= ((s64) SRC)) {
448 insn += insn->off; 465 insn += insn->off;
449 CONT_JMP; 466 CONT_JMP;
450 } 467 }
451 CONT; 468 CONT;
452 BPF_JMP_BPF_JSGE_BPF_K: 469 JMP_JSGE_K:
453 if (((s64)A) >= ((s64)K)) { 470 if (((s64) DST) >= ((s64) IMM)) {
454 insn += insn->off; 471 insn += insn->off;
455 CONT_JMP; 472 CONT_JMP;
456 } 473 }
457 CONT; 474 CONT;
458 BPF_JMP_BPF_JSET_BPF_X: 475 JMP_JSET_X:
459 if (A & X) { 476 if (DST & SRC) {
460 insn += insn->off; 477 insn += insn->off;
461 CONT_JMP; 478 CONT_JMP;
462 } 479 }
463 CONT; 480 CONT;
464 BPF_JMP_BPF_JSET_BPF_K: 481 JMP_JSET_K:
465 if (A & K) { 482 if (DST & IMM) {
466 insn += insn->off; 483 insn += insn->off;
467 CONT_JMP; 484 CONT_JMP;
468 } 485 }
469 CONT; 486 CONT;
470 BPF_JMP_BPF_EXIT_0: 487 JMP_EXIT:
471 return R0; 488 return BPF_R0;
472 489
473 /* STX and ST and LDX*/ 490 /* STX and ST and LDX*/
474#define LDST(SIZEOP, SIZE) \ 491#define LDST(SIZEOP, SIZE) \
475 BPF_STX_BPF_MEM_##SIZEOP: \ 492 STX_MEM_##SIZEOP: \
476 *(SIZE *)(unsigned long) (A + insn->off) = X; \ 493 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
477 CONT; \ 494 CONT; \
478 BPF_ST_BPF_MEM_##SIZEOP: \ 495 ST_MEM_##SIZEOP: \
479 *(SIZE *)(unsigned long) (A + insn->off) = K; \ 496 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
480 CONT; \ 497 CONT; \
481 BPF_LDX_BPF_MEM_##SIZEOP: \ 498 LDX_MEM_##SIZEOP: \
482 A = *(SIZE *)(unsigned long) (X + insn->off); \ 499 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
483 CONT; 500 CONT;
484 501
485 LDST(BPF_B, u8) 502 LDST(B, u8)
486 LDST(BPF_H, u16) 503 LDST(H, u16)
487 LDST(BPF_W, u32) 504 LDST(W, u32)
488 LDST(BPF_DW, u64) 505 LDST(DW, u64)
489#undef LDST 506#undef LDST
490 BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */ 507 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
491 atomic_add((u32) X, (atomic_t *)(unsigned long) 508 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
492 (A + insn->off)); 509 (DST + insn->off));
493 CONT; 510 CONT;
494 BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ 511 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
495 atomic64_add((u64) X, (atomic64_t *)(unsigned long) 512 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
496 (A + insn->off)); 513 (DST + insn->off));
497 CONT; 514 CONT;
498 BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ 515 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
499 off = K; 516 off = IMM;
500load_word: 517load_word:
501 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only 518 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
502 * appearing in the programs where ctx == skb. All programs 519 * only appearing in the programs where ctx ==
503 * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter() 520 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
504 * saves it in R6, internal BPF verifier will check that 521 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
505 * R6 == ctx. 522 * internal BPF verifier will check that BPF_R6 ==
523 * ctx.
506 * 524 *
507 * BPF_ABS and BPF_IND are wrappers of function calls, so 525 * BPF_ABS and BPF_IND are wrappers of function calls,
508 * they scratch R1-R5 registers, preserve R6-R9, and store 526 * so they scratch BPF_R1-BPF_R5 registers, preserve
509 * return value into R0. 527 * BPF_R6-BPF_R9, and store return value into BPF_R0.
510 * 528 *
511 * Implicit input: 529 * Implicit input:
512 * ctx 530 * ctx == skb == BPF_R6 == CTX
513 * 531 *
514 * Explicit input: 532 * Explicit input:
515 * X == any register 533 * SRC == any register
516 * K == 32-bit immediate 534 * IMM == 32-bit immediate
517 * 535 *
518 * Output: 536 * Output:
519 * R0 - 8/16/32-bit skb data converted to cpu endianness 537 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
520 */ 538 */
521 ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); 539
540 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
522 if (likely(ptr != NULL)) { 541 if (likely(ptr != NULL)) {
523 R0 = get_unaligned_be32(ptr); 542 BPF_R0 = get_unaligned_be32(ptr);
524 CONT; 543 CONT;
525 } 544 }
545
526 return 0; 546 return 0;
527 BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ 547 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
528 off = K; 548 off = IMM;
529load_half: 549load_half:
530 ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); 550 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
531 if (likely(ptr != NULL)) { 551 if (likely(ptr != NULL)) {
532 R0 = get_unaligned_be16(ptr); 552 BPF_R0 = get_unaligned_be16(ptr);
533 CONT; 553 CONT;
534 } 554 }
555
535 return 0; 556 return 0;
536 BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */ 557 LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
537 off = K; 558 off = IMM;
538load_byte: 559load_byte:
539 ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); 560 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
540 if (likely(ptr != NULL)) { 561 if (likely(ptr != NULL)) {
541 R0 = *(u8 *)ptr; 562 BPF_R0 = *(u8 *)ptr;
542 CONT; 563 CONT;
543 } 564 }
565
544 return 0; 566 return 0;
545 BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ 567 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
546 off = K + X; 568 off = IMM + SRC;
547 goto load_word; 569 goto load_word;
548 BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ 570 LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
549 off = K + X; 571 off = IMM + SRC;
550 goto load_half; 572 goto load_half;
551 BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */ 573 LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
552 off = K + X; 574 off = IMM + SRC;
553 goto load_byte; 575 goto load_byte;
554 576
555 default_label: 577 default_label:
556 /* If we ever reach this, we have a bug somewhere. */ 578 /* If we ever reach this, we have a bug somewhere. */
557 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code); 579 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
558 return 0; 580 return 0;
559#undef CONT_JMP
560#undef CONT
561
562#undef R0
563#undef X
564#undef A
565#undef K
566} 581}
567 582
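The interpreter above hinges on GCC's labels-as-values extension: jumptable[] holds the address of one label per opcode, and every handler ends by re-dispatching through CONT. A self-contained user-space sketch of the same dispatch technique, with made-up opcodes rather than the BPF encoding:

#include <stdio.h>

enum { OP_INC, OP_DEC, OP_HALT, OP_MAX };

static long run(const unsigned char *insn, long acc)
{
	/* GCC extension: &&label yields the label's address */
	static const void *jumptable[OP_MAX] = {
		[OP_INC]  = &&do_inc,
		[OP_DEC]  = &&do_dec,
		[OP_HALT] = &&do_halt,
	};
#define CONT ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[*insn];
do_inc:
	acc++;
	CONT;
do_dec:
	acc--;
	CONT;
do_halt:
	return acc;
#undef CONT
}

int main(void)
{
	const unsigned char prog[] = { OP_INC, OP_INC, OP_DEC, OP_HALT };

	printf("%ld\n", run(prog, 0));	/* prints 1 */
	return 0;
}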
568u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
569 const struct sock_filter_int *insni)
570 __attribute__ ((alias ("__sk_run_filter")));
571
572u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
573 const struct sock_filter_int *insni)
574 __attribute__ ((alias ("__sk_run_filter")));
575EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
576
 577/* Helper to find the offset of pkt_type in the sk_buff structure. We want 583/* Helper to find the offset of pkt_type in the sk_buff structure. We want
 578 * to make sure it's still a 3-bit field starting at a byte boundary; 584 * to make sure it's still a 3-bit field starting at a byte boundary;
579 * taken from arch/x86/net/bpf_jit_comp.c. 585 * taken from arch/x86/net/bpf_jit_comp.c.
580 */ 586 */
587#ifdef __BIG_ENDIAN_BITFIELD
588#define PKT_TYPE_MAX (7 << 5)
589#else
581#define PKT_TYPE_MAX 7 590#define PKT_TYPE_MAX 7
591#endif
582static unsigned int pkt_type_offset(void) 592static unsigned int pkt_type_offset(void)
583{ 593{
584 struct sk_buff skb_probe = { .pkt_type = ~0, }; 594 struct sk_buff skb_probe = { .pkt_type = ~0, };
@@ -594,16 +604,14 @@ static unsigned int pkt_type_offset(void)
594 return -1; 604 return -1;
595} 605}
596 606
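pkt_type_offset() relies on a probing trick: fill the pkt_type bitfield with all-ones in an otherwise zeroed sk_buff on the stack, then scan for the byte that reads back as PKT_TYPE_MAX. The same idea in a standalone sketch, using an illustrative struct instead of struct sk_buff:

#include <stdio.h>
#include <string.h>

struct probe {
	unsigned long pad;
	unsigned char pkt_type:3;
};

#define PROBE_PKT_TYPE_MAX 7	/* little-endian bitfield layout assumed */

int main(void)
{
	struct probe p;
	const unsigned char *ct = (const unsigned char *)&p;
	size_t off;

	memset(&p, 0, sizeof(p));
	p.pkt_type = ~0;	/* all-ones in the 3-bit field */

	for (off = 0; off < sizeof(p); off++)
		if (ct[off] == PROBE_PKT_TYPE_MAX)
			printf("pkt_type found at byte offset %zu\n", off);
	return 0;
}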
597static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 607static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
598{ 608{
599 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 609 return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
600
601 return __skb_get_poff(skb);
602} 610}
603 611
604static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 612static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
605{ 613{
606 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 614 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
607 struct nlattr *nla; 615 struct nlattr *nla;
608 616
609 if (skb_is_nonlinear(skb)) 617 if (skb_is_nonlinear(skb))
@@ -612,19 +620,19 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
612 if (skb->len < sizeof(struct nlattr)) 620 if (skb->len < sizeof(struct nlattr))
613 return 0; 621 return 0;
614 622
615 if (A > skb->len - sizeof(struct nlattr)) 623 if (a > skb->len - sizeof(struct nlattr))
616 return 0; 624 return 0;
617 625
618 nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); 626 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
619 if (nla) 627 if (nla)
620 return (void *) nla - (void *) skb->data; 628 return (void *) nla - (void *) skb->data;
621 629
622 return 0; 630 return 0;
623} 631}
624 632
625static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 633static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
626{ 634{
627 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 635 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
628 struct nlattr *nla; 636 struct nlattr *nla;
629 637
630 if (skb_is_nonlinear(skb)) 638 if (skb_is_nonlinear(skb))
@@ -633,25 +641,31 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
633 if (skb->len < sizeof(struct nlattr)) 641 if (skb->len < sizeof(struct nlattr))
634 return 0; 642 return 0;
635 643
636 if (A > skb->len - sizeof(struct nlattr)) 644 if (a > skb->len - sizeof(struct nlattr))
637 return 0; 645 return 0;
638 646
639 nla = (struct nlattr *) &skb->data[A]; 647 nla = (struct nlattr *) &skb->data[a];
640 if (nla->nla_len > skb->len - A) 648 if (nla->nla_len > skb->len - a)
641 return 0; 649 return 0;
642 650
643 nla = nla_find_nested(nla, X); 651 nla = nla_find_nested(nla, x);
644 if (nla) 652 if (nla)
645 return (void *) nla - (void *) skb->data; 653 return (void *) nla - (void *) skb->data;
646 654
647 return 0; 655 return 0;
648} 656}
649 657
650static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 658static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
651{ 659{
652 return raw_smp_processor_id(); 660 return raw_smp_processor_id();
653} 661}
654 662
 663/* Note that this only generates 32-bit random numbers. */
664static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
665{
666 return prandom_u32();
667}
668
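All of these helpers share one fixed signature -- five u64 arguments corresponding to BPF_R1..BPF_R5 -- which is what lets the JMP_CALL handler invoke any of them through a single call site. A standalone sketch of that convention, with a hypothetical helper name:

#include <stdio.h>

typedef unsigned long long u64;

/* Stand-in for a helper like __get_raw_cpu_id(); unused args ignored */
static u64 demo_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	(void)ctx; (void)a; (void)x; (void)r4; (void)r5;
	return 0;
}

int main(void)
{
	/* one function-pointer type fits every helper, as in JMP_CALL */
	u64 (*fn)(u64, u64, u64, u64, u64) = demo_cpu_id;

	printf("helper returned %llu\n", fn(0, 0, 0, 0, 0));
	return 0;
}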
655static bool convert_bpf_extensions(struct sock_filter *fp, 669static bool convert_bpf_extensions(struct sock_filter *fp,
656 struct sock_filter_int **insnp) 670 struct sock_filter_int **insnp)
657{ 671{
@@ -661,119 +675,83 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
661 case SKF_AD_OFF + SKF_AD_PROTOCOL: 675 case SKF_AD_OFF + SKF_AD_PROTOCOL:
662 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); 676 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
663 677
664 insn->code = BPF_LDX | BPF_MEM | BPF_H; 678 /* A = *(u16 *) (CTX + offsetof(protocol)) */
665 insn->a_reg = A_REG; 679 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
666 insn->x_reg = CTX_REG; 680 offsetof(struct sk_buff, protocol));
667 insn->off = offsetof(struct sk_buff, protocol);
668 insn++;
669
670 /* A = ntohs(A) [emitting a nop or swap16] */ 681 /* A = ntohs(A) [emitting a nop or swap16] */
671 insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; 682 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
672 insn->a_reg = A_REG;
673 insn->imm = 16;
674 break; 683 break;
675 684
676 case SKF_AD_OFF + SKF_AD_PKTTYPE: 685 case SKF_AD_OFF + SKF_AD_PKTTYPE:
677 insn->code = BPF_LDX | BPF_MEM | BPF_B; 686 *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
678 insn->a_reg = A_REG; 687 pkt_type_offset());
679 insn->x_reg = CTX_REG;
680 insn->off = pkt_type_offset();
681 if (insn->off < 0) 688 if (insn->off < 0)
682 return false; 689 return false;
683 insn++; 690 insn++;
684 691 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
685 insn->code = BPF_ALU | BPF_AND | BPF_K; 692#ifdef __BIG_ENDIAN_BITFIELD
686 insn->a_reg = A_REG; 693 insn++;
687 insn->imm = PKT_TYPE_MAX; 694 *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
695#endif
688 break; 696 break;
689 697
690 case SKF_AD_OFF + SKF_AD_IFINDEX: 698 case SKF_AD_OFF + SKF_AD_IFINDEX:
691 case SKF_AD_OFF + SKF_AD_HATYPE: 699 case SKF_AD_OFF + SKF_AD_HATYPE:
692 if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
693 insn->code = BPF_LDX | BPF_MEM | BPF_DW;
694 else
695 insn->code = BPF_LDX | BPF_MEM | BPF_W;
696 insn->a_reg = TMP_REG;
697 insn->x_reg = CTX_REG;
698 insn->off = offsetof(struct sk_buff, dev);
699 insn++;
700
701 insn->code = BPF_JMP | BPF_JNE | BPF_K;
702 insn->a_reg = TMP_REG;
703 insn->imm = 0;
704 insn->off = 1;
705 insn++;
706
707 insn->code = BPF_JMP | BPF_EXIT;
708 insn++;
709
710 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); 700 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
711 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); 701 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
712 702 BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
713 insn->a_reg = A_REG; 703
714 insn->x_reg = TMP_REG; 704 *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
715 705 BPF_REG_TMP, BPF_REG_CTX,
716 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { 706 offsetof(struct sk_buff, dev));
717 insn->code = BPF_LDX | BPF_MEM | BPF_W; 707 /* if (tmp != 0) goto pc + 1 */
718 insn->off = offsetof(struct net_device, ifindex); 708 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
719 } else { 709 *insn++ = BPF_EXIT_INSN();
720 insn->code = BPF_LDX | BPF_MEM | BPF_H; 710 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
721 insn->off = offsetof(struct net_device, type); 711 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
722 } 712 offsetof(struct net_device, ifindex));
713 else
714 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
715 offsetof(struct net_device, type));
723 break; 716 break;
724 717
725 case SKF_AD_OFF + SKF_AD_MARK: 718 case SKF_AD_OFF + SKF_AD_MARK:
726 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 719 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
727 720
728 insn->code = BPF_LDX | BPF_MEM | BPF_W; 721 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
729 insn->a_reg = A_REG; 722 offsetof(struct sk_buff, mark));
730 insn->x_reg = CTX_REG;
731 insn->off = offsetof(struct sk_buff, mark);
732 break; 723 break;
733 724
734 case SKF_AD_OFF + SKF_AD_RXHASH: 725 case SKF_AD_OFF + SKF_AD_RXHASH:
735 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 726 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
736 727
737 insn->code = BPF_LDX | BPF_MEM | BPF_W; 728 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
738 insn->a_reg = A_REG; 729 offsetof(struct sk_buff, hash));
739 insn->x_reg = CTX_REG;
740 insn->off = offsetof(struct sk_buff, hash);
741 break; 730 break;
742 731
743 case SKF_AD_OFF + SKF_AD_QUEUE: 732 case SKF_AD_OFF + SKF_AD_QUEUE:
744 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); 733 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
745 734
746 insn->code = BPF_LDX | BPF_MEM | BPF_H; 735 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
747 insn->a_reg = A_REG; 736 offsetof(struct sk_buff, queue_mapping));
748 insn->x_reg = CTX_REG;
749 insn->off = offsetof(struct sk_buff, queue_mapping);
750 break; 737 break;
751 738
752 case SKF_AD_OFF + SKF_AD_VLAN_TAG: 739 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
753 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: 740 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
754 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 741 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
755
756 insn->code = BPF_LDX | BPF_MEM | BPF_H;
757 insn->a_reg = A_REG;
758 insn->x_reg = CTX_REG;
759 insn->off = offsetof(struct sk_buff, vlan_tci);
760 insn++;
761
762 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); 742 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
763 743
744 /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
745 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
746 offsetof(struct sk_buff, vlan_tci));
764 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { 747 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
765 insn->code = BPF_ALU | BPF_AND | BPF_K; 748 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
766 insn->a_reg = A_REG; 749 ~VLAN_TAG_PRESENT);
767 insn->imm = ~VLAN_TAG_PRESENT;
768 } else { 750 } else {
769 insn->code = BPF_ALU | BPF_RSH | BPF_K; 751 /* A >>= 12 */
770 insn->a_reg = A_REG; 752 *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
771 insn->imm = 12; 753 /* A &= 1 */
772 insn++; 754 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
773
774 insn->code = BPF_ALU | BPF_AND | BPF_K;
775 insn->a_reg = A_REG;
776 insn->imm = 1;
777 } 755 }
778 break; 756 break;
779 757
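The BPF_LDX_MEM(), BPF_ALU32_IMM(), BPF_MOV64_REG() and related builders used throughout this rewrite are compound-literal macros that populate a whole instruction in one expression. A self-contained sketch of the pattern; the struct below is an illustrative stand-in for struct sock_filter_int, not its real layout:

#include <stdio.h>

struct insn {
	unsigned char code;
	unsigned char dst_reg;
	unsigned char src_reg;
	short off;
	int imm;
};

/* compound-literal builder in the style of BPF_LDX_MEM() */
#define LDX_MEM(SIZE, DST, SRC, OFF)				\
	((struct insn) { .code = (SIZE), .dst_reg = (DST),	\
			 .src_reg = (SRC), .off = (OFF), .imm = 0 })

int main(void)
{
	/* "A = *(u16 *)(CTX + 16)"; 0x69 == BPF_LDX | BPF_MEM | BPF_H */
	struct insn i = LDX_MEM(0x69, 0, 6, 16);

	printf("code=%#x dst=r%u src=r%u off=%d\n",
	       i.code, i.dst_reg, i.src_reg, i.off);
	return 0;
}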
@@ -781,46 +759,36 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
781 case SKF_AD_OFF + SKF_AD_NLATTR: 759 case SKF_AD_OFF + SKF_AD_NLATTR:
782 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 760 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
783 case SKF_AD_OFF + SKF_AD_CPU: 761 case SKF_AD_OFF + SKF_AD_CPU:
784 /* arg1 = ctx */ 762 case SKF_AD_OFF + SKF_AD_RANDOM:
785 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 763 /* arg1 = CTX */
786 insn->a_reg = ARG1_REG; 764 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
787 insn->x_reg = CTX_REG;
788 insn++;
789
790 /* arg2 = A */ 765 /* arg2 = A */
791 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 766 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
792 insn->a_reg = ARG2_REG;
793 insn->x_reg = A_REG;
794 insn++;
795
796 /* arg3 = X */ 767 /* arg3 = X */
797 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 768 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
798 insn->a_reg = ARG3_REG; 769 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
799 insn->x_reg = X_REG;
800 insn++;
801
802 /* Emit call(ctx, arg2=A, arg3=X) */
803 insn->code = BPF_JMP | BPF_CALL;
804 switch (fp->k) { 770 switch (fp->k) {
805 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 771 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
806 insn->imm = __skb_get_pay_offset - __bpf_call_base; 772 *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
807 break; 773 break;
808 case SKF_AD_OFF + SKF_AD_NLATTR: 774 case SKF_AD_OFF + SKF_AD_NLATTR:
809 insn->imm = __skb_get_nlattr - __bpf_call_base; 775 *insn = BPF_EMIT_CALL(__skb_get_nlattr);
810 break; 776 break;
811 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 777 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
812 insn->imm = __skb_get_nlattr_nest - __bpf_call_base; 778 *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
813 break; 779 break;
814 case SKF_AD_OFF + SKF_AD_CPU: 780 case SKF_AD_OFF + SKF_AD_CPU:
815 insn->imm = __get_raw_cpu_id - __bpf_call_base; 781 *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
782 break;
783 case SKF_AD_OFF + SKF_AD_RANDOM:
784 *insn = BPF_EMIT_CALL(__get_random_u32);
816 break; 785 break;
817 } 786 }
818 break; 787 break;
819 788
820 case SKF_AD_OFF + SKF_AD_ALU_XOR_X: 789 case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
821 insn->code = BPF_ALU | BPF_XOR | BPF_X; 790 /* A ^= X */
822 insn->a_reg = A_REG; 791 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
823 insn->x_reg = X_REG;
824 break; 792 break;
825 793
826 default: 794 default:
@@ -870,7 +838,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
870 u8 bpf_src; 838 u8 bpf_src;
871 839
872 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); 840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
873 BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG); 841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
874 842
875 if (len <= 0 || len >= BPF_MAXINSNS) 843 if (len <= 0 || len >= BPF_MAXINSNS)
876 return -EINVAL; 844 return -EINVAL;
@@ -885,11 +853,8 @@ do_pass:
885 new_insn = new_prog; 853 new_insn = new_prog;
886 fp = prog; 854 fp = prog;
887 855
888 if (new_insn) { 856 if (new_insn)
889 new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 857 *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
890 new_insn->a_reg = CTX_REG;
891 new_insn->x_reg = ARG1_REG;
892 }
893 new_insn++; 858 new_insn++;
894 859
895 for (i = 0; i < len; fp++, i++) { 860 for (i = 0; i < len; fp++, i++) {
@@ -937,17 +902,16 @@ do_pass:
937 convert_bpf_extensions(fp, &insn)) 902 convert_bpf_extensions(fp, &insn))
938 break; 903 break;
939 904
940 insn->code = fp->code; 905 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
941 insn->a_reg = A_REG;
942 insn->x_reg = X_REG;
943 insn->imm = fp->k;
944 break; 906 break;
945 907
946 /* Jump opcodes map as-is, but offsets need adjustment. */ 908 /* Jump transformation cannot use BPF block macros
947 case BPF_JMP | BPF_JA: 909 * everywhere as offset calculation and target updates
948 target = i + fp->k + 1; 910 * require a bit more work than the rest, i.e. jump
949 insn->code = fp->code; 911 * opcodes map as-is, but offsets need adjustment.
950#define EMIT_JMP \ 912 */
913
914#define BPF_EMIT_JMP \
951 do { \ 915 do { \
952 if (target >= len || target < 0) \ 916 if (target >= len || target < 0) \
953 goto err; \ 917 goto err; \
@@ -956,7 +920,10 @@ do_pass:
956 insn->off -= insn - tmp_insns; \ 920 insn->off -= insn - tmp_insns; \
957 } while (0) 921 } while (0)
958 922
959 EMIT_JMP; 923 case BPF_JMP | BPF_JA:
924 target = i + fp->k + 1;
925 insn->code = fp->code;
926 BPF_EMIT_JMP;
960 break; 927 break;
961 928
962 case BPF_JMP | BPF_JEQ | BPF_K: 929 case BPF_JMP | BPF_JEQ | BPF_K:
@@ -972,17 +939,14 @@ do_pass:
972 * immediate into tmp register and use it 939 * immediate into tmp register and use it
973 * in compare insn. 940 * in compare insn.
974 */ 941 */
975 insn->code = BPF_ALU | BPF_MOV | BPF_K; 942 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
976 insn->a_reg = TMP_REG;
977 insn->imm = fp->k;
978 insn++;
979 943
980 insn->a_reg = A_REG; 944 insn->dst_reg = BPF_REG_A;
981 insn->x_reg = TMP_REG; 945 insn->src_reg = BPF_REG_TMP;
982 bpf_src = BPF_X; 946 bpf_src = BPF_X;
983 } else { 947 } else {
984 insn->a_reg = A_REG; 948 insn->dst_reg = BPF_REG_A;
985 insn->x_reg = X_REG; 949 insn->src_reg = BPF_REG_X;
986 insn->imm = fp->k; 950 insn->imm = fp->k;
987 bpf_src = BPF_SRC(fp->code); 951 bpf_src = BPF_SRC(fp->code);
988 } 952 }
@@ -991,7 +955,7 @@ do_pass:
991 if (fp->jf == 0) { 955 if (fp->jf == 0) {
992 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 956 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
993 target = i + fp->jt + 1; 957 target = i + fp->jt + 1;
994 EMIT_JMP; 958 BPF_EMIT_JMP;
995 break; 959 break;
996 } 960 }
997 961
@@ -999,127 +963,94 @@ do_pass:
999 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { 963 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
1000 insn->code = BPF_JMP | BPF_JNE | bpf_src; 964 insn->code = BPF_JMP | BPF_JNE | bpf_src;
1001 target = i + fp->jf + 1; 965 target = i + fp->jf + 1;
1002 EMIT_JMP; 966 BPF_EMIT_JMP;
1003 break; 967 break;
1004 } 968 }
1005 969
1006 /* Other jumps are mapped into two insns: Jxx and JA. */ 970 /* Other jumps are mapped into two insns: Jxx and JA. */
1007 target = i + fp->jt + 1; 971 target = i + fp->jt + 1;
1008 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 972 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
1009 EMIT_JMP; 973 BPF_EMIT_JMP;
1010 insn++; 974 insn++;
1011 975
1012 insn->code = BPF_JMP | BPF_JA; 976 insn->code = BPF_JMP | BPF_JA;
1013 target = i + fp->jf + 1; 977 target = i + fp->jf + 1;
1014 EMIT_JMP; 978 BPF_EMIT_JMP;
1015 break; 979 break;
1016 980
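A note on the offset arithmetic in BPF_EMIT_JMP: both classic and internal BPF make jump offsets relative to the instruction after the jump (the interpreter does insn += insn->off and CONT then advances by one), but since a single classic insn can expand to several internal ones, the converter recomputes every offset from absolute target indices. A toy version of that recomputation:

#include <stdio.h>

int main(void)
{
	int i = 3, jt = 2;		/* classic insn index and true offset */
	int target = i + jt + 1;	/* absolute index, as in the converter */
	/* re-encode relative to the next insn; the real code maps i and
	 * target through an address table since insn counts change */
	int off = target - (i + 1);

	printf("taken branch skips %d instruction(s)\n", off);	/* 2 */
	return 0;
}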
1017 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */ 981 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
1018 case BPF_LDX | BPF_MSH | BPF_B: 982 case BPF_LDX | BPF_MSH | BPF_B:
1019 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 983 /* tmp = A */
1020 insn->a_reg = TMP_REG; 984 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
1021 insn->x_reg = A_REG; 985 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
1022 insn++; 986 *insn++ = BPF_LD_ABS(BPF_B, fp->k);
1023 987 /* A &= 0xf */
1024 insn->code = BPF_LD | BPF_ABS | BPF_B; 988 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
1025 insn->a_reg = A_REG; 989 /* A <<= 2 */
1026 insn->imm = fp->k; 990 *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
1027 insn++; 991 /* X = A */
1028 992 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
1029 insn->code = BPF_ALU | BPF_AND | BPF_K; 993 /* A = tmp */
1030 insn->a_reg = A_REG; 994 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
1031 insn->imm = 0xf;
1032 insn++;
1033
1034 insn->code = BPF_ALU | BPF_LSH | BPF_K;
1035 insn->a_reg = A_REG;
1036 insn->imm = 2;
1037 insn++;
1038
1039 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1040 insn->a_reg = X_REG;
1041 insn->x_reg = A_REG;
1042 insn++;
1043
1044 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1045 insn->a_reg = A_REG;
1046 insn->x_reg = TMP_REG;
1047 break; 995 break;
1048 996
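For readers unfamiliar with the classic idiom converted above: ldxb 4*([14]&0xf) reads the IPv4 version/IHL byte (offset 14 on an Ethernet frame) and scales the IHL nibble to a header length in bytes. The equivalent in plain C:

#include <stdio.h>

int main(void)
{
	/* byte 14 of an Ethernet frame: IPv4 version/IHL, here 0x45 (IHL = 5) */
	unsigned char pkt[15] = { [14] = 0x45 };
	unsigned int x = (pkt[14] & 0xf) << 2;	/* X = 4 * IHL */

	printf("IP header length: %u bytes\n", x);	/* 20 */
	return 0;
}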
1049 /* RET_K, RET_A are remapped into 2 insns. */ 997 /* RET_K, RET_A are remapped into 2 insns. */
1050 case BPF_RET | BPF_A: 998 case BPF_RET | BPF_A:
1051 case BPF_RET | BPF_K: 999 case BPF_RET | BPF_K:
1052 insn->code = BPF_ALU | BPF_MOV | 1000 *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
1053 (BPF_RVAL(fp->code) == BPF_K ? 1001 BPF_K : BPF_X, BPF_REG_0,
1054 BPF_K : BPF_X); 1002 BPF_REG_A, fp->k);
1055 insn->a_reg = 0; 1003 *insn = BPF_EXIT_INSN();
1056 insn->x_reg = A_REG;
1057 insn->imm = fp->k;
1058 insn++;
1059
1060 insn->code = BPF_JMP | BPF_EXIT;
1061 break; 1004 break;
1062 1005
1063 /* Store to stack. */ 1006 /* Store to stack. */
1064 case BPF_ST: 1007 case BPF_ST:
1065 case BPF_STX: 1008 case BPF_STX:
1066 insn->code = BPF_STX | BPF_MEM | BPF_W; 1009 *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
1067 insn->a_reg = FP_REG; 1010 BPF_ST ? BPF_REG_A : BPF_REG_X,
1068 insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG; 1011 -(BPF_MEMWORDS - fp->k) * 4);
1069 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1070 break; 1012 break;
1071 1013
1072 /* Load from stack. */ 1014 /* Load from stack. */
1073 case BPF_LD | BPF_MEM: 1015 case BPF_LD | BPF_MEM:
1074 case BPF_LDX | BPF_MEM: 1016 case BPF_LDX | BPF_MEM:
1075 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1017 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
1076 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1018 BPF_REG_A : BPF_REG_X, BPF_REG_FP,
1077 A_REG : X_REG; 1019 -(BPF_MEMWORDS - fp->k) * 4);
1078 insn->x_reg = FP_REG;
1079 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1080 break; 1020 break;
1081 1021
1082 /* A = K or X = K */ 1022 /* A = K or X = K */
1083 case BPF_LD | BPF_IMM: 1023 case BPF_LD | BPF_IMM:
1084 case BPF_LDX | BPF_IMM: 1024 case BPF_LDX | BPF_IMM:
1085 insn->code = BPF_ALU | BPF_MOV | BPF_K; 1025 *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
1086 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1026 BPF_REG_A : BPF_REG_X, fp->k);
1087 A_REG : X_REG;
1088 insn->imm = fp->k;
1089 break; 1027 break;
1090 1028
1091 /* X = A */ 1029 /* X = A */
1092 case BPF_MISC | BPF_TAX: 1030 case BPF_MISC | BPF_TAX:
1093 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 1031 *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
1094 insn->a_reg = X_REG;
1095 insn->x_reg = A_REG;
1096 break; 1032 break;
1097 1033
1098 /* A = X */ 1034 /* A = X */
1099 case BPF_MISC | BPF_TXA: 1035 case BPF_MISC | BPF_TXA:
1100 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 1036 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
1101 insn->a_reg = A_REG;
1102 insn->x_reg = X_REG;
1103 break; 1037 break;
1104 1038
1105 /* A = skb->len or X = skb->len */ 1039 /* A = skb->len or X = skb->len */
1106 case BPF_LD | BPF_W | BPF_LEN: 1040 case BPF_LD | BPF_W | BPF_LEN:
1107 case BPF_LDX | BPF_W | BPF_LEN: 1041 case BPF_LDX | BPF_W | BPF_LEN:
1108 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1042 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
1109 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1043 BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
1110 A_REG : X_REG; 1044 offsetof(struct sk_buff, len));
1111 insn->x_reg = CTX_REG;
1112 insn->off = offsetof(struct sk_buff, len);
1113 break; 1045 break;
1114 1046
1115 /* access seccomp_data fields */ 1047 /* Access seccomp_data fields. */
1116 case BPF_LDX | BPF_ABS | BPF_W: 1048 case BPF_LDX | BPF_ABS | BPF_W:
1117 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1049 /* A = *(u32 *) (ctx + K) */
1118 insn->a_reg = A_REG; 1050 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
1119 insn->x_reg = CTX_REG;
1120 insn->off = fp->k;
1121 break; 1051 break;
1122 1052
1053 /* Unknown instruction. */
1123 default: 1054 default:
1124 goto err; 1055 goto err;
1125 } 1056 }
@@ -1128,7 +1059,6 @@ do_pass:
1128 if (new_prog) 1059 if (new_prog)
1129 memcpy(new_insn, tmp_insns, 1060 memcpy(new_insn, tmp_insns,
1130 sizeof(*insn) * (insn - tmp_insns)); 1061 sizeof(*insn) * (insn - tmp_insns));
1131
1132 new_insn += insn - tmp_insns; 1062 new_insn += insn - tmp_insns;
1133 } 1063 }
1134 1064
@@ -1143,7 +1073,6 @@ do_pass:
1143 new_flen = new_insn - new_prog; 1073 new_flen = new_insn - new_prog;
1144 if (pass > 2) 1074 if (pass > 2)
1145 goto err; 1075 goto err;
1146
1147 goto do_pass; 1076 goto do_pass;
1148 } 1077 }
1149 1078
@@ -1167,44 +1096,46 @@ err:
1167 */ 1096 */
1168static int check_load_and_stores(struct sock_filter *filter, int flen) 1097static int check_load_and_stores(struct sock_filter *filter, int flen)
1169{ 1098{
1170 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ 1099 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
1171 int pc, ret = 0; 1100 int pc, ret = 0;
1172 1101
1173 BUILD_BUG_ON(BPF_MEMWORDS > 16); 1102 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1103
1174 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); 1104 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
1175 if (!masks) 1105 if (!masks)
1176 return -ENOMEM; 1106 return -ENOMEM;
1107
1177 memset(masks, 0xff, flen * sizeof(*masks)); 1108 memset(masks, 0xff, flen * sizeof(*masks));
1178 1109
1179 for (pc = 0; pc < flen; pc++) { 1110 for (pc = 0; pc < flen; pc++) {
1180 memvalid &= masks[pc]; 1111 memvalid &= masks[pc];
1181 1112
1182 switch (filter[pc].code) { 1113 switch (filter[pc].code) {
1183 case BPF_S_ST: 1114 case BPF_ST:
1184 case BPF_S_STX: 1115 case BPF_STX:
1185 memvalid |= (1 << filter[pc].k); 1116 memvalid |= (1 << filter[pc].k);
1186 break; 1117 break;
1187 case BPF_S_LD_MEM: 1118 case BPF_LD | BPF_MEM:
1188 case BPF_S_LDX_MEM: 1119 case BPF_LDX | BPF_MEM:
1189 if (!(memvalid & (1 << filter[pc].k))) { 1120 if (!(memvalid & (1 << filter[pc].k))) {
1190 ret = -EINVAL; 1121 ret = -EINVAL;
1191 goto error; 1122 goto error;
1192 } 1123 }
1193 break; 1124 break;
1194 case BPF_S_JMP_JA: 1125 case BPF_JMP | BPF_JA:
1195 /* a jump must set masks on target */ 1126 /* A jump must set masks on target */
1196 masks[pc + 1 + filter[pc].k] &= memvalid; 1127 masks[pc + 1 + filter[pc].k] &= memvalid;
1197 memvalid = ~0; 1128 memvalid = ~0;
1198 break; 1129 break;
1199 case BPF_S_JMP_JEQ_K: 1130 case BPF_JMP | BPF_JEQ | BPF_K:
1200 case BPF_S_JMP_JEQ_X: 1131 case BPF_JMP | BPF_JEQ | BPF_X:
1201 case BPF_S_JMP_JGE_K: 1132 case BPF_JMP | BPF_JGE | BPF_K:
1202 case BPF_S_JMP_JGE_X: 1133 case BPF_JMP | BPF_JGE | BPF_X:
1203 case BPF_S_JMP_JGT_K: 1134 case BPF_JMP | BPF_JGT | BPF_K:
1204 case BPF_S_JMP_JGT_X: 1135 case BPF_JMP | BPF_JGT | BPF_X:
1205 case BPF_S_JMP_JSET_X: 1136 case BPF_JMP | BPF_JSET | BPF_K:
1206 case BPF_S_JMP_JSET_K: 1137 case BPF_JMP | BPF_JSET | BPF_X:
1207 /* a jump must set masks on targets */ 1138 /* A jump must set masks on targets */
1208 masks[pc + 1 + filter[pc].jt] &= memvalid; 1139 masks[pc + 1 + filter[pc].jt] &= memvalid;
1209 masks[pc + 1 + filter[pc].jf] &= memvalid; 1140 masks[pc + 1 + filter[pc].jf] &= memvalid;
1210 memvalid = ~0; 1141 memvalid = ~0;
@@ -1216,6 +1147,72 @@ error:
1216 return ret; 1147 return ret;
1217} 1148}
1218 1149
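check_load_and_stores() tracks scratch-memory liveness with one bit per M[] cell: a store sets the bit, a load requires it, and masks are ANDed into jump targets so the requirement holds on every path. The core bit bookkeeping, reduced to a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned short memvalid = 0;	/* one bit per scratch cell */

	memvalid |= 1 << 3;		/* ST M[3] marks the cell valid */
	if (memvalid & (1 << 3))
		puts("LD M[3]: ok, cell initialized");
	if (!(memvalid & (1 << 5)))
		puts("LD M[5]: rejected, never stored on this path");
	return 0;
}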
1150static bool chk_code_allowed(u16 code_to_probe)
1151{
1152 static const bool codes[] = {
1153 /* 32 bit ALU operations */
1154 [BPF_ALU | BPF_ADD | BPF_K] = true,
1155 [BPF_ALU | BPF_ADD | BPF_X] = true,
1156 [BPF_ALU | BPF_SUB | BPF_K] = true,
1157 [BPF_ALU | BPF_SUB | BPF_X] = true,
1158 [BPF_ALU | BPF_MUL | BPF_K] = true,
1159 [BPF_ALU | BPF_MUL | BPF_X] = true,
1160 [BPF_ALU | BPF_DIV | BPF_K] = true,
1161 [BPF_ALU | BPF_DIV | BPF_X] = true,
1162 [BPF_ALU | BPF_MOD | BPF_K] = true,
1163 [BPF_ALU | BPF_MOD | BPF_X] = true,
1164 [BPF_ALU | BPF_AND | BPF_K] = true,
1165 [BPF_ALU | BPF_AND | BPF_X] = true,
1166 [BPF_ALU | BPF_OR | BPF_K] = true,
1167 [BPF_ALU | BPF_OR | BPF_X] = true,
1168 [BPF_ALU | BPF_XOR | BPF_K] = true,
1169 [BPF_ALU | BPF_XOR | BPF_X] = true,
1170 [BPF_ALU | BPF_LSH | BPF_K] = true,
1171 [BPF_ALU | BPF_LSH | BPF_X] = true,
1172 [BPF_ALU | BPF_RSH | BPF_K] = true,
1173 [BPF_ALU | BPF_RSH | BPF_X] = true,
1174 [BPF_ALU | BPF_NEG] = true,
1175 /* Load instructions */
1176 [BPF_LD | BPF_W | BPF_ABS] = true,
1177 [BPF_LD | BPF_H | BPF_ABS] = true,
1178 [BPF_LD | BPF_B | BPF_ABS] = true,
1179 [BPF_LD | BPF_W | BPF_LEN] = true,
1180 [BPF_LD | BPF_W | BPF_IND] = true,
1181 [BPF_LD | BPF_H | BPF_IND] = true,
1182 [BPF_LD | BPF_B | BPF_IND] = true,
1183 [BPF_LD | BPF_IMM] = true,
1184 [BPF_LD | BPF_MEM] = true,
1185 [BPF_LDX | BPF_W | BPF_LEN] = true,
1186 [BPF_LDX | BPF_B | BPF_MSH] = true,
1187 [BPF_LDX | BPF_IMM] = true,
1188 [BPF_LDX | BPF_MEM] = true,
1189 /* Store instructions */
1190 [BPF_ST] = true,
1191 [BPF_STX] = true,
1192 /* Misc instructions */
1193 [BPF_MISC | BPF_TAX] = true,
1194 [BPF_MISC | BPF_TXA] = true,
1195 /* Return instructions */
1196 [BPF_RET | BPF_K] = true,
1197 [BPF_RET | BPF_A] = true,
1198 /* Jump instructions */
1199 [BPF_JMP | BPF_JA] = true,
1200 [BPF_JMP | BPF_JEQ | BPF_K] = true,
1201 [BPF_JMP | BPF_JEQ | BPF_X] = true,
1202 [BPF_JMP | BPF_JGE | BPF_K] = true,
1203 [BPF_JMP | BPF_JGE | BPF_X] = true,
1204 [BPF_JMP | BPF_JGT | BPF_K] = true,
1205 [BPF_JMP | BPF_JGT | BPF_X] = true,
1206 [BPF_JMP | BPF_JSET | BPF_K] = true,
1207 [BPF_JMP | BPF_JSET | BPF_X] = true,
1208 };
1209
1210 if (code_to_probe >= ARRAY_SIZE(codes))
1211 return false;
1212
1213 return codes[code_to_probe];
1214}
1215
1219/** 1216/**
1220 * sk_chk_filter - verify socket filter code 1217 * sk_chk_filter - verify socket filter code
1221 * @filter: filter to verify 1218 * @filter: filter to verify
@@ -1232,153 +1229,76 @@ error:
1232 */ 1229 */
1233int sk_chk_filter(struct sock_filter *filter, unsigned int flen) 1230int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
1234{ 1231{
1235 /*
1236 * Valid instructions are initialized to non-0.
1237 * Invalid instructions are initialized to 0.
1238 */
1239 static const u8 codes[] = {
1240 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
1241 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
1242 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
1243 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
1244 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
1245 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
1246 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
1247 [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
1248 [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
1249 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
1250 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
1251 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
1252 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
1253 [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
1254 [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
1255 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
1256 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
1257 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
1258 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
1259 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
1260 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
1261 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
1262 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
1263 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
1264 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
1265 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
1266 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
1267 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
1268 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
1269 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
1270 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
1271 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
1272 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
1273 [BPF_RET|BPF_K] = BPF_S_RET_K,
1274 [BPF_RET|BPF_A] = BPF_S_RET_A,
1275 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
1276 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
1277 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
1278 [BPF_ST] = BPF_S_ST,
1279 [BPF_STX] = BPF_S_STX,
1280 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
1281 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
1282 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
1283 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
1284 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
1285 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
1286 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
1287 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
1288 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
1289 };
1290 int pc;
1291 bool anc_found; 1232 bool anc_found;
1233 int pc;
1292 1234
1293 if (flen == 0 || flen > BPF_MAXINSNS) 1235 if (flen == 0 || flen > BPF_MAXINSNS)
1294 return -EINVAL; 1236 return -EINVAL;
1295 1237
1296 /* check the filter code now */ 1238 /* Check the filter code now */
1297 for (pc = 0; pc < flen; pc++) { 1239 for (pc = 0; pc < flen; pc++) {
1298 struct sock_filter *ftest = &filter[pc]; 1240 struct sock_filter *ftest = &filter[pc];
1299 u16 code = ftest->code;
1300 1241
1301 if (code >= ARRAY_SIZE(codes)) 1242 /* May we actually operate on this code? */
1302 return -EINVAL; 1243 if (!chk_code_allowed(ftest->code))
1303 code = codes[code];
1304 if (!code)
1305 return -EINVAL; 1244 return -EINVAL;
1245
1306 /* Some instructions need special checks */ 1246 /* Some instructions need special checks */
1307 switch (code) { 1247 switch (ftest->code) {
1308 case BPF_S_ALU_DIV_K: 1248 case BPF_ALU | BPF_DIV | BPF_K:
1309 case BPF_S_ALU_MOD_K: 1249 case BPF_ALU | BPF_MOD | BPF_K:
1310 /* check for division by zero */ 1250 /* Check for division by zero */
1311 if (ftest->k == 0) 1251 if (ftest->k == 0)
1312 return -EINVAL; 1252 return -EINVAL;
1313 break; 1253 break;
1314 case BPF_S_LD_MEM: 1254 case BPF_LD | BPF_MEM:
1315 case BPF_S_LDX_MEM: 1255 case BPF_LDX | BPF_MEM:
1316 case BPF_S_ST: 1256 case BPF_ST:
1317 case BPF_S_STX: 1257 case BPF_STX:
1318 /* check for invalid memory addresses */ 1258 /* Check for invalid memory addresses */
1319 if (ftest->k >= BPF_MEMWORDS) 1259 if (ftest->k >= BPF_MEMWORDS)
1320 return -EINVAL; 1260 return -EINVAL;
1321 break; 1261 break;
1322 case BPF_S_JMP_JA: 1262 case BPF_JMP | BPF_JA:
1323 /* 1263 /* Note, the large ftest->k might cause loops.
1324 * Note, the large ftest->k might cause loops.
1325 * Compare this with conditional jumps below, 1264 * Compare this with conditional jumps below,
1326 * where offsets are limited. --ANK (981016) 1265 * where offsets are limited. --ANK (981016)
1327 */ 1266 */
1328 if (ftest->k >= (unsigned int)(flen-pc-1)) 1267 if (ftest->k >= (unsigned int)(flen - pc - 1))
1329 return -EINVAL; 1268 return -EINVAL;
1330 break; 1269 break;
1331 case BPF_S_JMP_JEQ_K: 1270 case BPF_JMP | BPF_JEQ | BPF_K:
1332 case BPF_S_JMP_JEQ_X: 1271 case BPF_JMP | BPF_JEQ | BPF_X:
1333 case BPF_S_JMP_JGE_K: 1272 case BPF_JMP | BPF_JGE | BPF_K:
1334 case BPF_S_JMP_JGE_X: 1273 case BPF_JMP | BPF_JGE | BPF_X:
1335 case BPF_S_JMP_JGT_K: 1274 case BPF_JMP | BPF_JGT | BPF_K:
1336 case BPF_S_JMP_JGT_X: 1275 case BPF_JMP | BPF_JGT | BPF_X:
1337 case BPF_S_JMP_JSET_X: 1276 case BPF_JMP | BPF_JSET | BPF_K:
1338 case BPF_S_JMP_JSET_K: 1277 case BPF_JMP | BPF_JSET | BPF_X:
1339 /* for conditionals both must be safe */ 1278 /* Both conditionals must be safe */
1340 if (pc + ftest->jt + 1 >= flen || 1279 if (pc + ftest->jt + 1 >= flen ||
1341 pc + ftest->jf + 1 >= flen) 1280 pc + ftest->jf + 1 >= flen)
1342 return -EINVAL; 1281 return -EINVAL;
1343 break; 1282 break;
1344 case BPF_S_LD_W_ABS: 1283 case BPF_LD | BPF_W | BPF_ABS:
1345 case BPF_S_LD_H_ABS: 1284 case BPF_LD | BPF_H | BPF_ABS:
1346 case BPF_S_LD_B_ABS: 1285 case BPF_LD | BPF_B | BPF_ABS:
1347 anc_found = false; 1286 anc_found = false;
1348#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ 1287 if (bpf_anc_helper(ftest) & BPF_ANC)
1349 code = BPF_S_ANC_##CODE; \ 1288 anc_found = true;
1350 anc_found = true; \ 1289 /* Ancillary operation unknown or unsupported */
1351 break
1352 switch (ftest->k) {
1353 ANCILLARY(PROTOCOL);
1354 ANCILLARY(PKTTYPE);
1355 ANCILLARY(IFINDEX);
1356 ANCILLARY(NLATTR);
1357 ANCILLARY(NLATTR_NEST);
1358 ANCILLARY(MARK);
1359 ANCILLARY(QUEUE);
1360 ANCILLARY(HATYPE);
1361 ANCILLARY(RXHASH);
1362 ANCILLARY(CPU);
1363 ANCILLARY(ALU_XOR_X);
1364 ANCILLARY(VLAN_TAG);
1365 ANCILLARY(VLAN_TAG_PRESENT);
1366 ANCILLARY(PAY_OFFSET);
1367 }
1368
1369 /* ancillary operation unknown or unsupported */
1370 if (anc_found == false && ftest->k >= SKF_AD_OFF) 1290 if (anc_found == false && ftest->k >= SKF_AD_OFF)
1371 return -EINVAL; 1291 return -EINVAL;
1372 } 1292 }
1373 ftest->code = code;
1374 } 1293 }
1375 1294
1376 /* last instruction must be a RET code */ 1295 /* Last instruction must be a RET code */
1377 switch (filter[flen - 1].code) { 1296 switch (filter[flen - 1].code) {
1378 case BPF_S_RET_K: 1297 case BPF_RET | BPF_K:
1379 case BPF_S_RET_A: 1298 case BPF_RET | BPF_A:
1380 return check_load_and_stores(filter, flen); 1299 return check_load_and_stores(filter, flen);
1381 } 1300 }
1301
1382 return -EINVAL; 1302 return -EINVAL;
1383} 1303}
1384EXPORT_SYMBOL(sk_chk_filter); 1304EXPORT_SYMBOL(sk_chk_filter);
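As a concrete reference for the RET requirement above, the smallest program sk_chk_filter() accepts is a single RET with a literal K, built in user space the way it would be handed to setsockopt(SO_ATTACH_FILTER):

#include <linux/filter.h>
#include <stdio.h>

int main(void)
{
	/* "ret #0xffff": accept the packet, truncated to 64K */
	struct sock_filter insns[] = {
		{ .code = BPF_RET | BPF_K, .jt = 0, .jf = 0, .k = 0xffff },
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	printf("%u-insn filter ready for setsockopt(SO_ATTACH_FILTER)\n",
	       fprog.len);
	return 0;
}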
@@ -1423,7 +1343,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
1423 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 1343 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
1424 1344
1425 sk_release_orig_filter(fp); 1345 sk_release_orig_filter(fp);
1426 bpf_jit_free(fp); 1346 sk_filter_free(fp);
1427} 1347}
1428 1348
1429/** 1349/**
@@ -1461,7 +1381,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
1461 1381
1462 fp_new = sock_kmalloc(sk, len, GFP_KERNEL); 1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
1463 if (fp_new) { 1383 if (fp_new) {
1464 memcpy(fp_new, fp, sizeof(struct sk_filter)); 1384 *fp_new = *fp;
1465 /* As we're keeping orig_prog along in fp_new, 1385 /* As we're keeping orig_prog along in fp_new,
1466 * we need to make sure we're not evicting it 1386 * we need to make sure we're not evicting it
1467 * from the old fp. 1387 * from the old fp.
@@ -1478,7 +1398,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1478{ 1398{
1479 struct sock_filter *old_prog; 1399 struct sock_filter *old_prog;
1480 struct sk_filter *old_fp; 1400 struct sk_filter *old_fp;
1481 int i, err, new_len, old_len = fp->len; 1401 int err, new_len, old_len = fp->len;
1482 1402
1483 /* We are free to overwrite insns et al right here as it 1403 /* We are free to overwrite insns et al right here as it
1484 * won't be used at this point in time anymore internally 1404 * won't be used at this point in time anymore internally
@@ -1488,13 +1408,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1488 BUILD_BUG_ON(sizeof(struct sock_filter) != 1408 BUILD_BUG_ON(sizeof(struct sock_filter) !=
1489 sizeof(struct sock_filter_int)); 1409 sizeof(struct sock_filter_int));
1490 1410
1491 /* For now, we need to unfiddle BPF_S_* identifiers in place.
1492 * This can sooner or later on be subject to removal, e.g. when
1493 * JITs have been converted.
1494 */
1495 for (i = 0; i < fp->len; i++)
1496 sk_decode_filter(&fp->insns[i], &fp->insns[i]);
1497
1498 /* Conversion cannot happen on overlapping memory areas, 1411 /* Conversion cannot happen on overlapping memory areas,
1499 * so we need to keep the user BPF around until the 2nd 1412 * so we need to keep the user BPF around until the 2nd
1500 * pass. At this time, the user BPF is stored in fp->insns. 1413 * pass. At this time, the user BPF is stored in fp->insns.
@@ -1523,7 +1436,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1523 goto out_err_free; 1436 goto out_err_free;
1524 } 1437 }
1525 1438
1526 fp->bpf_func = sk_run_filter_int_skb;
1527 fp->len = new_len; 1439 fp->len = new_len;
1528 1440
1529 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */ 1441 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1536,6 +1448,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1536 */ 1448 */
1537 goto out_err_free; 1449 goto out_err_free;
1538 1450
1451 sk_filter_select_runtime(fp);
1452
1539 kfree(old_prog); 1453 kfree(old_prog);
1540 return fp; 1454 return fp;
1541 1455
@@ -1550,6 +1464,33 @@ out_err:
1550 return ERR_PTR(err); 1464 return ERR_PTR(err);
1551} 1465}
1552 1466
1467void __weak bpf_int_jit_compile(struct sk_filter *prog)
1468{
1469}
1470
1471/**
1472 * sk_filter_select_runtime - select execution runtime for BPF program
1473 * @fp: sk_filter populated with internal BPF program
1474 *
1475 * Try to JIT the internal BPF program; if no JIT is available, select the interpreter.
1476 * The BPF program will be executed via the SK_RUN_FILTER() macro.
1477 */
1478void sk_filter_select_runtime(struct sk_filter *fp)
1479{
1480 fp->bpf_func = (void *) __sk_run_filter;
1481
1482 /* Probe if internal BPF can be JITed */
1483 bpf_int_jit_compile(fp);
1484}
1485EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
1486
1487/* free internal BPF program */
1488void sk_filter_free(struct sk_filter *fp)
1489{
1490 bpf_jit_free(fp);
1491}
1492EXPORT_SYMBOL_GPL(sk_filter_free);
1493
1553static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, 1494static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1554 struct sock *sk) 1495 struct sock *sk)
1555{ 1496{
@@ -1592,7 +1533,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1592 * a negative errno code is returned. On success the return is zero. 1533 * a negative errno code is returned. On success the return is zero.
1593 */ 1534 */
1594int sk_unattached_filter_create(struct sk_filter **pfp, 1535int sk_unattached_filter_create(struct sk_filter **pfp,
1595 struct sock_fprog *fprog) 1536 struct sock_fprog_kern *fprog)
1596{ 1537{
1597 unsigned int fsize = sk_filter_proglen(fprog); 1538 unsigned int fsize = sk_filter_proglen(fprog);
1598 struct sk_filter *fp; 1539 struct sk_filter *fp;
@@ -1713,83 +1654,6 @@ int sk_detach_filter(struct sock *sk)
1713} 1654}
1714EXPORT_SYMBOL_GPL(sk_detach_filter); 1655EXPORT_SYMBOL_GPL(sk_detach_filter);
1715 1656
1716void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
1717{
1718 static const u16 decodes[] = {
1719 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
1720 [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
1721 [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
1722 [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
1723 [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
1724 [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
1725 [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
1726 [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
1727 [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
1728 [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
1729 [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
1730 [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
1731 [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
1732 [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
1733 [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
1734 [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
1735 [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
1736 [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
1737 [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
1738 [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
1739 [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
1740 [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
1741 [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
1742 [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
1743 [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
1744 [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
1745 [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
1746 [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
1747 [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
1748 [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
1749 [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
1750 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
1751 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
1752 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
1753 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
1754 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
1755 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
1756 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
1757 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
1758 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
1759 [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
1760 [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
1761 [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
1762 [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
1763 [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
1764 [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
1765 [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
1766 [BPF_S_RET_K] = BPF_RET|BPF_K,
1767 [BPF_S_RET_A] = BPF_RET|BPF_A,
1768 [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
1769 [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
1770 [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
1771 [BPF_S_ST] = BPF_ST,
1772 [BPF_S_STX] = BPF_STX,
1773 [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
1774 [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
1775 [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
1776 [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
1777 [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
1778 [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
1779 [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
1780 [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
1781 [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
1782 };
1783 u16 code;
1784
1785 code = filt->code;
1786
1787 to->code = decodes[code];
1788 to->jt = filt->jt;
1789 to->jf = filt->jf;
1790 to->k = filt->k;
1791}
1792
1793int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 1657int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1794 unsigned int len) 1658 unsigned int len)
1795{ 1659{
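A note on the sk_filter_select_runtime()/bpf_int_jit_compile() pair above: the __weak definition is the entire fallback mechanism -- architectures without an eBPF JIT keep the empty default and therefore the interpreter. A minimal sketch of the pattern, using hypothetical names (prog, jit_compile, select_runtime) rather than the kernel's sk_filter types:

    #include <linux/compiler.h>

    struct prog {
            unsigned int (*run)(const void *ctx);
            /* insns, len, ... elided */
    };

    static unsigned int interpreter(const void *ctx)
    {
            return 0;               /* always-available generic path */
    }

    /* Default used when the architecture provides no JIT. */
    void __weak jit_compile(struct prog *p)
    {
    }

    void select_runtime(struct prog *p)
    {
            p->run = interpreter;   /* safe fallback first */
            jit_compile(p);         /* a strong per-arch definition may
                                     * overwrite p->run with emitted code */
    }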
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7c8ffd974961..85b62691f4f2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
273{ 273{
274 const struct pernet_operations *ops; 274 const struct pernet_operations *ops;
275 struct net *net, *tmp; 275 struct net *net, *tmp;
276 LIST_HEAD(net_kill_list); 276 struct list_head net_kill_list;
277 LIST_HEAD(net_exit_list); 277 LIST_HEAD(net_exit_list);
278 278
279 /* Atomically snapshot the list of namespaces to cleanup */ 279 /* Atomically snapshot the list of namespaces to cleanup */
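The LIST_HEAD() removal in cleanup_net() is safe only because the on-stack head is subsequently initialized wholesale, presumably via list_replace_init() as in the surrounding code; static initialization would be dead stores. A sketch of that idiom (names hypothetical):

    #include <linux/list.h>

    static LIST_HEAD(pending);              /* filled by other contexts */

    static void drain_pending(void)
    {
            struct list_head snapshot;      /* no LIST_HEAD() needed */

            /* list_replace_init() overwrites snapshot->next/prev and
             * re-initializes the shared head; take the appropriate
             * lock around this in real code.
             */
            list_replace_init(&pending, &snapshot);

            /* walk and dispose of the snapshot entries... */
    }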
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0304f981f7ff..fc17a9d309ac 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
573 is_zero_ether_addr(pkt_dev->src_mac) ? 573 is_zero_ether_addr(pkt_dev->src_mac) ?
574 pkt_dev->odev->dev_addr : pkt_dev->src_mac); 574 pkt_dev->odev->dev_addr : pkt_dev->src_mac);
575 575
576 seq_printf(seq, "dst_mac: "); 576 seq_puts(seq, "dst_mac: ");
577 seq_printf(seq, "%pM\n", pkt_dev->dst_mac); 577 seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
578 578
579 seq_printf(seq, 579 seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
588 588
589 if (pkt_dev->nr_labels) { 589 if (pkt_dev->nr_labels) {
590 unsigned int i; 590 unsigned int i;
591 seq_printf(seq, " mpls: "); 591 seq_puts(seq, " mpls: ");
592 for (i = 0; i < pkt_dev->nr_labels; i++) 592 for (i = 0; i < pkt_dev->nr_labels; i++)
593 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 593 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
594 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 594 i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
613 if (pkt_dev->node >= 0) 613 if (pkt_dev->node >= 0)
614 seq_printf(seq, " node: %d\n", pkt_dev->node); 614 seq_printf(seq, " node: %d\n", pkt_dev->node);
615 615
616 seq_printf(seq, " Flags: "); 616 seq_puts(seq, " Flags: ");
617 617
618 if (pkt_dev->flags & F_IPV6) 618 if (pkt_dev->flags & F_IPV6)
619 seq_printf(seq, "IPV6 "); 619 seq_puts(seq, "IPV6 ");
620 620
621 if (pkt_dev->flags & F_IPSRC_RND) 621 if (pkt_dev->flags & F_IPSRC_RND)
622 seq_printf(seq, "IPSRC_RND "); 622 seq_puts(seq, "IPSRC_RND ");
623 623
624 if (pkt_dev->flags & F_IPDST_RND) 624 if (pkt_dev->flags & F_IPDST_RND)
625 seq_printf(seq, "IPDST_RND "); 625 seq_puts(seq, "IPDST_RND ");
626 626
627 if (pkt_dev->flags & F_TXSIZE_RND) 627 if (pkt_dev->flags & F_TXSIZE_RND)
628 seq_printf(seq, "TXSIZE_RND "); 628 seq_puts(seq, "TXSIZE_RND ");
629 629
630 if (pkt_dev->flags & F_UDPSRC_RND) 630 if (pkt_dev->flags & F_UDPSRC_RND)
631 seq_printf(seq, "UDPSRC_RND "); 631 seq_puts(seq, "UDPSRC_RND ");
632 632
633 if (pkt_dev->flags & F_UDPDST_RND) 633 if (pkt_dev->flags & F_UDPDST_RND)
634 seq_printf(seq, "UDPDST_RND "); 634 seq_puts(seq, "UDPDST_RND ");
635 635
636 if (pkt_dev->flags & F_UDPCSUM) 636 if (pkt_dev->flags & F_UDPCSUM)
637 seq_printf(seq, "UDPCSUM "); 637 seq_puts(seq, "UDPCSUM ");
638 638
639 if (pkt_dev->flags & F_MPLS_RND) 639 if (pkt_dev->flags & F_MPLS_RND)
640 seq_printf(seq, "MPLS_RND "); 640 seq_puts(seq, "MPLS_RND ");
641 641
642 if (pkt_dev->flags & F_QUEUE_MAP_RND) 642 if (pkt_dev->flags & F_QUEUE_MAP_RND)
643 seq_printf(seq, "QUEUE_MAP_RND "); 643 seq_puts(seq, "QUEUE_MAP_RND ");
644 644
645 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 645 if (pkt_dev->flags & F_QUEUE_MAP_CPU)
646 seq_printf(seq, "QUEUE_MAP_CPU "); 646 seq_puts(seq, "QUEUE_MAP_CPU ");
647 647
648 if (pkt_dev->cflows) { 648 if (pkt_dev->cflows) {
649 if (pkt_dev->flags & F_FLOW_SEQ) 649 if (pkt_dev->flags & F_FLOW_SEQ)
650 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ 650 seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
651 else 651 else
652 seq_printf(seq, "FLOW_RND "); 652 seq_puts(seq, "FLOW_RND ");
653 } 653 }
654 654
655#ifdef CONFIG_XFRM 655#ifdef CONFIG_XFRM
656 if (pkt_dev->flags & F_IPSEC_ON) { 656 if (pkt_dev->flags & F_IPSEC_ON) {
657 seq_printf(seq, "IPSEC "); 657 seq_puts(seq, "IPSEC ");
658 if (pkt_dev->spi) 658 if (pkt_dev->spi)
659 seq_printf(seq, "spi:%u", pkt_dev->spi); 659 seq_printf(seq, "spi:%u", pkt_dev->spi);
660 } 660 }
661#endif 661#endif
662 662
663 if (pkt_dev->flags & F_MACSRC_RND) 663 if (pkt_dev->flags & F_MACSRC_RND)
664 seq_printf(seq, "MACSRC_RND "); 664 seq_puts(seq, "MACSRC_RND ");
665 665
666 if (pkt_dev->flags & F_MACDST_RND) 666 if (pkt_dev->flags & F_MACDST_RND)
667 seq_printf(seq, "MACDST_RND "); 667 seq_puts(seq, "MACDST_RND ");
668 668
669 if (pkt_dev->flags & F_VID_RND) 669 if (pkt_dev->flags & F_VID_RND)
670 seq_printf(seq, "VID_RND "); 670 seq_puts(seq, "VID_RND ");
671 671
672 if (pkt_dev->flags & F_SVID_RND) 672 if (pkt_dev->flags & F_SVID_RND)
673 seq_printf(seq, "SVID_RND "); 673 seq_puts(seq, "SVID_RND ");
674 674
675 if (pkt_dev->flags & F_NODE) 675 if (pkt_dev->flags & F_NODE)
676 seq_printf(seq, "NODE_ALLOC "); 676 seq_puts(seq, "NODE_ALLOC ");
677 677
678 seq_puts(seq, "\n"); 678 seq_puts(seq, "\n");
679 679
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
716 if (pkt_dev->result[0]) 716 if (pkt_dev->result[0])
717 seq_printf(seq, "Result: %s\n", pkt_dev->result); 717 seq_printf(seq, "Result: %s\n", pkt_dev->result);
718 else 718 else
719 seq_printf(seq, "Result: Idle\n"); 719 seq_puts(seq, "Result: Idle\n");
720 720
721 return 0; 721 return 0;
722} 722}
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1735 1735
1736 BUG_ON(!t); 1736 BUG_ON(!t);
1737 1737
1738 seq_printf(seq, "Running: "); 1738 seq_puts(seq, "Running: ");
1739 1739
1740 if_lock(t); 1740 if_lock(t);
1741 list_for_each_entry(pkt_dev, &t->if_list, list) 1741 list_for_each_entry(pkt_dev, &t->if_list, list)
1742 if (pkt_dev->running) 1742 if (pkt_dev->running)
1743 seq_printf(seq, "%s ", pkt_dev->odevname); 1743 seq_printf(seq, "%s ", pkt_dev->odevname);
1744 1744
1745 seq_printf(seq, "\nStopped: "); 1745 seq_puts(seq, "\nStopped: ");
1746 1746
1747 list_for_each_entry(pkt_dev, &t->if_list, list) 1747 list_for_each_entry(pkt_dev, &t->if_list, list)
1748 if (!pkt_dev->running) 1748 if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1751 if (t->result[0]) 1751 if (t->result[0])
1752 seq_printf(seq, "\nResult: %s\n", t->result); 1752 seq_printf(seq, "\nResult: %s\n", t->result);
1753 else 1753 else
1754 seq_printf(seq, "\nResult: NA\n"); 1754 seq_puts(seq, "\nResult: NA\n");
1755 1755
1756 if_unlock(t); 1756 if_unlock(t);
1757 1757
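A side note on the pktgen conversions above: every replaced call printed a string containing no format specifiers, and for that case seq_puts() does the same job without walking a format string. Illustrative sketch (names hypothetical):

    #include <linux/seq_file.h>

    static int example_show(struct seq_file *seq, void *v)
    {
            seq_puts(seq, "Flags: ");               /* constant text */
            seq_printf(seq, "count: %d\n", 42);     /* formatting needed */
            return 0;
    }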
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index eaba0f68f860..d3027a73fd4b 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
88 88
89void __init ptp_classifier_init(void) 89void __init ptp_classifier_init(void)
90{ 90{
91 static struct sock_filter ptp_filter[] = { 91 static struct sock_filter ptp_filter[] __initdata = {
92 { 0x28, 0, 0, 0x0000000c }, 92 { 0x28, 0, 0, 0x0000000c },
93 { 0x15, 0, 12, 0x00000800 }, 93 { 0x15, 0, 12, 0x00000800 },
94 { 0x30, 0, 0, 0x00000017 }, 94 { 0x30, 0, 0, 0x00000017 },
@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
133 { 0x16, 0, 0, 0x00000000 }, 133 { 0x16, 0, 0, 0x00000000 },
134 { 0x06, 0, 0, 0x00000000 }, 134 { 0x06, 0, 0, 0x00000000 },
135 }; 135 };
136 struct sock_fprog ptp_prog = { 136 struct sock_fprog_kern ptp_prog = {
137 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, 137 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
138 }; 138 };
139 139
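struct sock_fprog_kern (also adopted in the filter.c hunks above) mirrors the UAPI struct sock_fprog but carries a plain kernel pointer, so in-kernel callers of sk_unattached_filter_create() no longer masquerade as __user. A hypothetical in-kernel program built the same way as ptp_prog:

    #include <linux/kernel.h>
    #include <linux/filter.h>

    /* Accept every packet: BPF_RET|BPF_K (0x06) with k = 0xffff. */
    static struct sock_filter accept_all[] = {
            { 0x06, 0, 0, 0x0000ffff },
    };

    static struct sock_fprog_kern accept_prog = {
            .len    = ARRAY_SIZE(accept_all),
            .filter = accept_all,
    };

    /* sk_unattached_filter_create(&fp, &accept_prog); */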
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2d8d8fcfa060..1063996f8317 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -798,8 +798,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
798 size += num_vfs * 798 size += num_vfs *
799 (nla_total_size(sizeof(struct ifla_vf_mac)) + 799 (nla_total_size(sizeof(struct ifla_vf_mac)) +
800 nla_total_size(sizeof(struct ifla_vf_vlan)) + 800 nla_total_size(sizeof(struct ifla_vf_vlan)) +
801 nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 801 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
802 nla_total_size(sizeof(struct ifla_vf_spoofchk))); 802 nla_total_size(sizeof(struct ifla_vf_rate)));
803 return size; 803 return size;
804 } else 804 } else
805 return 0; 805 return 0;
@@ -1065,6 +1065,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1065 struct ifla_vf_info ivi; 1065 struct ifla_vf_info ivi;
1066 struct ifla_vf_mac vf_mac; 1066 struct ifla_vf_mac vf_mac;
1067 struct ifla_vf_vlan vf_vlan; 1067 struct ifla_vf_vlan vf_vlan;
1068 struct ifla_vf_rate vf_rate;
1068 struct ifla_vf_tx_rate vf_tx_rate; 1069 struct ifla_vf_tx_rate vf_tx_rate;
1069 struct ifla_vf_spoofchk vf_spoofchk; 1070 struct ifla_vf_spoofchk vf_spoofchk;
1070 struct ifla_vf_link_state vf_linkstate; 1071 struct ifla_vf_link_state vf_linkstate;
@@ -1085,6 +1086,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1085 break; 1086 break;
1086 vf_mac.vf = 1087 vf_mac.vf =
1087 vf_vlan.vf = 1088 vf_vlan.vf =
1089 vf_rate.vf =
1088 vf_tx_rate.vf = 1090 vf_tx_rate.vf =
1089 vf_spoofchk.vf = 1091 vf_spoofchk.vf =
1090 vf_linkstate.vf = ivi.vf; 1092 vf_linkstate.vf = ivi.vf;
@@ -1092,7 +1094,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1092 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1094 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1093 vf_vlan.vlan = ivi.vlan; 1095 vf_vlan.vlan = ivi.vlan;
1094 vf_vlan.qos = ivi.qos; 1096 vf_vlan.qos = ivi.qos;
1095 vf_tx_rate.rate = ivi.tx_rate; 1097 vf_tx_rate.rate = ivi.max_tx_rate;
1098 vf_rate.min_tx_rate = ivi.min_tx_rate;
1099 vf_rate.max_tx_rate = ivi.max_tx_rate;
1096 vf_spoofchk.setting = ivi.spoofchk; 1100 vf_spoofchk.setting = ivi.spoofchk;
1097 vf_linkstate.link_state = ivi.linkstate; 1101 vf_linkstate.link_state = ivi.linkstate;
1098 vf = nla_nest_start(skb, IFLA_VF_INFO); 1102 vf = nla_nest_start(skb, IFLA_VF_INFO);
@@ -1102,6 +1106,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1102 } 1106 }
1103 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || 1107 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1104 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || 1108 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1109 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1110 &vf_rate) ||
1105 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 1111 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1106 &vf_tx_rate) || 1112 &vf_tx_rate) ||
1107 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 1113 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@ -1208,6 +1214,10 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1208 .len = sizeof(struct ifla_vf_tx_rate) }, 1214 .len = sizeof(struct ifla_vf_tx_rate) },
1209 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, 1215 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
1210 .len = sizeof(struct ifla_vf_spoofchk) }, 1216 .len = sizeof(struct ifla_vf_spoofchk) },
1217 [IFLA_VF_RATE] = { .type = NLA_BINARY,
1218 .len = sizeof(struct ifla_vf_rate) },
1219 [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
1220 .len = sizeof(struct ifla_vf_link_state) },
1211}; 1221};
1212 1222
1213static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1223static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1234,6 +1244,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1234 struct nlattr *tb[IFLA_MAX+1]; 1244 struct nlattr *tb[IFLA_MAX+1];
1235 u32 ext_filter_mask = 0; 1245 u32 ext_filter_mask = 0;
1236 int err; 1246 int err;
1247 int hdrlen;
1237 1248
1238 s_h = cb->args[0]; 1249 s_h = cb->args[0];
1239 s_idx = cb->args[1]; 1250 s_idx = cb->args[1];
@@ -1241,8 +1252,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1241 rcu_read_lock(); 1252 rcu_read_lock();
1242 cb->seq = net->dev_base_seq; 1253 cb->seq = net->dev_base_seq;
1243 1254
1244 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, 1255 /* A hack to preserve kernel<->userspace interface.
1245 ifla_policy) >= 0) { 1256 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1257 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1258 * what iproute2 < v3.9.0 used.
1259 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1260 * attribute, its netlink message is shorter than struct ifinfomsg.
1261 */
1262 hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1263 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1264
1265 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1246 1266
1247 if (tb[IFLA_EXT_MASK]) 1267 if (tb[IFLA_EXT_MASK])
1248 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 1268 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@ -1367,11 +1387,29 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
1367 } 1387 }
1368 case IFLA_VF_TX_RATE: { 1388 case IFLA_VF_TX_RATE: {
1369 struct ifla_vf_tx_rate *ivt; 1389 struct ifla_vf_tx_rate *ivt;
1390 struct ifla_vf_info ivf;
1370 ivt = nla_data(vf); 1391 ivt = nla_data(vf);
1371 err = -EOPNOTSUPP; 1392 err = -EOPNOTSUPP;
1372 if (ops->ndo_set_vf_tx_rate) 1393 if (ops->ndo_get_vf_config)
1373 err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, 1394 err = ops->ndo_get_vf_config(dev, ivt->vf,
1374 ivt->rate); 1395 &ivf);
1396 if (err)
1397 break;
1398 err = -EOPNOTSUPP;
1399 if (ops->ndo_set_vf_rate)
1400 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1401 ivf.min_tx_rate,
1402 ivt->rate);
1403 break;
1404 }
1405 case IFLA_VF_RATE: {
1406 struct ifla_vf_rate *ivt;
1407 ivt = nla_data(vf);
1408 err = -EOPNOTSUPP;
1409 if (ops->ndo_set_vf_rate)
1410 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1411 ivt->min_tx_rate,
1412 ivt->max_tx_rate);
1375 break; 1413 break;
1376 } 1414 }
1377 case IFLA_VF_SPOOFCHK: { 1415 case IFLA_VF_SPOOFCHK: {
@@ -1744,7 +1782,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
1744 1782
1745 ops->dellink(dev, &list_kill); 1783 ops->dellink(dev, &list_kill);
1746 unregister_netdevice_many(&list_kill); 1784 unregister_netdevice_many(&list_kill);
1747 list_del(&list_kill);
1748 return 0; 1785 return 0;
1749} 1786}
1750 1787
@@ -2019,11 +2056,15 @@ replay:
2019 if (ops->newlink) { 2056 if (ops->newlink) {
2020 err = ops->newlink(net, dev, tb, data); 2057 err = ops->newlink(net, dev, tb, data);
2021 /* Drivers should call free_netdev() in ->destructor 2058 /* Drivers should call free_netdev() in ->destructor
2022 * and unregister it on failure so that device could be 2059 * and unregister it on failure after registration
2023 * finally freed in rtnl_unlock. 2060 * so that device could be finally freed in rtnl_unlock.
2024 */ 2061 */
2025 if (err < 0) 2062 if (err < 0) {
2063 /* If device is not registered at all, free it now */
2064 if (dev->reg_state == NETREG_UNINITIALIZED)
2065 free_netdev(dev);
2026 goto out; 2066 goto out;
2067 }
2027 } else { 2068 } else {
2028 err = register_netdevice(dev); 2069 err = register_netdevice(dev);
2029 if (err < 0) { 2070 if (err < 0) {
@@ -2095,9 +2136,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2095 struct nlattr *tb[IFLA_MAX+1]; 2136 struct nlattr *tb[IFLA_MAX+1];
2096 u32 ext_filter_mask = 0; 2137 u32 ext_filter_mask = 0;
2097 u16 min_ifinfo_dump_size = 0; 2138 u16 min_ifinfo_dump_size = 0;
2139 int hdrlen;
2140
2141 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2142 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2143 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2098 2144
2099 if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, 2145 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
2100 ifla_policy) >= 0) {
2101 if (tb[IFLA_EXT_MASK]) 2146 if (tb[IFLA_EXT_MASK])
2102 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 2147 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2103 } 2148 }
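The compatibility logic added to rtnl_dump_ifinfo() and rtnl_calcit() boils down to a single comparison: a legacy rtgenmsg-based request can never be as long as struct ifinfomsg, even with an IFLA_EXT_MASK attribute attached. A condensed restatement (helper name hypothetical):

    #include <net/netlink.h>
    #include <linux/rtnetlink.h>

    static int ifinfo_hdrlen(const struct nlmsghdr *nlh)
    {
            /* old iproute2: 1-byte rtgenmsg; newer tools: ifinfomsg */
            return nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
                   sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
    }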
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 897da56f3aff..ba71212f0251 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
85#endif 85#endif
86 86
87#ifdef CONFIG_INET 87#ifdef CONFIG_INET
88__u32 secure_ip_id(__be32 daddr)
89{
90 u32 hash[MD5_DIGEST_WORDS];
91
92 net_secret_init();
93 hash[0] = (__force __u32) daddr;
94 hash[1] = net_secret[13];
95 hash[2] = net_secret[14];
96 hash[3] = net_secret[15];
97
98 md5_transform(hash, net_secret);
99
100 return hash[0];
101}
102
103__u32 secure_ipv6_id(const __be32 daddr[4])
104{
105 __u32 hash[4];
106
107 net_secret_init();
108 memcpy(hash, daddr, 16);
109 md5_transform(hash, net_secret);
110
111 return hash[0];
112}
113 88
114__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 89__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
115 __be16 sport, __be16 dport) 90 __be16 sport, __be16 dport)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8383b2bddeb9..bf92824af3f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
694#endif 694#endif
695 memcpy(new->cb, old->cb, sizeof(old->cb)); 695 memcpy(new->cb, old->cb, sizeof(old->cb));
696 new->csum = old->csum; 696 new->csum = old->csum;
697 new->local_df = old->local_df; 697 new->ignore_df = old->ignore_df;
698 new->pkt_type = old->pkt_type; 698 new->pkt_type = old->pkt_type;
699 new->ip_summed = old->ip_summed; 699 new->ip_summed = old->ip_summed;
700 skb_copy_queue_mapping(new, old); 700 skb_copy_queue_mapping(new, old);
@@ -951,10 +951,13 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
951EXPORT_SYMBOL(skb_copy); 951EXPORT_SYMBOL(skb_copy);
952 952
953/** 953/**
954 * __pskb_copy - create copy of an sk_buff with private head. 954 * __pskb_copy_fclone - create copy of an sk_buff with private head.
955 * @skb: buffer to copy 955 * @skb: buffer to copy
956 * @headroom: headroom of new skb 956 * @headroom: headroom of new skb
957 * @gfp_mask: allocation priority 957 * @gfp_mask: allocation priority
958 * @fclone: if true, allocate the copy of the skb from the fclone
959 * cache instead of the head cache; set this to true when the copy
960 * is likely to be cloned
958 * 961 *
959 * Make a copy of both an &sk_buff and part of its data, located 962 * Make a copy of both an &sk_buff and part of its data, located
960 * in header. Fragmented data remain shared. This is used when 963 * in header. Fragmented data remain shared. This is used when
@@ -964,11 +967,12 @@ EXPORT_SYMBOL(skb_copy);
964 * The returned buffer has a reference count of 1. 967 * The returned buffer has a reference count of 1.
965 */ 968 */
966 969
967struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) 970struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
971 gfp_t gfp_mask, bool fclone)
968{ 972{
969 unsigned int size = skb_headlen(skb) + headroom; 973 unsigned int size = skb_headlen(skb) + headroom;
970 struct sk_buff *n = __alloc_skb(size, gfp_mask, 974 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
971 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 975 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
972 976
973 if (!n) 977 if (!n)
974 goto out; 978 goto out;
@@ -1008,7 +1012,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
1008out: 1012out:
1009 return n; 1013 return n;
1010} 1014}
1011EXPORT_SYMBOL(__pskb_copy); 1015EXPORT_SYMBOL(__pskb_copy_fclone);
1012 1016
1013/** 1017/**
1014 * pskb_expand_head - reallocate header of &sk_buff 1018 * pskb_expand_head - reallocate header of &sk_buff
@@ -2881,12 +2885,14 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2881 int pos; 2885 int pos;
2882 int dummy; 2886 int dummy;
2883 2887
2888 __skb_push(head_skb, doffset);
2884 proto = skb_network_protocol(head_skb, &dummy); 2889 proto = skb_network_protocol(head_skb, &dummy);
2885 if (unlikely(!proto)) 2890 if (unlikely(!proto))
2886 return ERR_PTR(-EINVAL); 2891 return ERR_PTR(-EINVAL);
2887 2892
2888 csum = !!can_checksum_protocol(features, proto); 2893 csum = !head_skb->encap_hdr_csum &&
2889 __skb_push(head_skb, doffset); 2894 !!can_checksum_protocol(features, proto);
2895
2890 headroom = skb_headroom(head_skb); 2896 headroom = skb_headroom(head_skb);
2891 pos = skb_headlen(head_skb); 2897 pos = skb_headlen(head_skb);
2892 2898
@@ -2983,6 +2989,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2983 nskb->csum = skb_copy_and_csum_bits(head_skb, offset, 2989 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
2984 skb_put(nskb, len), 2990 skb_put(nskb, len),
2985 len, 0); 2991 len, 0);
2992 SKB_GSO_CB(nskb)->csum_start =
2993 skb_headroom(nskb) + offset;
2986 continue; 2994 continue;
2987 } 2995 }
2988 2996
@@ -3052,6 +3060,8 @@ perform_csum_check:
3052 nskb->csum = skb_checksum(nskb, doffset, 3060 nskb->csum = skb_checksum(nskb, doffset,
3053 nskb->len - doffset, 0); 3061 nskb->len - doffset, 0);
3054 nskb->ip_summed = CHECKSUM_NONE; 3062 nskb->ip_summed = CHECKSUM_NONE;
3063 SKB_GSO_CB(nskb)->csum_start =
3064 skb_headroom(nskb) + doffset;
3055 } 3065 }
3056 } while ((offset += len) < head_skb->len); 3066 } while ((offset += len) < head_skb->len);
3057 3067
@@ -3913,7 +3923,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3913 skb->tstamp.tv64 = 0; 3923 skb->tstamp.tv64 = 0;
3914 skb->pkt_type = PACKET_HOST; 3924 skb->pkt_type = PACKET_HOST;
3915 skb->skb_iif = 0; 3925 skb->skb_iif = 0;
3916 skb->local_df = 0; 3926 skb->ignore_df = 0;
3917 skb_dst_drop(skb); 3927 skb_dst_drop(skb);
3918 skb->mark = 0; 3928 skb->mark = 0;
3919 secpath_reset(skb); 3929 secpath_reset(skb);
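Existing __pskb_copy() callers are untouched by this hunk, so the rename presumably pairs with a header-side compatibility wrapper along these lines (an assumption; the wrapper is not part of the hunk shown):

    static inline struct sk_buff *__pskb_copy(struct sk_buff *skb,
                                              int headroom, gfp_t gfp_mask)
    {
            return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
    }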
diff --git a/net/core/sock.c b/net/core/sock.c
index 664ee4295b6f..026e01f70274 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -784,7 +784,7 @@ set_rcvbuf:
784 break; 784 break;
785 785
786 case SO_NO_CHECK: 786 case SO_NO_CHECK:
787 sk->sk_no_check = valbool; 787 sk->sk_no_check_tx = valbool;
788 break; 788 break;
789 789
790 case SO_PRIORITY: 790 case SO_PRIORITY:
@@ -1064,7 +1064,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1064 break; 1064 break;
1065 1065
1066 case SO_NO_CHECK: 1066 case SO_NO_CHECK:
1067 v.val = sk->sk_no_check; 1067 v.val = sk->sk_no_check_tx;
1068 break; 1068 break;
1069 1069
1070 case SO_PRIORITY: 1070 case SO_PRIORITY:
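The sk_no_check -> sk_no_check_tx rename reflects that SO_NO_CHECK has only ever governed transmit checksums. Nothing changes for userspace; a minimal sketch of the option being set on a UDP socket, assuming the libc socket headers expose SO_NO_CHECK (the asm-generic value 11 is used as a fallback):

    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef SO_NO_CHECK
    #define SO_NO_CHECK 11          /* asm-generic/socket.h value */
    #endif

    static int disable_udp_tx_csum(int fd)
    {
            int one = 1;

            return setsockopt(fd, SOL_SOCKET, SO_NO_CHECK,
                              &one, sizeof(one));
    }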
diff --git a/net/core/tso.c b/net/core/tso.c
new file mode 100644
index 000000000000..8c3203c585b0
--- /dev/null
+++ b/net/core/tso.c
@@ -0,0 +1,77 @@
1#include <linux/export.h>
2#include <net/ip.h>
3#include <net/tso.h>
4
5/* Calculate expected number of TX descriptors */
6int tso_count_descs(struct sk_buff *skb)
7{
8 /* The Marvell Way */
9 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
10}
11EXPORT_SYMBOL(tso_count_descs);
12
13void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
14 int size, bool is_last)
15{
16 struct iphdr *iph;
17 struct tcphdr *tcph;
18 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
19 int mac_hdr_len = skb_network_offset(skb);
20
21 memcpy(hdr, skb->data, hdr_len);
22 iph = (struct iphdr *)(hdr + mac_hdr_len);
23 iph->id = htons(tso->ip_id);
24 iph->tot_len = htons(size + hdr_len - mac_hdr_len);
25 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
26 tcph->seq = htonl(tso->tcp_seq);
27 tso->ip_id++;
28
29 if (!is_last) {
30 /* Clear all special flags for not last packet */
31 tcph->psh = 0;
32 tcph->fin = 0;
33 tcph->rst = 0;
34 }
35}
36EXPORT_SYMBOL(tso_build_hdr);
37
38void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
39{
40 tso->tcp_seq += size;
41 tso->size -= size;
42 tso->data += size;
43
44 if ((tso->size == 0) &&
45 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
46 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
47
48 /* Move to next segment */
49 tso->size = frag->size;
50 tso->data = page_address(frag->page.p) + frag->page_offset;
51 tso->next_frag_idx++;
52 }
53}
54EXPORT_SYMBOL(tso_build_data);
55
56void tso_start(struct sk_buff *skb, struct tso_t *tso)
57{
58 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
59
60 tso->ip_id = ntohs(ip_hdr(skb)->id);
61 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
62 tso->next_frag_idx = 0;
63
64 /* Build first data */
65 tso->size = skb_headlen(skb) - hdr_len;
66 tso->data = skb->data + hdr_len;
67 if ((tso->size == 0) &&
68 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
69 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
70
71 /* Move to next segment */
72 tso->size = frag->size;
73 tso->data = page_address(frag->page.p) + frag->page_offset;
74 tso->next_frag_idx++;
75 }
76}
77EXPORT_SYMBOL(tso_start);
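net/core/tso.c is only a helper library; the driver still owns the segmentation loop. A rough sketch of the intended calling pattern -- ring management, DMA mapping and the per-segment header buffer are all hypothetical here:

    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/tso.h>

    static void example_xmit_tso(struct sk_buff *skb, char *hdr_buf)
    {
            int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
            int mss = skb_shinfo(skb)->gso_size;
            int left = skb->len - hdr_len;
            struct tso_t tso;

            /* tso_count_descs(skb) would size the ring reservation */
            tso_start(skb, &tso);

            while (left > 0) {
                    int seg = min(mss, left);
                    bool last = (seg == left);
                    int todo = seg;

                    /* one descriptor for the rebuilt headers */
                    tso_build_hdr(skb, hdr_buf, &tso, seg, last);

                    /* then data descriptors covering seg bytes */
                    while (todo > 0) {
                            int chunk = min(todo, tso.size);

                            /* DMA-map tso.data for chunk bytes here */
                            tso_build_data(skb, &tso, chunk);
                            todo -= chunk;
                    }
                    left -= seg;
            }
    }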
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 22b5d818b200..6ca645c4b48e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1024,7 +1024,6 @@ static struct inet_protosw dccp_v4_protosw = {
1024 .protocol = IPPROTO_DCCP, 1024 .protocol = IPPROTO_DCCP,
1025 .prot = &dccp_v4_prot, 1025 .prot = &dccp_v4_prot,
1026 .ops = &inet_dccp_ops, 1026 .ops = &inet_dccp_ops,
1027 .no_check = 0,
1028 .flags = INET_PROTOSW_ICSK, 1027 .flags = INET_PROTOSW_ICSK,
1029}; 1028};
1030 1029
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index eb892b4f4814..de2c1e719305 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1084 1084
1085static inline int dccp_mib_init(void) 1085static inline int dccp_mib_init(void)
1086{ 1086{
1087 return snmp_mib_init((void __percpu **)dccp_statistics, 1087 dccp_statistics = alloc_percpu(struct dccp_mib);
1088 sizeof(struct dccp_mib), 1088 if (!dccp_statistics)
1089 __alignof__(struct dccp_mib)); 1089 return -ENOMEM;
1090 return 0;
1090} 1091}
1091 1092
1092static inline void dccp_mib_exit(void) 1093static inline void dccp_mib_exit(void)
1093{ 1094{
1094 snmp_mib_free((void __percpu **)dccp_statistics); 1095 free_percpu(dccp_statistics);
1095} 1096}
1096 1097
1097static int thash_entries; 1098static int thash_entries;
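The dccp_mib_init() change follows the tree-wide replacement of the snmp_mib_init()/snmp_mib_free() pair with plain per-CPU allocation. The resulting pattern, sketched with hypothetical names:

    #include <linux/percpu.h>

    struct example_mib {
            unsigned long in_pkts;
    };

    static struct example_mib __percpu *stats;

    static int stats_init(void)
    {
            stats = alloc_percpu(struct example_mib);
            return stats ? 0 : -ENOMEM;
    }

    static void stats_bump(void)
    {
            this_cpu_inc(stats->in_pkts);   /* lock-free, per-CPU */
    }

    static void stats_exit(void)
    {
            free_percpu(stats);
    }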
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 607ab71b5a0c..53731e45403c 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -20,6 +20,7 @@
20 20
21/* Boundary values */ 21/* Boundary values */
22static int zero = 0, 22static int zero = 0,
23 one = 1,
23 u8_max = 0xFF; 24 u8_max = 0xFF;
24static unsigned long seqw_min = DCCPF_SEQ_WMIN, 25static unsigned long seqw_min = DCCPF_SEQ_WMIN,
25 seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */ 26 seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
@@ -58,7 +59,7 @@ static struct ctl_table dccp_default_table[] = {
58 .maxlen = sizeof(sysctl_dccp_request_retries), 59 .maxlen = sizeof(sysctl_dccp_request_retries),
59 .mode = 0644, 60 .mode = 0644,
60 .proc_handler = proc_dointvec_minmax, 61 .proc_handler = proc_dointvec_minmax,
61 .extra1 = &zero, 62 .extra1 = &one,
62 .extra2 = &u8_max, 63 .extra2 = &u8_max,
63 }, 64 },
64 { 65 {
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 16f0b223102e..1cd46a345cb0 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -280,7 +280,7 @@ static ktime_t dccp_timestamp_seed;
280 */ 280 */
281u32 dccp_timestamp(void) 281u32 dccp_timestamp(void)
282{ 282{
283 s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); 283 u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
284 284
285 do_div(delta, 10); 285 do_div(delta, 10);
286 return delta; 286 return delta;
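Background for the cast above: do_div() divides a u64 in place and evaluates to the 32-bit remainder, and its argument must be an unsigned 64-bit lvalue -- handing it an s64 is not supported. For reference (hypothetical helper):

    #include <linux/types.h>
    #include <asm/div64.h>

    static u32 scale_down(u64 us)
    {
            u32 rem;

            rem = do_div(us, 10);   /* us /= 10; rem = old us % 10 */
            (void)rem;
            return (u32)us;
    }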
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4c04848953bd..ae011b46c071 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -481,7 +481,7 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
481 481
482 sk->sk_backlog_rcv = dn_nsp_backlog_rcv; 482 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
483 sk->sk_destruct = dn_destruct; 483 sk->sk_destruct = dn_destruct;
484 sk->sk_no_check = 1; 484 sk->sk_no_check_tx = 1;
485 sk->sk_family = PF_DECnet; 485 sk->sk_family = PF_DECnet;
486 sk->sk_protocol = 0; 486 sk->sk_protocol = 0;
487 sk->sk_allocation = gfp; 487 sk->sk_allocation = gfp;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index e7b6d53eef88..9acec61f5433 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -93,8 +93,8 @@ int dns_query(const char *type, const char *name, size_t namelen,
93 } 93 }
94 94
95 if (!namelen) 95 if (!namelen)
96 namelen = strlen(name); 96 namelen = strnlen(name, 256);
97 if (namelen < 3) 97 if (namelen < 3 || namelen > 255)
98 return -EINVAL; 98 return -EINVAL;
99 desclen += namelen + 1; 99 desclen += namelen + 1;
100 100
@@ -149,7 +149,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
149 if (!*_result) 149 if (!*_result)
150 goto put; 150 goto put;
151 151
152 memcpy(*_result, upayload->data, len + 1); 152 memcpy(*_result, upayload->data, len);
153 (*_result)[len] = '\0';
154
153 if (_expiry) 155 if (_expiry)
154 *_expiry = rkey->expiry; 156 *_expiry = rkey->expiry;
155 157
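The parentheses in the termination line matter because unary * binds more loosely than []: *_result[len] would parse as *(_result[len]), dereferencing the wrong pointer entirely. A plain-C illustration (hypothetical helper):

    #include <string.h>
    #include <stdlib.h>

    static int copy_and_terminate(char **out, const char *src, size_t len)
    {
            *out = malloc(len + 1);
            if (!*out)
                    return -1;
            memcpy(*out, src, len);
            (*out)[len] = '\0';     /* not *out[len], i.e. *(out[len]) */
            return 0;
    }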
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 02c0e1716f64..64c5af0a10dd 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -346,7 +346,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
346 return slave_dev; 346 return slave_dev;
347 347
348 slave_dev->features = master->vlan_features; 348 slave_dev->features = master->vlan_features;
349 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); 349 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
350 eth_hw_addr_inherit(slave_dev, master); 350 eth_hw_addr_inherit(slave_dev, master);
351 slave_dev->tx_queue_len = 0; 351 slave_dev->tx_queue_len = 0;
352 352
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 0f5a69ed746d..fe6bd7a71081 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -92,6 +92,7 @@ static int lowpan_header_create(struct sk_buff *skb,
92 const u8 *saddr = _saddr; 92 const u8 *saddr = _saddr;
93 const u8 *daddr = _daddr; 93 const u8 *daddr = _daddr;
94 struct ieee802154_addr sa, da; 94 struct ieee802154_addr sa, da;
95 struct ieee802154_mac_cb *cb = mac_cb_init(skb);
95 96
96 /* TODO: 97 /* TODO:
97 * if this package isn't ipv6 one, where should it be routed? 98 * if this package isn't ipv6 one, where should it be routed?
@@ -115,8 +116,7 @@ static int lowpan_header_create(struct sk_buff *skb,
115 * from MAC subif of the 'dev' and 'real_dev' network devices, but 116 * from MAC subif of the 'dev' and 'real_dev' network devices, but
116 * this isn't implemented in mainline yet, so currently we assign 0xff 117 * this isn't implemented in mainline yet, so currently we assign 0xff
117 */ 118 */
118 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 119 cb->type = IEEE802154_FC_TYPE_DATA;
119 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
120 120
121 /* prepare wpan address data */ 121 /* prepare wpan address data */
122 sa.mode = IEEE802154_ADDR_LONG; 122 sa.mode = IEEE802154_ADDR_LONG;
@@ -135,11 +135,10 @@ static int lowpan_header_create(struct sk_buff *skb,
135 } else { 135 } else {
136 da.mode = IEEE802154_ADDR_LONG; 136 da.mode = IEEE802154_ADDR_LONG;
137 da.extended_addr = ieee802154_devaddr_from_raw(daddr); 137 da.extended_addr = ieee802154_devaddr_from_raw(daddr);
138
139 /* request acknowledgment */
140 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
141 } 138 }
142 139
140 cb->ackreq = !lowpan_is_addr_broadcast(daddr);
141
143 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, 142 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
144 type, (void *)&da, (void *)&sa, 0); 143 type, (void *)&da, (void *)&sa, 0);
145} 144}
@@ -221,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
221 return 0; 220 return 0;
222} 221}
223 222
224static int 223static struct sk_buff*
225lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, 224lowpan_alloc_frag(struct sk_buff *skb, int size,
226 int mlen, int plen, int offset, int type) 225 const struct ieee802154_hdr *master_hdr)
227{ 226{
227 struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
228 struct sk_buff *frag; 228 struct sk_buff *frag;
229 int hlen; 229 int rc;
230 230
231 hlen = (type == LOWPAN_DISPATCH_FRAG1) ? 231 frag = alloc_skb(real_dev->hard_header_len +
232 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE; 232 real_dev->needed_tailroom + size,
233 233 GFP_ATOMIC);
234 raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); 234
235 if (likely(frag)) {
236 frag->dev = real_dev;
237 frag->priority = skb->priority;
238 skb_reserve(frag, real_dev->hard_header_len);
239 skb_reset_network_header(frag);
240 *mac_cb(frag) = *mac_cb(skb);
241
242 rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
243 &master_hdr->source, size);
244 if (rc < 0) {
245 kfree_skb(frag);
246 return ERR_PTR(rc);
247 }
248 } else {
249 frag = ERR_PTR(-ENOMEM);
250 }
235 251
236 frag = netdev_alloc_skb(skb->dev, 252 return frag;
237 hlen + mlen + plen + IEEE802154_MFR_SIZE); 253}
238 if (!frag)
239 return -ENOMEM;
240 254
241 frag->priority = skb->priority; 255static int
256lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
257 u8 *frag_hdr, int frag_hdrlen,
258 int offset, int len)
259{
260 struct sk_buff *frag;
242 261
243 /* copy header, MFR and payload */ 262 raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
244 skb_put(frag, mlen);
245 skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
246 263
247 skb_put(frag, hlen); 264 frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
248 skb_copy_to_linear_data_offset(frag, mlen, head, hlen); 265 if (IS_ERR(frag))
266 return PTR_ERR(frag);
249 267
250 skb_put(frag, plen); 268 memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
251 skb_copy_to_linear_data_offset(frag, mlen + hlen, 269 memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
252 skb_network_header(skb) + offset, plen);
253 270
254 raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len); 271 raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
255 272
256 return dev_queue_xmit(frag); 273 return dev_queue_xmit(frag);
257} 274}
258 275
259static int 276static int
260lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev) 277lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
278 const struct ieee802154_hdr *wpan_hdr)
261{ 279{
262 int err; 280 u16 dgram_size, dgram_offset;
263 u16 dgram_offset, dgram_size, payload_length, header_length, 281 __be16 frag_tag;
264 lowpan_size, frag_plen, offset; 282 u8 frag_hdr[5];
265 __be16 tag; 283 int frag_cap, frag_len, payload_cap, rc;
266 u8 head[5]; 284 int skb_unprocessed, skb_offset;
267 285
268 header_length = skb->mac_len;
269 payload_length = skb->len - header_length;
270 tag = lowpan_dev_info(dev)->fragment_tag++;
271 lowpan_size = skb_network_header_len(skb);
272 dgram_size = lowpan_uncompress_size(skb, &dgram_offset) - 286 dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
273 header_length; 287 skb->mac_len;
288 frag_tag = lowpan_dev_info(dev)->fragment_tag++;
274 289
275 /* first fragment header */ 290 frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
276 head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7); 291 frag_hdr[1] = dgram_size & 0xff;
277 head[1] = dgram_size & 0xff; 292 memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
278 memcpy(head + 2, &tag, sizeof(tag));
279 293
280 /* calc the nearest payload length(divided to 8) for first fragment 294 payload_cap = ieee802154_max_payload(wpan_hdr);
281 * which fits into a IEEE802154_MTU
282 */
283 frag_plen = round_down(IEEE802154_MTU - header_length -
284 LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
285 IEEE802154_MFR_SIZE, 8);
286
287 err = lowpan_fragment_xmit(skb, head, header_length,
288 frag_plen + lowpan_size, 0,
289 LOWPAN_DISPATCH_FRAG1);
290 if (err) {
291 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
292 __func__, tag);
293 goto exit;
294 }
295 295
296 offset = lowpan_size + frag_plen; 296 frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
297 dgram_offset += frag_plen; 297 skb_network_header_len(skb), 8);
298 298
299 /* next fragment header */ 299 skb_offset = skb_network_header_len(skb);
300 head[0] &= ~LOWPAN_DISPATCH_FRAG1; 300 skb_unprocessed = skb->len - skb->mac_len - skb_offset;
301 head[0] |= LOWPAN_DISPATCH_FRAGN;
302 301
303 frag_plen = round_down(IEEE802154_MTU - header_length - 302 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
304 LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8); 303 LOWPAN_FRAG1_HEAD_SIZE, 0,
304 frag_len + skb_network_header_len(skb));
305 if (rc) {
306 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
307 __func__, frag_tag);
308 goto err;
309 }
305 310
306 while (payload_length - offset > 0) { 311 frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
307 int len = frag_plen; 312 frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
313 frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
308 314
309 head[4] = dgram_offset >> 3; 315 do {
316 dgram_offset += frag_len;
317 skb_offset += frag_len;
318 skb_unprocessed -= frag_len;
319 frag_len = min(frag_cap, skb_unprocessed);
310 320
311 if (payload_length - offset < len) 321 frag_hdr[4] = dgram_offset >> 3;
312 len = payload_length - offset;
313 322
314 err = lowpan_fragment_xmit(skb, head, header_length, len, 323 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
315 offset, LOWPAN_DISPATCH_FRAGN); 324 LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
316 if (err) { 325 frag_len);
326 if (rc) {
317 pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n", 327 pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
318 __func__, tag, offset); 328 __func__, frag_tag, skb_offset);
319 goto exit; 329 goto err;
320 } 330 }
331 } while (skb_unprocessed > frag_cap);
321 332
322 offset += len; 333 consume_skb(skb);
323 dgram_offset += len; 334 return NET_XMIT_SUCCESS;
324 }
325 335
326exit: 336err:
327 return err; 337 kfree_skb(skb);
338 return rc;
328} 339}
329 340
330static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) 341static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
331{ 342{
332 int err = -1; 343 struct ieee802154_hdr wpan_hdr;
344 int max_single;
333 345
334 pr_debug("package xmit\n"); 346 pr_debug("package xmit\n");
335 347
336 skb->dev = lowpan_dev_info(dev)->real_dev; 348 if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
337 if (skb->dev == NULL) { 349 kfree_skb(skb);
338 pr_debug("ERROR: no real wpan device found\n"); 350 return NET_XMIT_DROP;
339 goto error;
340 } 351 }
341 352
342 /* Send directly if less than the MTU minus the 2 checksum bytes. */ 353 max_single = ieee802154_max_payload(&wpan_hdr);
343 if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
344 err = dev_queue_xmit(skb);
345 goto out;
346 }
347 354
348 pr_debug("frame is too big, fragmentation is needed\n"); 355 if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
349 err = lowpan_skb_fragmentation(skb, dev); 356 skb->dev = lowpan_dev_info(dev)->real_dev;
350error: 357 return dev_queue_xmit(skb);
351 dev_kfree_skb(skb); 358 } else {
352out: 359 netdev_tx_t rc;
353 if (err) 360
354 pr_debug("ERROR: xmit failed\n"); 361 pr_debug("frame is too big, fragmentation is needed\n");
362 rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
355 363
356 return (err < 0) ? NET_XMIT_DROP : err; 364 return rc < 0 ? NET_XMIT_DROP : rc;
365 }
357} 366}
358 367
359static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) 368static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
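For reference, the 5-byte frag_hdr[] built in lowpan_xmit_fragmented() carries the RFC 4944 fragmentation headers: FRAG1 uses only the first four bytes, FRAGN adds the datagram offset. A standalone sketch, with dispatch values assumed to match the in-tree LOWPAN_DISPATCH_FRAG1/FRAGN constants:

    #include <linux/string.h>
    #include <linux/types.h>

    #define DISPATCH_FRAG1  0xc0    /* 11000xxx, RFC 4944 */
    #define DISPATCH_FRAGN  0xe0    /* 11100xxx */

    static void build_frag1(u8 hdr[5], u16 dgram_size, __be16 tag)
    {
            hdr[0] = DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
            hdr[1] = dgram_size & 0xff;
            memcpy(hdr + 2, &tag, sizeof(tag));     /* bytes 0..3 used */
    }

    static void frag1_to_fragn(u8 hdr[5], u16 dgram_offset)
    {
            hdr[0] &= ~DISPATCH_FRAG1;
            hdr[0] |= DISPATCH_FRAGN;
            hdr[4] = dgram_offset >> 3;     /* offset in 8-byte units */
    }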
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 786437bc0c08..4f0ed8780194 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -21,6 +21,7 @@
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */ 22 */
23 23
24#include <linux/capability.h>
24#include <linux/net.h> 25#include <linux/net.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/if_arp.h> 27#include <linux/if_arp.h>
@@ -45,7 +46,12 @@ struct dgram_sock {
45 struct ieee802154_addr dst_addr; 46 struct ieee802154_addr dst_addr;
46 47
47 unsigned int bound:1; 48 unsigned int bound:1;
49 unsigned int connected:1;
48 unsigned int want_ack:1; 50 unsigned int want_ack:1;
51 unsigned int secen:1;
52 unsigned int secen_override:1;
53 unsigned int seclevel:3;
54 unsigned int seclevel_override:1;
49}; 55};
50 56
51static inline struct dgram_sock *dgram_sk(const struct sock *sk) 57static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -73,10 +79,7 @@ static int dgram_init(struct sock *sk)
73{ 79{
74 struct dgram_sock *ro = dgram_sk(sk); 80 struct dgram_sock *ro = dgram_sk(sk);
75 81
76 ro->dst_addr.mode = IEEE802154_ADDR_LONG;
77 ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
78 ro->want_ack = 1; 82 ro->want_ack = 1;
79 memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
80 return 0; 83 return 0;
81} 84}
82 85
@@ -183,6 +186,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
183 } 186 }
184 187
185 ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr); 188 ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
189 ro->connected = 1;
186 190
187out: 191out:
188 release_sock(sk); 192 release_sock(sk);
@@ -194,10 +198,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
194 struct dgram_sock *ro = dgram_sk(sk); 198 struct dgram_sock *ro = dgram_sk(sk);
195 199
196 lock_sock(sk); 200 lock_sock(sk);
197 201 ro->connected = 0;
198 ro->dst_addr.mode = IEEE802154_ADDR_LONG;
199 memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
200
201 release_sock(sk); 202 release_sock(sk);
202 203
203 return 0; 204 return 0;
@@ -209,7 +210,9 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
209 struct net_device *dev; 210 struct net_device *dev;
210 unsigned int mtu; 211 unsigned int mtu;
211 struct sk_buff *skb; 212 struct sk_buff *skb;
213 struct ieee802154_mac_cb *cb;
212 struct dgram_sock *ro = dgram_sk(sk); 214 struct dgram_sock *ro = dgram_sk(sk);
215 struct ieee802154_addr dst_addr;
213 int hlen, tlen; 216 int hlen, tlen;
214 int err; 217 int err;
215 218
@@ -218,6 +221,11 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
218 return -EOPNOTSUPP; 221 return -EOPNOTSUPP;
219 } 222 }
220 223
224 if (!ro->connected && !msg->msg_name)
225 return -EDESTADDRREQ;
226 else if (ro->connected && msg->msg_name)
227 return -EISCONN;
228
221 if (!ro->bound) 229 if (!ro->bound)
222 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); 230 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
223 else 231 else
@@ -249,18 +257,28 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
249 257
250 skb_reset_network_header(skb); 258 skb_reset_network_header(skb);
251 259
252 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 260 cb = mac_cb_init(skb);
253 if (ro->want_ack) 261 cb->type = IEEE802154_FC_TYPE_DATA;
254 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; 262 cb->ackreq = ro->want_ack;
263
264 if (msg->msg_name) {
265 DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
255 266
256 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); 267 ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
257 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, 268 } else {
258 ro->bound ? &ro->src_addr : NULL, size); 269 dst_addr = ro->dst_addr;
270 }
271
272 cb->secen = ro->secen;
273 cb->secen_override = ro->secen_override;
274 cb->seclevel = ro->seclevel;
275 cb->seclevel_override = ro->seclevel_override;
276
277 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
278 ro->bound ? &ro->src_addr : NULL, size);
259 if (err < 0) 279 if (err < 0)
260 goto out_skb; 280 goto out_skb;
261 281
262 skb_reset_mac_header(skb);
263
264 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 282 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
265 if (err < 0) 283 if (err < 0)
266 goto out_skb; 284 goto out_skb;
@@ -419,6 +437,20 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
419 case WPAN_WANTACK: 437 case WPAN_WANTACK:
420 val = ro->want_ack; 438 val = ro->want_ack;
421 break; 439 break;
440 case WPAN_SECURITY:
441 if (!ro->secen_override)
442 val = WPAN_SECURITY_DEFAULT;
443 else if (ro->secen)
444 val = WPAN_SECURITY_ON;
445 else
446 val = WPAN_SECURITY_OFF;
447 break;
448 case WPAN_SECURITY_LEVEL:
449 if (!ro->seclevel_override)
450 val = WPAN_SECURITY_LEVEL_DEFAULT;
451 else
452 val = ro->seclevel;
453 break;
422 default: 454 default:
423 return -ENOPROTOOPT; 455 return -ENOPROTOOPT;
424 } 456 }
@@ -434,6 +466,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
434 char __user *optval, unsigned int optlen) 466 char __user *optval, unsigned int optlen)
435{ 467{
436 struct dgram_sock *ro = dgram_sk(sk); 468 struct dgram_sock *ro = dgram_sk(sk);
469 struct net *net = sock_net(sk);
437 int val; 470 int val;
438 int err = 0; 471 int err = 0;
439 472
@@ -449,6 +482,47 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
449 case WPAN_WANTACK: 482 case WPAN_WANTACK:
450 ro->want_ack = !!val; 483 ro->want_ack = !!val;
451 break; 484 break;
485 case WPAN_SECURITY:
486 if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
487 !ns_capable(net->user_ns, CAP_NET_RAW)) {
488 err = -EPERM;
489 break;
490 }
491
492 switch (val) {
493 case WPAN_SECURITY_DEFAULT:
494 ro->secen_override = 0;
495 break;
496 case WPAN_SECURITY_ON:
497 ro->secen_override = 1;
498 ro->secen = 1;
499 break;
500 case WPAN_SECURITY_OFF:
501 ro->secen_override = 1;
502 ro->secen = 0;
503 break;
504 default:
505 err = -EINVAL;
506 break;
507 }
508 break;
509 case WPAN_SECURITY_LEVEL:
510 if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
511 !ns_capable(net->user_ns, CAP_NET_RAW)) {
512 err = -EPERM;
513 break;
514 }
515
516 if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
517 val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
518 err = -EINVAL;
519 } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
520 ro->seclevel_override = 0;
521 } else {
522 ro->seclevel_override = 1;
523 ro->seclevel = val;
524 }
525 break;
452 default: 526 default:
453 err = -ENOPROTOOPT; 527 err = -ENOPROTOOPT;
454 break; 528 break;
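The new WPAN_SECURITY and WPAN_SECURITY_LEVEL socket options implement a tri-state override: the per-socket "default" setting defers to the interface-wide policy, while "on"/"off" (or an explicit level) pins it via the *_override bits. The decision reduces to a sketch like this (names hypothetical):

    #include <linux/types.h>

    struct sec_pref {
            unsigned int override:1;
            unsigned int value:1;
    };

    static bool effective_security(struct sec_pref sock_pref,
                                   bool iface_default)
    {
            return sock_pref.override ? sock_pref.value : iface_default;
    }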
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index bed42a48408c..c09294e39ca6 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -195,15 +195,16 @@ ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
195 return pos; 195 return pos;
196} 196}
197 197
198static int ieee802154_sechdr_lengths[4] = {
199 [IEEE802154_SCF_KEY_IMPLICIT] = 5,
200 [IEEE802154_SCF_KEY_INDEX] = 6,
201 [IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
202 [IEEE802154_SCF_KEY_HW_INDEX] = 14,
203};
204
198static int ieee802154_hdr_sechdr_len(u8 sc) 205static int ieee802154_hdr_sechdr_len(u8 sc)
199{ 206{
200 switch (IEEE802154_SCF_KEY_ID_MODE(sc)) { 207 return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
201 case IEEE802154_SCF_KEY_IMPLICIT: return 5;
202 case IEEE802154_SCF_KEY_INDEX: return 6;
203 case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
204 case IEEE802154_SCF_KEY_HW_INDEX: return 14;
205 default: return -EINVAL;
206 }
207} 208}
208 209
209static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr) 210static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
@@ -285,3 +286,40 @@ ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
285 return pos; 286 return pos;
286} 287}
287EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs); 288EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
289
290int
291ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
292{
293 const u8 *buf = skb_mac_header(skb);
294 int pos;
295
296 pos = ieee802154_hdr_peek_addrs(skb, hdr);
297 if (pos < 0)
298 return -EINVAL;
299
300 if (hdr->fc.security_enabled) {
301 u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
302 int want = pos + ieee802154_sechdr_lengths[key_id_mode];
303
304 if (buf + want > skb_tail_pointer(skb))
305 return -EINVAL;
306
307 pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
308 }
309
310 return pos;
311}
312EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
313
314int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
315{
316 int hlen = ieee802154_hdr_minlen(hdr);
317
318 if (hdr->fc.security_enabled) {
319 hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
320 hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
321 }
322
323 return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
324}
325EXPORT_SYMBOL_GPL(ieee802154_max_payload);
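A worked example for the new ieee802154_max_payload() helper, assuming an unsecured frame with long source and destination addresses and intra-PAN compression: the MHR is 2 (frame control) + 1 (sequence number) + 2 (destination PAN id) + 8 + 8 (extended addresses) = 21 bytes, so the helper returns IEEE802154_MTU (127) - 21 - IEEE802154_MFR_SIZE (2, the FCS) = 104 bytes of payload.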
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index 6693a5cf01ce..8b83a231299e 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -68,4 +68,23 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
68int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb); 68int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
69int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info); 69int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
70 70
71int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
72int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
73int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
74int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
75int ieee802154_llsec_dump_keys(struct sk_buff *skb,
76 struct netlink_callback *cb);
77int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
78int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
79int ieee802154_llsec_dump_devs(struct sk_buff *skb,
80 struct netlink_callback *cb);
81int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
82int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
83int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
84 struct netlink_callback *cb);
85int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
86int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
87int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
88 struct netlink_callback *cb);
89
71#endif 90#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 04b20589d97a..26efcf4fd2ff 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -124,6 +124,26 @@ static const struct genl_ops ieee8021154_ops[] = {
124 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, 124 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
125 ieee802154_dump_iface), 125 ieee802154_dump_iface),
126 IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams), 126 IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
127 IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
128 IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
129 IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
130 ieee802154_llsec_dump_keys),
131 IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
132 IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
133 IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
134 ieee802154_llsec_dump_devs),
135 IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
136 IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
137 IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
138 ieee802154_llsec_dump_devkeys),
139 IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
140 IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
141 IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
142 ieee802154_llsec_dump_seclevels),
143 IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
144 ieee802154_llsec_add_seclevel),
145 IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
146 ieee802154_llsec_del_seclevel),
127}; 147};
128 148
129static const struct genl_multicast_group ieee802154_mcgrps[] = { 149static const struct genl_multicast_group ieee802154_mcgrps[] = {
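For readers without the rest of the file: IEEE802154_OP() and IEEE802154_DUMP() are helper macros defined earlier in net/ieee802154/netlink.c that expand to struct genl_ops initializers. Their rough shape, a sketch rather than the verbatim kernel definition:

/* Sketch of the helpers used in the ops table above. */
#define IEEE802154_OP(_cmd, _func)			\
	{						\
		.cmd	= _cmd,				\
		.doit	= _func,			\
		.policy	= ieee802154_policy,		\
		.flags	= GENL_ADMIN_PERM,		\
	}

#define IEEE802154_DUMP(_cmd, _func, _dump)		\
	{						\
		.cmd	= _cmd,				\
		.doit	= _func,			\
		.dumpit	= _dump,			\
		.policy	= ieee802154_policy,		\
	}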
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 5d285498c0f6..a3281b8bfd5b 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -715,3 +715,812 @@ out:
715 dev_put(dev); 715 dev_put(dev);
716 return rc; 716 return rc;
717} 717}
718
719
720
721static int
722ieee802154_llsec_parse_key_id(struct genl_info *info,
723 struct ieee802154_llsec_key_id *desc)
724{
725 memset(desc, 0, sizeof(*desc));
726
727 if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
728 return -EINVAL;
729
730 desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
731
732 if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
 733 if (!info->attrs[IEEE802154_ATTR_PAN_ID] ||
734 !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
735 info->attrs[IEEE802154_ATTR_HW_ADDR]))
736 return -EINVAL;
737
738 desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
739
740 if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
741 desc->device_addr.mode = IEEE802154_ADDR_SHORT;
742 desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
743 } else {
744 desc->device_addr.mode = IEEE802154_ADDR_LONG;
745 desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
746 }
747 }
748
749 if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
750 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
751 return -EINVAL;
752
753 if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
754 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
755 return -EINVAL;
756
757 if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
758 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
759 return -EINVAL;
760
761 if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
762 desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
763
764 switch (desc->mode) {
765 case IEEE802154_SCF_KEY_SHORT_INDEX:
766 {
767 u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
768 desc->short_source = cpu_to_le32(source);
769 break;
770 }
771 case IEEE802154_SCF_KEY_HW_INDEX:
772 desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
773 break;
774 }
775
776 return 0;
777}
778
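To summarize the validation just implemented: each key-id mode pulls in a different set of attributes, and the parser rejects a request that lacks the ones its mode needs. A compilable summary (the mode values 0..3 match the kernel's IEEE802154_SCF_KEY_* constants; attribute names are shortened):

/* Which attributes ieee802154_llsec_parse_key_id() requires,
 * keyed by IEEE802154_ATTR_LLSEC_KEY_MODE. */
static const char *llsec_key_id_requires(int mode)
{
	switch (mode) {
	case 0: /* IEEE802154_SCF_KEY_IMPLICIT */
		return "PAN_ID plus SHORT_ADDR or HW_ADDR";
	case 1: /* IEEE802154_SCF_KEY_INDEX */
		return "LLSEC_KEY_ID";
	case 2: /* IEEE802154_SCF_KEY_SHORT_INDEX */
		return "LLSEC_KEY_ID plus LLSEC_KEY_SOURCE_SHORT (u32)";
	case 3: /* IEEE802154_SCF_KEY_HW_INDEX */
		return "LLSEC_KEY_ID plus LLSEC_KEY_SOURCE_EXTENDED";
	default:
		return "invalid mode";
	}
}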
779static int
780ieee802154_llsec_fill_key_id(struct sk_buff *msg,
781 const struct ieee802154_llsec_key_id *desc)
782{
783 if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
784 return -EMSGSIZE;
785
786 if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
787 if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
788 desc->device_addr.pan_id))
789 return -EMSGSIZE;
790
791 if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
792 nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
793 desc->device_addr.short_addr))
794 return -EMSGSIZE;
795
796 if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
797 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
798 desc->device_addr.extended_addr))
799 return -EMSGSIZE;
800 }
801
802 if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
803 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
804 return -EMSGSIZE;
805
806 if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
807 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
808 le32_to_cpu(desc->short_source)))
809 return -EMSGSIZE;
810
811 if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
812 nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
813 desc->extended_source))
814 return -EMSGSIZE;
815
816 return 0;
817}
818
819int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
820{
821 struct sk_buff *msg;
822 struct net_device *dev = NULL;
823 int rc = -ENOBUFS;
824 struct ieee802154_mlme_ops *ops;
825 void *hdr;
826 struct ieee802154_llsec_params params;
827
828 pr_debug("%s\n", __func__);
829
830 dev = ieee802154_nl_get_dev(info);
831 if (!dev)
832 return -ENODEV;
833
834 ops = ieee802154_mlme_ops(dev);
835 if (!ops->llsec) {
836 rc = -EOPNOTSUPP;
837 goto out_dev;
838 }
839
840 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
841 if (!msg)
842 goto out_dev;
843
844 hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
845 IEEE802154_LLSEC_GETPARAMS);
846 if (!hdr)
847 goto out_free;
848
849 rc = ops->llsec->get_params(dev, &params);
850 if (rc < 0)
851 goto out_free;
852
853 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
854 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
855 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
856 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
857 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
858 be32_to_cpu(params.frame_counter)) ||
859 ieee802154_llsec_fill_key_id(msg, &params.out_key))
860 goto out_free;
861
862 dev_put(dev);
863
864 return ieee802154_nl_reply(msg, info);
865out_free:
866 nlmsg_free(msg);
867out_dev:
868 dev_put(dev);
869 return rc;
870}
871
872int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
873{
874 struct net_device *dev = NULL;
875 int rc = -EINVAL;
876 struct ieee802154_mlme_ops *ops;
877 struct ieee802154_llsec_params params;
878 int changed = 0;
879
880 pr_debug("%s\n", __func__);
881
882 dev = ieee802154_nl_get_dev(info);
883 if (!dev)
884 return -ENODEV;
885
886 if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
887 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
888 !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
889 goto out;
890
891 ops = ieee802154_mlme_ops(dev);
892 if (!ops->llsec) {
893 rc = -EOPNOTSUPP;
894 goto out;
895 }
896
897 if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
898 nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
899 goto out;
900
901 if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
902 params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
903 changed |= IEEE802154_LLSEC_PARAM_ENABLED;
904 }
905
906 if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
907 if (ieee802154_llsec_parse_key_id(info, &params.out_key))
908 goto out;
909
910 changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
911 }
912
913 if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
914 params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
915 changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
916 }
917
918 if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
919 u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
920
921 params.frame_counter = cpu_to_be32(fc);
922 changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
923 }
924
925 rc = ops->llsec->set_params(dev, &params, changed);
926
927 dev_put(dev);
928
929 return rc;
930out:
931 dev_put(dev);
932 return rc;
933}
934
935
936
937struct llsec_dump_data {
938 struct sk_buff *skb;
939 int s_idx, s_idx2;
940 int portid;
941 int nlmsg_seq;
942 struct net_device *dev;
943 struct ieee802154_mlme_ops *ops;
944 struct ieee802154_llsec_table *table;
945};
946
947static int
948ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
949 int (*step)(struct llsec_dump_data*))
950{
951 struct net *net = sock_net(skb->sk);
952 struct net_device *dev;
953 struct llsec_dump_data data;
954 int idx = 0;
955 int first_dev = cb->args[0];
956 int rc;
957
958 for_each_netdev(net, dev) {
959 if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
960 goto skip;
961
962 data.ops = ieee802154_mlme_ops(dev);
963 if (!data.ops->llsec)
964 goto skip;
965
966 data.skb = skb;
967 data.s_idx = cb->args[1];
968 data.s_idx2 = cb->args[2];
969 data.dev = dev;
970 data.portid = NETLINK_CB(cb->skb).portid;
971 data.nlmsg_seq = cb->nlh->nlmsg_seq;
972
973 data.ops->llsec->lock_table(dev);
974 data.ops->llsec->get_table(data.dev, &data.table);
975 rc = step(&data);
976 data.ops->llsec->unlock_table(dev);
977
978 if (rc < 0)
979 break;
980
981skip:
982 idx++;
983 }
984 cb->args[0] = idx;
985
986 return skb->len;
987}
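The s_idx/s_idx2 bookkeeping above is the standard netlink dump-resumption idiom: cb->args[] persists between invocations of the dump callback, so when the reply skb fills up, the next invocation skips everything that was already emitted. A userspace rendering of the idea, illustrative only, with a plain count standing in for the kernel lists:

/* args[] plays the role of cb->args[]: it survives across calls and
 * records how many entries earlier rounds already emitted. */
static int dump_resume(int n_items, long args[2], int room)
{
	int idx;

	for (idx = 0; idx < n_items; idx++) {
		if (idx < args[1])
			continue;	/* emitted in an earlier round */
		if (room-- <= 0)
			break;		/* buffer full, resume here later */
		/* ... emit item idx ... */
		args[1]++;
	}
	return idx;
}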
988
989static int
990ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
991 int (*fn)(struct net_device*, struct genl_info*))
992{
993 struct net_device *dev = NULL;
994 int rc = -EINVAL;
995
996 dev = ieee802154_nl_get_dev(info);
997 if (!dev)
998 return -ENODEV;
999
1000 if (!ieee802154_mlme_ops(dev)->llsec)
1001 rc = -EOPNOTSUPP;
1002 else
1003 rc = fn(dev, info);
1004
1005 dev_put(dev);
1006 return rc;
1007}
1008
1009
1010
1011static int
1012ieee802154_llsec_parse_key(struct genl_info *info,
1013 struct ieee802154_llsec_key *key)
1014{
1015 u8 frames;
1016 u32 commands[256 / 32];
1017
1018 memset(key, 0, sizeof(*key));
1019
1020 if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
1021 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
1022 return -EINVAL;
1023
1024 frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
1025 if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
1026 !info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
1027 return -EINVAL;
1028
1029 if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
1030 nla_memcpy(commands,
1031 info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
1032 256 / 8);
1033
1034 if (commands[0] || commands[1] || commands[2] || commands[3] ||
1035 commands[4] || commands[5] || commands[6] ||
1036 commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
1037 return -EINVAL;
1038
1039 key->cmd_frame_ids = commands[7];
1040 }
1041
1042 key->frame_types = frames;
1043
1044 nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
1045 IEEE802154_LLSEC_KEY_SIZE);
1046
1047 return 0;
1048}
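The commands blob is treated as a 256-bit map of MAC command frame IDs, and the check above only tolerates set bits in word 7, covering IDs 0 through IEEE802154_CMD_GTS_REQ. That is consistent with a layout in which ID i lands at bit (i % 32) of word (7 - i / 32); note this layout is inferred from the check, not stated anywhere in the patch. Under that assumption, a sender would set a bit like so:

#include <stdint.h>

/* Assumed layout (inferred, see above): command frame id `id` maps
 * to bit (id % 32) of word (7 - id / 32) in the 8-word blob. */
static void set_cmd_frame_id(uint32_t commands[8], unsigned int id)
{
	commands[7 - id / 32] |= UINT32_C(1) << (id % 32);
}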
1049
1050static int llsec_add_key(struct net_device *dev, struct genl_info *info)
1051{
1052 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1053 struct ieee802154_llsec_key key;
1054 struct ieee802154_llsec_key_id id;
1055
1056 if (ieee802154_llsec_parse_key(info, &key) ||
1057 ieee802154_llsec_parse_key_id(info, &id))
1058 return -EINVAL;
1059
1060 return ops->llsec->add_key(dev, &id, &key);
1061}
1062
1063int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
1064{
1065 if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
1066 (NLM_F_CREATE | NLM_F_EXCL))
1067 return -EINVAL;
1068
1069 return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
1070}
1071
1072static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
1073{
1074 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1075 struct ieee802154_llsec_key_id id;
1076
1077 if (ieee802154_llsec_parse_key_id(info, &id))
1078 return -EINVAL;
1079
1080 return ops->llsec->del_key(dev, &id);
1081}
1082
1083int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
1084{
1085 return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
1086}
1087
1088static int
1089ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
1090 const struct ieee802154_llsec_key_entry *key,
1091 const struct net_device *dev)
1092{
1093 void *hdr;
1094 u32 commands[256 / 32];
1095
1096 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
1097 IEEE802154_LLSEC_LIST_KEY);
1098 if (!hdr)
1099 goto out;
1100
1101 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
1102 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
1103 ieee802154_llsec_fill_key_id(msg, &key->id) ||
1104 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
1105 key->key->frame_types))
1106 goto nla_put_failure;
1107
1108 if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
1109 memset(commands, 0, sizeof(commands));
1110 commands[7] = key->key->cmd_frame_ids;
1111 if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
1112 sizeof(commands), commands))
1113 goto nla_put_failure;
1114 }
1115
1116 if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
1117 IEEE802154_LLSEC_KEY_SIZE, key->key->key))
1118 goto nla_put_failure;
1119
1120 genlmsg_end(msg, hdr);
1121 return 0;
1122
1123nla_put_failure:
1124 genlmsg_cancel(msg, hdr);
1125out:
1126 return -EMSGSIZE;
1127}
1128
1129static int llsec_iter_keys(struct llsec_dump_data *data)
1130{
1131 struct ieee802154_llsec_key_entry *pos;
1132 int rc = 0, idx = 0;
1133
1134 list_for_each_entry(pos, &data->table->keys, list) {
1135 if (idx++ < data->s_idx)
1136 continue;
1137
1138 if (ieee802154_nl_fill_key(data->skb, data->portid,
1139 data->nlmsg_seq, pos, data->dev)) {
1140 rc = -EMSGSIZE;
1141 break;
1142 }
1143
1144 data->s_idx++;
1145 }
1146
1147 return rc;
1148}
1149
1150int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
1151{
1152 return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
1153}
1154
1155
1156
1157static int
1158llsec_parse_dev(struct genl_info *info,
1159 struct ieee802154_llsec_device *dev)
1160{
1161 memset(dev, 0, sizeof(*dev));
1162
1163 if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
1164 !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
1165 !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
1166 !info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
1167 (!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
1168 !!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
1169 return -EINVAL;
1170
1171 if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
1172 dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
1173 dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
1174 } else {
1175 dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
1176 }
1177
1178 dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
1179 dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
1180 dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
1181 dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);
1182
1183 if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
1184 return -EINVAL;
1185
1186 return 0;
1187}
1188
1189static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
1190{
1191 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1192 struct ieee802154_llsec_device desc;
1193
1194 if (llsec_parse_dev(info, &desc))
1195 return -EINVAL;
1196
1197 return ops->llsec->add_dev(dev, &desc);
1198}
1199
1200int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
1201{
1202 if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
1203 (NLM_F_CREATE | NLM_F_EXCL))
1204 return -EINVAL;
1205
1206 return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
1207}
1208
1209static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
1210{
1211 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1212 __le64 devaddr;
1213
1214 if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
1215 return -EINVAL;
1216
1217 devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
1218
1219 return ops->llsec->del_dev(dev, devaddr);
1220}
1221
1222int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
1223{
1224 return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
1225}
1226
1227static int
1228ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
1229 const struct ieee802154_llsec_device *desc,
1230 const struct net_device *dev)
1231{
1232 void *hdr;
1233
1234 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
1235 IEEE802154_LLSEC_LIST_DEV);
1236 if (!hdr)
1237 goto out;
1238
1239 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
1240 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
1241 nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
1242 nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
1243 desc->short_addr) ||
1244 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
1245 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
1246 desc->frame_counter) ||
1247 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
1248 desc->seclevel_exempt) ||
1249 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
1250 goto nla_put_failure;
1251
1252 genlmsg_end(msg, hdr);
1253 return 0;
1254
1255nla_put_failure:
1256 genlmsg_cancel(msg, hdr);
1257out:
1258 return -EMSGSIZE;
1259}
1260
1261static int llsec_iter_devs(struct llsec_dump_data *data)
1262{
1263 struct ieee802154_llsec_device *pos;
1264 int rc = 0, idx = 0;
1265
1266 list_for_each_entry(pos, &data->table->devices, list) {
1267 if (idx++ < data->s_idx)
1268 continue;
1269
1270 if (ieee802154_nl_fill_dev(data->skb, data->portid,
1271 data->nlmsg_seq, pos, data->dev)) {
1272 rc = -EMSGSIZE;
1273 break;
1274 }
1275
1276 data->s_idx++;
1277 }
1278
1279 return rc;
1280}
1281
1282int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
1283{
1284 return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
1285}
1286
1287
1288
1289static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
1290{
1291 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1292 struct ieee802154_llsec_device_key key;
1293 __le64 devaddr;
1294
1295 if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
1296 !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
1297 ieee802154_llsec_parse_key_id(info, &key.key_id))
1298 return -EINVAL;
1299
1300 devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
1301 key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
1302
1303 return ops->llsec->add_devkey(dev, devaddr, &key);
1304}
1305
1306int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
1307{
1308 if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
1309 (NLM_F_CREATE | NLM_F_EXCL))
1310 return -EINVAL;
1311
1312 return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
1313}
1314
1315static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
1316{
1317 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1318 struct ieee802154_llsec_device_key key;
1319 __le64 devaddr;
1320
1321 if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
1322 ieee802154_llsec_parse_key_id(info, &key.key_id))
1323 return -EINVAL;
1324
1325 devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
1326
1327 return ops->llsec->del_devkey(dev, devaddr, &key);
1328}
1329
1330int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
1331{
1332 return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
1333}
1334
1335static int
1336ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
1337 __le64 devaddr,
1338 const struct ieee802154_llsec_device_key *devkey,
1339 const struct net_device *dev)
1340{
1341 void *hdr;
1342
1343 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
1344 IEEE802154_LLSEC_LIST_DEVKEY);
1345 if (!hdr)
1346 goto out;
1347
1348 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
1349 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
1350 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
1351 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
1352 devkey->frame_counter) ||
1353 ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
1354 goto nla_put_failure;
1355
1356 genlmsg_end(msg, hdr);
1357 return 0;
1358
1359nla_put_failure:
1360 genlmsg_cancel(msg, hdr);
1361out:
1362 return -EMSGSIZE;
1363}
1364
1365static int llsec_iter_devkeys(struct llsec_dump_data *data)
1366{
1367 struct ieee802154_llsec_device *dpos;
1368 struct ieee802154_llsec_device_key *kpos;
1369 int rc = 0, idx = 0, idx2;
1370
1371 list_for_each_entry(dpos, &data->table->devices, list) {
1372 if (idx++ < data->s_idx)
1373 continue;
1374
1375 idx2 = 0;
1376
1377 list_for_each_entry(kpos, &dpos->keys, list) {
1378 if (idx2++ < data->s_idx2)
1379 continue;
1380
1381 if (ieee802154_nl_fill_devkey(data->skb, data->portid,
1382 data->nlmsg_seq,
1383 dpos->hwaddr, kpos,
1384 data->dev)) {
 1385 return -EMSGSIZE;
1386 }
1387
1388 data->s_idx2++;
1389 }
1390
1391 data->s_idx++;
1392 }
1393
1394 return rc;
1395}
1396
1397int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
1398 struct netlink_callback *cb)
1399{
1400 return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
1401}
1402
1403
1404
1405static int
1406llsec_parse_seclevel(struct genl_info *info,
1407 struct ieee802154_llsec_seclevel *sl)
1408{
1409 memset(sl, 0, sizeof(*sl));
1410
1411 if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
1412 !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
1413 !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
1414 return -EINVAL;
1415
1416 sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
1417 if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
1418 if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
1419 return -EINVAL;
1420
1421 sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
1422 }
1423
1424 sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
1425 sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
1426
1427 return 0;
1428}
1429
1430static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
1431{
1432 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1433 struct ieee802154_llsec_seclevel sl;
1434
1435 if (llsec_parse_seclevel(info, &sl))
1436 return -EINVAL;
1437
1438 return ops->llsec->add_seclevel(dev, &sl);
1439}
1440
1441int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
1442{
1443 if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
1444 (NLM_F_CREATE | NLM_F_EXCL))
1445 return -EINVAL;
1446
1447 return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
1448}
1449
1450static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
1451{
1452 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
1453 struct ieee802154_llsec_seclevel sl;
1454
1455 if (llsec_parse_seclevel(info, &sl))
1456 return -EINVAL;
1457
1458 return ops->llsec->del_seclevel(dev, &sl);
1459}
1460
1461int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
1462{
1463 return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
1464}
1465
1466static int
1467ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
1468 const struct ieee802154_llsec_seclevel *sl,
1469 const struct net_device *dev)
1470{
1471 void *hdr;
1472
1473 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
1474 IEEE802154_LLSEC_LIST_SECLEVEL);
1475 if (!hdr)
1476 goto out;
1477
1478 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
1479 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
1480 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
1481 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
1482 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
1483 sl->device_override))
1484 goto nla_put_failure;
1485
1486 if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
1487 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
1488 sl->cmd_frame_id))
1489 goto nla_put_failure;
1490
1491 genlmsg_end(msg, hdr);
1492 return 0;
1493
1494nla_put_failure:
1495 genlmsg_cancel(msg, hdr);
1496out:
1497 return -EMSGSIZE;
1498}
1499
1500static int llsec_iter_seclevels(struct llsec_dump_data *data)
1501{
1502 struct ieee802154_llsec_seclevel *pos;
1503 int rc = 0, idx = 0;
1504
1505 list_for_each_entry(pos, &data->table->security_levels, list) {
1506 if (idx++ < data->s_idx)
1507 continue;
1508
1509 if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
1510 data->nlmsg_seq, pos,
1511 data->dev)) {
1512 rc = -EMSGSIZE;
1513 break;
1514 }
1515
1516 data->s_idx++;
1517 }
1518
1519 return rc;
1520}
1521
1522int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
1523 struct netlink_callback *cb)
1524{
1525 return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
1526}
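One usage note on the add paths: ieee802154_llsec_add_key(), _add_dev(), _add_devkey() and _add_seclevel() all refuse requests unless both NLM_F_CREATE and NLM_F_EXCL are present, so a userspace caller must mark its request accordingly. A sketch using only uapi definitions:

#include <linux/netlink.h>

/* Flags an LLSEC add request must carry, per the checks above. */
static void mark_create_excl(struct nlmsghdr *nlh)
{
	nlh->nlmsg_flags |= NLM_F_REQUEST | NLM_F_ACK |
			    NLM_F_CREATE | NLM_F_EXCL;
}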
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index fd7be5e45cef..3a703ab88348 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -62,5 +62,21 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
62 [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, }, 62 [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
63 63
64 [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, }, 64 [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
65
66 [IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
67 [IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
68 [IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
69 [IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
70 [IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
71 [IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
72 [IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
73 [IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
74 [IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
 75 [IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 256 / 8 },
76 [IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
77 [IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
78 [IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
79 [IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
80 [IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
65}; 81};
66 82
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index ef2d54372b13..6f1428c4870b 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -36,7 +36,7 @@ struct lowpan_frag_info {
36 u8 d_offset; 36 u8 d_offset;
37}; 37};
38 38
39struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb) 39static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
40{ 40{
41 return (struct lowpan_frag_info *)skb->cb; 41 return (struct lowpan_frag_info *)skb->cb;
42} 42}
@@ -120,6 +120,8 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
120 struct inet_frag_queue *q; 120 struct inet_frag_queue *q;
121 struct lowpan_create_arg arg; 121 struct lowpan_create_arg arg;
122 unsigned int hash; 122 unsigned int hash;
123 struct netns_ieee802154_lowpan *ieee802154_lowpan =
124 net_ieee802154_lowpan(net);
123 125
124 arg.tag = frag_info->d_tag; 126 arg.tag = frag_info->d_tag;
125 arg.d_size = frag_info->d_size; 127 arg.d_size = frag_info->d_size;
@@ -129,7 +131,7 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
129 read_lock(&lowpan_frags.lock); 131 read_lock(&lowpan_frags.lock);
130 hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst); 132 hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
131 133
132 q = inet_frag_find(&net->ieee802154_lowpan.frags, 134 q = inet_frag_find(&ieee802154_lowpan->frags,
133 &lowpan_frags, &arg, hash); 135 &lowpan_frags, &arg, hash);
134 if (IS_ERR_OR_NULL(q)) { 136 if (IS_ERR_OR_NULL(q)) {
135 inet_frag_maybe_warn_overflow(q, pr_fmt()); 137 inet_frag_maybe_warn_overflow(q, pr_fmt());
@@ -357,6 +359,8 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
357 struct net *net = dev_net(skb->dev); 359 struct net *net = dev_net(skb->dev);
358 struct lowpan_frag_info *frag_info = lowpan_cb(skb); 360 struct lowpan_frag_info *frag_info = lowpan_cb(skb);
359 struct ieee802154_addr source, dest; 361 struct ieee802154_addr source, dest;
362 struct netns_ieee802154_lowpan *ieee802154_lowpan =
363 net_ieee802154_lowpan(net);
360 int err; 364 int err;
361 365
362 source = mac_cb(skb)->source; 366 source = mac_cb(skb)->source;
@@ -366,10 +370,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
366 if (err < 0) 370 if (err < 0)
367 goto err; 371 goto err;
368 372
369 if (frag_info->d_size > net->ieee802154_lowpan.max_dsize) 373 if (frag_info->d_size > ieee802154_lowpan->max_dsize)
370 goto err; 374 goto err;
371 375
372 inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false); 376 inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);
373 377
374 fq = fq_find(net, frag_info, &source, &dest); 378 fq = fq_find(net, frag_info, &source, &dest);
375 if (fq != NULL) { 379 if (fq != NULL) {
@@ -436,6 +440,8 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
436{ 440{
437 struct ctl_table *table; 441 struct ctl_table *table;
438 struct ctl_table_header *hdr; 442 struct ctl_table_header *hdr;
443 struct netns_ieee802154_lowpan *ieee802154_lowpan =
444 net_ieee802154_lowpan(net);
439 445
440 table = lowpan_frags_ns_ctl_table; 446 table = lowpan_frags_ns_ctl_table;
441 if (!net_eq(net, &init_net)) { 447 if (!net_eq(net, &init_net)) {
@@ -444,10 +450,10 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
444 if (table == NULL) 450 if (table == NULL)
445 goto err_alloc; 451 goto err_alloc;
446 452
447 table[0].data = &net->ieee802154_lowpan.frags.high_thresh; 453 table[0].data = &ieee802154_lowpan->frags.high_thresh;
448 table[1].data = &net->ieee802154_lowpan.frags.low_thresh; 454 table[1].data = &ieee802154_lowpan->frags.low_thresh;
449 table[2].data = &net->ieee802154_lowpan.frags.timeout; 455 table[2].data = &ieee802154_lowpan->frags.timeout;
450 table[3].data = &net->ieee802154_lowpan.max_dsize; 456 table[3].data = &ieee802154_lowpan->max_dsize;
451 457
452 /* Don't export sysctls to unprivileged users */ 458 /* Don't export sysctls to unprivileged users */
453 if (net->user_ns != &init_user_ns) 459 if (net->user_ns != &init_user_ns)
@@ -458,7 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
458 if (hdr == NULL) 464 if (hdr == NULL)
459 goto err_reg; 465 goto err_reg;
460 466
461 net->ieee802154_lowpan.sysctl.frags_hdr = hdr; 467 ieee802154_lowpan->sysctl.frags_hdr = hdr;
462 return 0; 468 return 0;
463 469
464err_reg: 470err_reg:
@@ -471,9 +477,11 @@ err_alloc:
471static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net) 477static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
472{ 478{
473 struct ctl_table *table; 479 struct ctl_table *table;
480 struct netns_ieee802154_lowpan *ieee802154_lowpan =
481 net_ieee802154_lowpan(net);
474 482
475 table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg; 483 table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
476 unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr); 484 unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
477 if (!net_eq(net, &init_net)) 485 if (!net_eq(net, &init_net))
478 kfree(table); 486 kfree(table);
479} 487}
@@ -514,20 +522,26 @@ static inline void lowpan_frags_sysctl_unregister(void)
514 522
515static int __net_init lowpan_frags_init_net(struct net *net) 523static int __net_init lowpan_frags_init_net(struct net *net)
516{ 524{
517 net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; 525 struct netns_ieee802154_lowpan *ieee802154_lowpan =
518 net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH; 526 net_ieee802154_lowpan(net);
519 net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
520 net->ieee802154_lowpan.max_dsize = 0xFFFF;
521 527
522 inet_frags_init_net(&net->ieee802154_lowpan.frags); 528 ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
529 ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
530 ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
531 ieee802154_lowpan->max_dsize = 0xFFFF;
532
533 inet_frags_init_net(&ieee802154_lowpan->frags);
523 534
524 return lowpan_frags_ns_sysctl_register(net); 535 return lowpan_frags_ns_sysctl_register(net);
525} 536}
526 537
527static void __net_exit lowpan_frags_exit_net(struct net *net) 538static void __net_exit lowpan_frags_exit_net(struct net *net)
528{ 539{
540 struct netns_ieee802154_lowpan *ieee802154_lowpan =
541 net_ieee802154_lowpan(net);
542
529 lowpan_frags_ns_sysctl_unregister(net); 543 lowpan_frags_ns_sysctl_unregister(net);
530 inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags); 544 inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
531} 545}
532 546
533static struct pernet_operations lowpan_frags_ops = { 547static struct pernet_operations lowpan_frags_ops = {
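Every access in reassembly.c now goes through net_ieee802154_lowpan() instead of dereferencing net->ieee802154_lowpan directly, so the pernet data can later move or become configuration-dependent without touching these call sites. The accessor itself is not part of this hunk; presumably it is the trivial inline added elsewhere in the series, along these lines:

/* Sketch of the accessor assumed by the converted call sites. */
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}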
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6d6dd345bc4d..d5e6836cf772 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -254,7 +254,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
254 struct inet_sock *inet; 254 struct inet_sock *inet;
255 struct proto *answer_prot; 255 struct proto *answer_prot;
256 unsigned char answer_flags; 256 unsigned char answer_flags;
257 char answer_no_check;
258 int try_loading_module = 0; 257 int try_loading_module = 0;
259 int err; 258 int err;
260 259
@@ -312,7 +311,6 @@ lookup_protocol:
312 311
313 sock->ops = answer->ops; 312 sock->ops = answer->ops;
314 answer_prot = answer->prot; 313 answer_prot = answer->prot;
315 answer_no_check = answer->no_check;
316 answer_flags = answer->flags; 314 answer_flags = answer->flags;
317 rcu_read_unlock(); 315 rcu_read_unlock();
318 316
@@ -324,7 +322,6 @@ lookup_protocol:
324 goto out; 322 goto out;
325 323
326 err = 0; 324 err = 0;
327 sk->sk_no_check = answer_no_check;
328 if (INET_PROTOSW_REUSE & answer_flags) 325 if (INET_PROTOSW_REUSE & answer_flags)
329 sk->sk_reuse = SK_CAN_REUSE; 326 sk->sk_reuse = SK_CAN_REUSE;
330 327
@@ -1002,7 +999,6 @@ static struct inet_protosw inetsw_array[] =
1002 .protocol = IPPROTO_TCP, 999 .protocol = IPPROTO_TCP,
1003 .prot = &tcp_prot, 1000 .prot = &tcp_prot,
1004 .ops = &inet_stream_ops, 1001 .ops = &inet_stream_ops,
1005 .no_check = 0,
1006 .flags = INET_PROTOSW_PERMANENT | 1002 .flags = INET_PROTOSW_PERMANENT |
1007 INET_PROTOSW_ICSK, 1003 INET_PROTOSW_ICSK,
1008 }, 1004 },
@@ -1012,7 +1008,6 @@ static struct inet_protosw inetsw_array[] =
1012 .protocol = IPPROTO_UDP, 1008 .protocol = IPPROTO_UDP,
1013 .prot = &udp_prot, 1009 .prot = &udp_prot,
1014 .ops = &inet_dgram_ops, 1010 .ops = &inet_dgram_ops,
1015 .no_check = UDP_CSUM_DEFAULT,
1016 .flags = INET_PROTOSW_PERMANENT, 1011 .flags = INET_PROTOSW_PERMANENT,
1017 }, 1012 },
1018 1013
@@ -1021,7 +1016,6 @@ static struct inet_protosw inetsw_array[] =
1021 .protocol = IPPROTO_ICMP, 1016 .protocol = IPPROTO_ICMP,
1022 .prot = &ping_prot, 1017 .prot = &ping_prot,
1023 .ops = &inet_dgram_ops, 1018 .ops = &inet_dgram_ops,
1024 .no_check = UDP_CSUM_DEFAULT,
1025 .flags = INET_PROTOSW_REUSE, 1019 .flags = INET_PROTOSW_REUSE,
1026 }, 1020 },
1027 1021
@@ -1030,7 +1024,6 @@ static struct inet_protosw inetsw_array[] =
1030 .protocol = IPPROTO_IP, /* wild card */ 1024 .protocol = IPPROTO_IP, /* wild card */
1031 .prot = &raw_prot, 1025 .prot = &raw_prot,
1032 .ops = &inet_sockraw_ops, 1026 .ops = &inet_sockraw_ops,
1033 .no_check = UDP_CSUM_DEFAULT,
1034 .flags = INET_PROTOSW_REUSE, 1027 .flags = INET_PROTOSW_REUSE,
1035 } 1028 }
1036}; 1029};
@@ -1261,10 +1254,12 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1261 SKB_GSO_DODGY | 1254 SKB_GSO_DODGY |
1262 SKB_GSO_TCP_ECN | 1255 SKB_GSO_TCP_ECN |
1263 SKB_GSO_GRE | 1256 SKB_GSO_GRE |
1257 SKB_GSO_GRE_CSUM |
1264 SKB_GSO_IPIP | 1258 SKB_GSO_IPIP |
1265 SKB_GSO_SIT | 1259 SKB_GSO_SIT |
1266 SKB_GSO_TCPV6 | 1260 SKB_GSO_TCPV6 |
1267 SKB_GSO_UDP_TUNNEL | 1261 SKB_GSO_UDP_TUNNEL |
1262 SKB_GSO_UDP_TUNNEL_CSUM |
1268 SKB_GSO_MPLS | 1263 SKB_GSO_MPLS |
1269 0))) 1264 0)))
1270 goto out; 1265 goto out;
@@ -1476,22 +1471,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1476} 1471}
1477EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1472EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1478 1473
1479unsigned long snmp_fold_field(void __percpu *mib[], int offt) 1474unsigned long snmp_fold_field(void __percpu *mib, int offt)
1480{ 1475{
1481 unsigned long res = 0; 1476 unsigned long res = 0;
1482 int i, j; 1477 int i;
1483 1478
1484 for_each_possible_cpu(i) { 1479 for_each_possible_cpu(i)
1485 for (j = 0; j < SNMP_ARRAY_SZ; j++) 1480 res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
1486 res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
1487 }
1488 return res; 1481 return res;
1489} 1482}
1490EXPORT_SYMBOL_GPL(snmp_fold_field); 1483EXPORT_SYMBOL_GPL(snmp_fold_field);
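snmp_fold_field() now walks a single percpu allocation instead of the old two-element mib[] array: each CPU's copy is a block of unsigned long counters and offt is the counter's index within that block. A userspace analogue of the fold:

#include <stddef.h>

/* Sum one counter slot across per-CPU copies of a stats block. */
static unsigned long fold(unsigned long *const percpu[], int ncpu,
			  size_t offt)
{
	unsigned long res = 0;
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++)
		res += percpu[cpu][offt];
	return res;
}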
1491 1484
1492#if BITS_PER_LONG==32 1485#if BITS_PER_LONG==32
1493 1486
1494u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset) 1487u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1495{ 1488{
1496 u64 res = 0; 1489 u64 res = 0;
1497 int cpu; 1490 int cpu;
@@ -1502,7 +1495,7 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
1502 u64 v; 1495 u64 v;
1503 unsigned int start; 1496 unsigned int start;
1504 1497
1505 bhptr = per_cpu_ptr(mib[0], cpu); 1498 bhptr = per_cpu_ptr(mib, cpu);
1506 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); 1499 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1507 do { 1500 do {
1508 start = u64_stats_fetch_begin_irq(syncp); 1501 start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1509,6 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
1516EXPORT_SYMBOL_GPL(snmp_fold_field64); 1509EXPORT_SYMBOL_GPL(snmp_fold_field64);
1517#endif 1510#endif
1518 1511
1519int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
1520{
1521 BUG_ON(ptr == NULL);
1522 ptr[0] = __alloc_percpu(mibsize, align);
1523 if (!ptr[0])
1524 return -ENOMEM;
1525
1526#if SNMP_ARRAY_SZ == 2
1527 ptr[1] = __alloc_percpu(mibsize, align);
1528 if (!ptr[1]) {
1529 free_percpu(ptr[0]);
1530 ptr[0] = NULL;
1531 return -ENOMEM;
1532 }
1533#endif
1534 return 0;
1535}
1536EXPORT_SYMBOL_GPL(snmp_mib_init);
1537
1538#ifdef CONFIG_IP_MULTICAST 1512#ifdef CONFIG_IP_MULTICAST
1539static const struct net_protocol igmp_protocol = { 1513static const struct net_protocol igmp_protocol = {
1540 .handler = igmp_rcv, 1514 .handler = igmp_rcv,
@@ -1570,40 +1544,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
1570{ 1544{
1571 int i; 1545 int i;
1572 1546
1573 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, 1547 net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1574 sizeof(struct tcp_mib), 1548 if (!net->mib.tcp_statistics)
1575 __alignof__(struct tcp_mib)) < 0)
1576 goto err_tcp_mib; 1549 goto err_tcp_mib;
1577 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics, 1550 net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1578 sizeof(struct ipstats_mib), 1551 if (!net->mib.ip_statistics)
1579 __alignof__(struct ipstats_mib)) < 0)
1580 goto err_ip_mib; 1552 goto err_ip_mib;
1581 1553
1582 for_each_possible_cpu(i) { 1554 for_each_possible_cpu(i) {
1583 struct ipstats_mib *af_inet_stats; 1555 struct ipstats_mib *af_inet_stats;
1584 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i); 1556 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1585 u64_stats_init(&af_inet_stats->syncp); 1557 u64_stats_init(&af_inet_stats->syncp);
1586#if SNMP_ARRAY_SZ == 2
1587 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
1588 u64_stats_init(&af_inet_stats->syncp);
1589#endif
1590 } 1558 }
1591 1559
1592 if (snmp_mib_init((void __percpu **)net->mib.net_statistics, 1560 net->mib.net_statistics = alloc_percpu(struct linux_mib);
1593 sizeof(struct linux_mib), 1561 if (!net->mib.net_statistics)
1594 __alignof__(struct linux_mib)) < 0)
1595 goto err_net_mib; 1562 goto err_net_mib;
1596 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics, 1563 net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1597 sizeof(struct udp_mib), 1564 if (!net->mib.udp_statistics)
1598 __alignof__(struct udp_mib)) < 0)
1599 goto err_udp_mib; 1565 goto err_udp_mib;
1600 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics, 1566 net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1601 sizeof(struct udp_mib), 1567 if (!net->mib.udplite_statistics)
1602 __alignof__(struct udp_mib)) < 0)
1603 goto err_udplite_mib; 1568 goto err_udplite_mib;
1604 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics, 1569 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1605 sizeof(struct icmp_mib), 1570 if (!net->mib.icmp_statistics)
1606 __alignof__(struct icmp_mib)) < 0)
1607 goto err_icmp_mib; 1571 goto err_icmp_mib;
1608 net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib), 1572 net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
1609 GFP_KERNEL); 1573 GFP_KERNEL);
@@ -1614,17 +1578,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
1614 return 0; 1578 return 0;
1615 1579
1616err_icmpmsg_mib: 1580err_icmpmsg_mib:
1617 snmp_mib_free((void __percpu **)net->mib.icmp_statistics); 1581 free_percpu(net->mib.icmp_statistics);
1618err_icmp_mib: 1582err_icmp_mib:
1619 snmp_mib_free((void __percpu **)net->mib.udplite_statistics); 1583 free_percpu(net->mib.udplite_statistics);
1620err_udplite_mib: 1584err_udplite_mib:
1621 snmp_mib_free((void __percpu **)net->mib.udp_statistics); 1585 free_percpu(net->mib.udp_statistics);
1622err_udp_mib: 1586err_udp_mib:
1623 snmp_mib_free((void __percpu **)net->mib.net_statistics); 1587 free_percpu(net->mib.net_statistics);
1624err_net_mib: 1588err_net_mib:
1625 snmp_mib_free((void __percpu **)net->mib.ip_statistics); 1589 free_percpu(net->mib.ip_statistics);
1626err_ip_mib: 1590err_ip_mib:
1627 snmp_mib_free((void __percpu **)net->mib.tcp_statistics); 1591 free_percpu(net->mib.tcp_statistics);
1628err_tcp_mib: 1592err_tcp_mib:
1629 return -ENOMEM; 1593 return -ENOMEM;
1630} 1594}
@@ -1632,12 +1596,12 @@ err_tcp_mib:
1632static __net_exit void ipv4_mib_exit_net(struct net *net) 1596static __net_exit void ipv4_mib_exit_net(struct net *net)
1633{ 1597{
1634 kfree(net->mib.icmpmsg_statistics); 1598 kfree(net->mib.icmpmsg_statistics);
1635 snmp_mib_free((void __percpu **)net->mib.icmp_statistics); 1599 free_percpu(net->mib.icmp_statistics);
1636 snmp_mib_free((void __percpu **)net->mib.udplite_statistics); 1600 free_percpu(net->mib.udplite_statistics);
1637 snmp_mib_free((void __percpu **)net->mib.udp_statistics); 1601 free_percpu(net->mib.udp_statistics);
1638 snmp_mib_free((void __percpu **)net->mib.net_statistics); 1602 free_percpu(net->mib.net_statistics);
1639 snmp_mib_free((void __percpu **)net->mib.ip_statistics); 1603 free_percpu(net->mib.ip_statistics);
1640 snmp_mib_free((void __percpu **)net->mib.tcp_statistics); 1604 free_percpu(net->mib.tcp_statistics);
1641} 1605}
1642 1606
1643static __net_initdata struct pernet_operations ipv4_mib_ops = { 1607static __net_initdata struct pernet_operations ipv4_mib_ops = {
@@ -1736,13 +1700,9 @@ static int __init inet_init(void)
1736 1700
1737 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); 1701 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
1738 1702
1739 sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
1740 if (!sysctl_local_reserved_ports)
1741 goto out;
1742
1743 rc = proto_register(&tcp_prot, 1); 1703 rc = proto_register(&tcp_prot, 1);
1744 if (rc) 1704 if (rc)
1745 goto out_free_reserved_ports; 1705 goto out;
1746 1706
1747 rc = proto_register(&udp_prot, 1); 1707 rc = proto_register(&udp_prot, 1);
1748 if (rc) 1708 if (rc)
@@ -1852,8 +1812,6 @@ out_unregister_udp_proto:
1852 proto_unregister(&udp_prot); 1812 proto_unregister(&udp_prot);
1853out_unregister_tcp_proto: 1813out_unregister_tcp_proto:
1854 proto_unregister(&tcp_prot); 1814 proto_unregister(&tcp_prot);
1855out_free_reserved_ports:
1856 kfree(sysctl_local_reserved_ports);
1857 goto out; 1815 goto out;
1858} 1816}
1859 1817
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 8b5134c582f1..a3095fdefbed 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -86,18 +86,26 @@ out:
86} 86}
87EXPORT_SYMBOL(ip4_datagram_connect); 87EXPORT_SYMBOL(ip4_datagram_connect);
88 88
89/* Because UDP xmit path can manipulate sk_dst_cache without holding
90 * socket lock, we need to use sk_dst_set() here,
91 * even if we own the socket lock.
92 */
89void ip4_datagram_release_cb(struct sock *sk) 93void ip4_datagram_release_cb(struct sock *sk)
90{ 94{
91 const struct inet_sock *inet = inet_sk(sk); 95 const struct inet_sock *inet = inet_sk(sk);
92 const struct ip_options_rcu *inet_opt; 96 const struct ip_options_rcu *inet_opt;
93 __be32 daddr = inet->inet_daddr; 97 __be32 daddr = inet->inet_daddr;
98 struct dst_entry *dst;
94 struct flowi4 fl4; 99 struct flowi4 fl4;
95 struct rtable *rt; 100 struct rtable *rt;
96 101
97 if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
98 return;
99
100 rcu_read_lock(); 102 rcu_read_lock();
103
104 dst = __sk_dst_get(sk);
105 if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
106 rcu_read_unlock();
107 return;
108 }
101 inet_opt = rcu_dereference(inet->inet_opt); 109 inet_opt = rcu_dereference(inet->inet_opt);
102 if (inet_opt && inet_opt->opt.srr) 110 if (inet_opt && inet_opt->opt.srr)
103 daddr = inet_opt->opt.faddr; 111 daddr = inet_opt->opt.faddr;
@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
105 inet->inet_saddr, inet->inet_dport, 113 inet->inet_saddr, inet->inet_dport,
106 inet->inet_sport, sk->sk_protocol, 114 inet->inet_sport, sk->sk_protocol,
107 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); 115 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
108 if (!IS_ERR(rt)) 116
109 __sk_dst_set(sk, &rt->dst); 117 dst = !IS_ERR(rt) ? &rt->dst : NULL;
118 sk_dst_set(sk, dst);
119
110 rcu_read_unlock(); 120 rcu_read_unlock();
111} 121}
112EXPORT_SYMBOL_GPL(ip4_datagram_release_cb); 122EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
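The new comment is the point of this hunk: __sk_dst_set() assumes the socket lock is the only writer, but UDP's transmit path can replace sk_dst_cache without that lock, so the release callback must use the atomic variant. Simplified sketch of the difference, assuming the usual xchg-based implementation in include/net/sock.h of this era:

/* sk_dst_set() publishes the new dst with an atomic exchange, which
 * is safe against lockless writers; __sk_dst_set() is a plain store
 * that is only safe when the socket lock serializes all writers. */
static void sk_dst_set_sketch(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	old = xchg(&sk->sk_dst_cache, dst);	/* atomic publish */
	dst_release(old);			/* drop the old reference */
}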
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bdbf68bb2e2d..e9449376b58e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -106,7 +106,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
106#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) 106#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
107 107
108static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; 108static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
109static DEFINE_SPINLOCK(inet_addr_hash_lock);
110 109
111static u32 inet_addr_hash(struct net *net, __be32 addr) 110static u32 inet_addr_hash(struct net *net, __be32 addr)
112{ 111{
@@ -119,16 +118,14 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
119{ 118{
120 u32 hash = inet_addr_hash(net, ifa->ifa_local); 119 u32 hash = inet_addr_hash(net, ifa->ifa_local);
121 120
122 spin_lock(&inet_addr_hash_lock); 121 ASSERT_RTNL();
123 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); 122 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
124 spin_unlock(&inet_addr_hash_lock);
125} 123}
126 124
127static void inet_hash_remove(struct in_ifaddr *ifa) 125static void inet_hash_remove(struct in_ifaddr *ifa)
128{ 126{
129 spin_lock(&inet_addr_hash_lock); 127 ASSERT_RTNL();
130 hlist_del_init_rcu(&ifa->hash); 128 hlist_del_init_rcu(&ifa->hash);
131 spin_unlock(&inet_addr_hash_lock);
132} 129}
133 130
134/** 131/**
@@ -830,7 +827,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
830 ifa_existing = find_matching_ifa(ifa); 827 ifa_existing = find_matching_ifa(ifa);
831 if (!ifa_existing) { 828 if (!ifa_existing) {
832 /* It would be best to check for !NLM_F_CREATE here but 829 /* It would be best to check for !NLM_F_CREATE here but
833 * userspace alreay relies on not having to provide this. 830 * userspace already relies on not having to provide this.
834 */ 831 */
835 set_ifa_lifetime(ifa, valid_lft, prefered_lft); 832 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
836 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid); 833 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
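The dedicated inet_addr_hash_lock goes away because hash insertions and removals only ever happen under RTNL, which already serializes them; ASSERT_RTNL() documents and noisily enforces that assumption. Roughly what the assertion amounts to (sketch; the real macro lives in linux/rtnetlink.h):

/* Complain loudly if the caller does not hold the RTNL mutex. */
#define ASSERT_RTNL_SKETCH()					\
	do {							\
		if (unlikely(!rtnl_is_locked())) {		\
			pr_err("RTNL: assertion failed at %s (%d)\n", \
			       __FILE__, __LINE__);		\
			dump_stack();				\
		}						\
	} while (0)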
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 250be7421ab3..4e9619bca732 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -84,7 +84,8 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
84 ptr--; 84 ptr--;
85 } 85 }
86 if (tpi->flags&TUNNEL_CSUM && 86 if (tpi->flags&TUNNEL_CSUM &&
87 !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { 87 !(skb_shinfo(skb)->gso_type &
88 (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
88 *ptr = 0; 89 *ptr = 0;
89 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, 90 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
90 skb->len, 0)); 91 skb->len, 0));
@@ -93,28 +94,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
93} 94}
94EXPORT_SYMBOL_GPL(gre_build_header); 95EXPORT_SYMBOL_GPL(gre_build_header);
95 96
96static __sum16 check_checksum(struct sk_buff *skb)
97{
98 __sum16 csum = 0;
99
100 switch (skb->ip_summed) {
101 case CHECKSUM_COMPLETE:
102 csum = csum_fold(skb->csum);
103
104 if (!csum)
105 break;
106 /* Fall through. */
107
108 case CHECKSUM_NONE:
109 skb->csum = 0;
110 csum = __skb_checksum_complete(skb);
111 skb->ip_summed = CHECKSUM_COMPLETE;
112 break;
113 }
114
115 return csum;
116}
117
118static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, 97static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
119 bool *csum_err) 98 bool *csum_err)
120{ 99{
@@ -141,7 +120,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
141 120
142 options = (__be32 *)(greh + 1); 121 options = (__be32 *)(greh + 1);
143 if (greh->flags & GRE_CSUM) { 122 if (greh->flags & GRE_CSUM) {
144 if (check_checksum(skb)) { 123 if (skb_checksum_simple_validate(skb)) {
145 *csum_err = true; 124 *csum_err = true;
146 return -EINVAL; 125 return -EINVAL;
147 } 126 }
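The same conversion appears three times in this patch (GRE here, ICMP and IGMP below): the open-coded validation switch becomes skb_checksum_simple_validate(), which returns non-zero when the checksum does not verify. A simplified behavioral sketch; the real helper is a macro family in linux/skbuff.h and also tracks checksum-validity state:

/* Accept a CHECKSUM_COMPLETE skb whose folded csum is zero,
 * otherwise fall back to a full software checksum. */
static __sum16 checksum_simple_validate_sketch(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE && !csum_fold(skb->csum))
		return 0;			/* hardware already verified */

	return __skb_checksum_complete(skb);	/* zero iff valid */
}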
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index f1d32280cb54..eb92deb12666 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -42,6 +42,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
42 SKB_GSO_DODGY | 42 SKB_GSO_DODGY |
43 SKB_GSO_TCP_ECN | 43 SKB_GSO_TCP_ECN |
44 SKB_GSO_GRE | 44 SKB_GSO_GRE |
45 SKB_GSO_GRE_CSUM |
45 SKB_GSO_IPIP))) 46 SKB_GSO_IPIP)))
46 goto out; 47 goto out;
47 48
@@ -55,6 +56,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
55 goto out; 56 goto out;
56 57
57 csum = !!(greh->flags & GRE_CSUM); 58 csum = !!(greh->flags & GRE_CSUM);
59 if (csum)
60 skb->encap_hdr_csum = 1;
58 61
59 if (unlikely(!pskb_may_pull(skb, ghl))) 62 if (unlikely(!pskb_may_pull(skb, ghl)))
60 goto out; 63 goto out;
@@ -94,10 +97,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
94 } 97 }
95 } 98 }
96 99
97 greh = (struct gre_base_hdr *)(skb->data); 100 skb_reset_transport_header(skb);
101
102 greh = (struct gre_base_hdr *)
103 skb_transport_header(skb);
98 pcsum = (__be32 *)(greh + 1); 104 pcsum = (__be32 *)(greh + 1);
99 *pcsum = 0; 105 *pcsum = 0;
100 *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0)); 106 *(__sum16 *)pcsum = gso_make_checksum(skb, 0);
101 } 107 }
102 __skb_push(skb, tnl_hlen - ghl); 108 __skb_push(skb, tnl_hlen - ghl);
103 109
@@ -125,10 +131,12 @@ static __sum16 gro_skb_checksum(struct sk_buff *skb)
125 csum_partial(skb->data, skb_gro_offset(skb), 0)); 131 csum_partial(skb->data, skb_gro_offset(skb), 0));
126 sum = csum_fold(NAPI_GRO_CB(skb)->csum); 132 sum = csum_fold(NAPI_GRO_CB(skb)->csum);
127 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) { 133 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
128 if (unlikely(!sum)) 134 if (unlikely(!sum) && !skb->csum_complete_sw)
129 netdev_rx_csum_fault(skb->dev); 135 netdev_rx_csum_fault(skb->dev);
130 } else 136 } else {
131 skb->ip_summed = CHECKSUM_COMPLETE; 137 skb->ip_summed = CHECKSUM_COMPLETE;
138 skb->csum_complete_sw = 1;
139 }
132 140
133 return sum; 141 return sum;
134} 142}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 0134663fdbce..79c3d947a481 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr, saddr;
+	u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
 		return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	icmp_param->data.icmph.checksum = 0;
 
 	inet->tos = ip_hdr(skb)->tos;
+	sk->sk_mark = mark;
 	daddr = ipc.addr = ip_hdr(skb)->saddr;
 	saddr = fib_compute_spec_dst(skb);
 	ipc.opt = NULL;
@@ -364,6 +366,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
+	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -382,7 +385,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 					struct flowi4 *fl4,
 					struct sk_buff *skb_in,
 					const struct iphdr *iph,
-					__be32 saddr, u8 tos,
+					__be32 saddr, u8 tos, u32 mark,
 					int type, int code,
 					struct icmp_bxm *param)
 {
@@ -394,6 +397,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	fl4->daddr = (param->replyopts.opt.opt.srr ?
 		      param->replyopts.opt.opt.faddr : iph->saddr);
 	fl4->saddr = saddr;
+	fl4->flowi4_mark = mark;
 	fl4->flowi4_tos = RT_TOS(tos);
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
@@ -491,6 +495,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	struct flowi4 fl4;
 	__be32 saddr;
 	u8 tos;
+	u32 mark;
 	struct net *net;
 	struct sock *sk;
 
@@ -592,6 +597,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
 					   IPTOS_PREC_INTERNETCONTROL) :
 					  iph->tos;
+	mark = IP4_REPLY_MARK(net, skb_in->mark);
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
 		goto out_unlock;
@@ -608,13 +614,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	icmp_param->skb = skb_in;
 	icmp_param->offset = skb_network_offset(skb_in);
 	inet_sk(sk)->tos = tos;
+	sk->sk_mark = mark;
 	ipc.addr = iph->saddr;
 	ipc.opt = &icmp_param->replyopts.opt;
 	ipc.tx_flags = 0;
 	ipc.ttl = 0;
 	ipc.tos = -1;
 
-	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
 			       type, code, icmp_param);
 	if (IS_ERR(rt))
 		goto out_unlock;
@@ -908,16 +915,8 @@ int icmp_rcv(struct sk_buff *skb)
 
 	ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
 
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_fold(skb->csum))
-			break;
-		/* fall through */
-	case CHECKSUM_NONE:
-		skb->csum = 0;
-		if (__skb_checksum_complete(skb))
-			goto csum_error;
-	}
+	if (skb_checksum_simple_validate(skb))
+		goto csum_error;
 
 	if (!pskb_pull(skb, sizeof(*icmph)))
 		goto error;
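
IP4_REPLY_MARK() is introduced elsewhere in this series; the hunks above only
use it. A plausible shape for the helper, consistent with the fwmark_reflect
sysctl added below in net/ipv4/sysctl_net_ipv4.c (a sketch, not the committed
definition):

	/* Reflect the incoming packet's mark on replies when the
	 * per-netns fwmark_reflect sysctl is enabled; otherwise mark 0.
	 */
	#define IP4_REPLY_MARK(net, mark) \
		((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
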
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 97e4d1655d26..6748d420f714 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	pip->saddr = fl4.saddr;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len = 0;	/* filled in later */
-	ip_select_ident(skb, &rt->dst, NULL);
+	ip_select_ident(skb, NULL);
 	((u8 *)&pip[1])[0] = IPOPT_RA;
 	((u8 *)&pip[1])[1] = 4;
 	((u8 *)&pip[1])[2] = 0;
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	iph->daddr = dst;
 	iph->saddr = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
-	ip_select_ident(skb, &rt->dst, NULL);
+	ip_select_ident(skb, NULL);
 	((u8 *)&iph[1])[0] = IPOPT_RA;
 	((u8 *)&iph[1])[1] = 4;
 	((u8 *)&iph[1])[2] = 0;
@@ -988,16 +988,8 @@ int igmp_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
 		goto drop;
 
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_fold(skb->csum))
-			break;
-		/* fall through */
-	case CHECKSUM_NONE:
-		skb->csum = 0;
-		if (__skb_checksum_complete(skb))
-			goto drop;
-	}
+	if (skb_checksum_simple_validate(skb))
+		goto drop;
 
 	ih = igmp_hdr(skb);
 	switch (ih->type) {
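
Both the icmp_rcv() and igmp_rcv() hunks above fold the same open-coded
checksum switch into skb_checksum_simple_validate(). A condensed equivalent
of the removed logic (an illustrative sketch; the real helper is built on
__skb_checksum_validate() in include/linux/skbuff.h):

	static inline __sum16 checksum_simple_validate_sketch(struct sk_buff *skb)
	{
		/* Trust a hardware-computed full checksum that folds to zero. */
		if (skb->ip_summed == CHECKSUM_COMPLETE && !csum_fold(skb->csum))
			return 0;

		/* Otherwise verify in software; non-zero means a bad checksum. */
		skb->csum = 0;
		return __skb_checksum_complete(skb);
	}
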
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index a56b8e6e866a..14d02ea905b6 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -29,9 +29,6 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-unsigned long *sysctl_local_reserved_ports;
-EXPORT_SYMBOL(sysctl_local_reserved_ports);
-
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
 	unsigned int seq;
@@ -113,7 +110,7 @@ again:
 
 		smallest_size = -1;
 		do {
-			if (inet_is_reserved_local_port(rover))
+			if (inet_is_local_reserved_port(net, rover))
 				goto next_nolock;
 			head = &hashinfo->bhash[inet_bhashfn(net, rover,
 					hashinfo->bhash_size)];
@@ -408,7 +405,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 	struct net *net = sock_net(sk);
 	int flags = inet_sk_flowi_flags(sk);
 
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
@@ -445,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
 	rcu_read_lock();
 	opt = rcu_dereference(newinet->inet_opt);
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
@@ -680,6 +677,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 	inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 	newsk->sk_write_space = sk_stream_write_space;
 
+	newsk->sk_mark = inet_rsk(req)->ir_mark;
+
 	newicsk->icsk_retransmits = 0;
 	newicsk->icsk_backoff = 0;
 	newicsk->icsk_probes_out = 0;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8b9cf279450d..43116e8c8e13 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -274,7 +274,7 @@ struct sock *__inet_lookup_established(struct net *net,
 				  const __be32 daddr, const u16 hnum,
 				  const int dif)
 {
-	INET_ADDR_COOKIE(acookie, saddr, daddr)
+	INET_ADDR_COOKIE(acookie, saddr, daddr);
 	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 	struct sock *sk;
 	const struct hlist_nulls_node *node;
@@ -327,7 +327,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	__be32 daddr = inet->inet_rcv_saddr;
 	__be32 saddr = inet->inet_daddr;
 	int dif = sk->sk_bound_dev_if;
-	INET_ADDR_COOKIE(acookie, saddr, daddr)
+	INET_ADDR_COOKIE(acookie, saddr, daddr);
 	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
 	struct net *net = sock_net(sk);
 	unsigned int hash = inet_ehashfn(net, daddr, lport,
@@ -500,7 +500,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {
 			port = low + (i + offset) % remaining;
-			if (inet_is_reserved_local_port(port))
+			if (inet_is_local_reserved_port(net, port))
 				continue;
 			head = &hinfo->bhash[inet_bhashfn(net, port,
 					hinfo->bhash_size)];
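
The inet_is_local_reserved_port() calls above replace the old global
inet_is_reserved_local_port() with a per-namespace lookup. Assuming the
per-netns bitmap allocated in ipv4_sysctl_init_net() later in this patch,
the helper plausibly reduces to (sketch only):

	static inline bool inet_is_local_reserved_port(struct net *net, int port)
	{
		/* No bitmap allocated means no ports are reserved. */
		if (!net->ipv4.sysctl_local_reserved_ports)
			return false;
		return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
	}
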
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 56cd458a1b8c..bd5f5928167d 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -26,20 +26,7 @@
  *  Theory of operations.
  *  We keep one entry for each peer IP address.  The nodes contains long-living
  *  information about the peer which doesn't depend on routes.
- *  At this moment this information consists only of ID field for the next
- *  outgoing IP packet.  This field is incremented with each packet as encoded
- *  in inet_getid() function (include/net/inetpeer.h).
- *  At the moment of writing this notes identifier of IP packets is generated
- *  to be unpredictable using this code only for packets subjected
- *  (actually or potentially) to defragmentation.  I.e. DF packets less than
- *  PMTU in size when local fragmentation is disabled use a constant ID and do
- *  not use this code (see ip_select_ident() in include/net/ip.h).
  *
- *  Route cache entries hold references to our nodes.
- *  New cache entries get references via lookup by destination IP address in
- *  the avl tree.  The reference is grabbed only when it's needed i.e. only
- *  when we try to output IP packet which needs an unpredictable ID (see
- *  __ip_select_ident() in net/ipv4/route.c).
  *  Nodes are removed only when reference counter goes to 0.
  *  When it's happened the node may be removed when a sufficient amount of
  *  time has been passed since its last use.  The less-recently-used entry can
@@ -62,7 +49,6 @@
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		daddr: unchangeable
- *		ip_id_count: atomic value (no lock needed)
  */
 
 static struct kmem_cache *peer_cachep __read_mostly;
@@ -120,7 +106,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 static void inetpeer_gc_worker(struct work_struct *work)
 {
 	struct inet_peer *p, *n, *c;
-	LIST_HEAD(list);
+	struct list_head list;
 
 	spin_lock_bh(&gc_lock);
 	list_replace_init(&gc_list, &list);
@@ -497,10 +483,6 @@ relookup:
 		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count,
-				(daddr->family == AF_INET) ?
-					secure_ip_id(daddr->addr.a4) :
-					secure_ipv6_id(daddr->addr.a6));
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
 		/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 6f111e48e11c..3a83ce5efa80 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,7 +42,7 @@
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
 	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-		skb->local_df;
+		skb->ignore_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 94213c891565..9b842544aea3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -410,7 +410,7 @@ static int ipgre_open(struct net_device *dev)
 		struct flowi4 fl4;
 		struct rtable *rt;
 
-		rt = ip_route_output_gre(dev_net(dev), &fl4,
+		rt = ip_route_output_gre(t->net, &fl4,
 					 t->parms.iph.daddr,
 					 t->parms.iph.saddr,
 					 t->parms.o_key,
@@ -434,7 +434,7 @@ static int ipgre_close(struct net_device *dev)
 
 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 		struct in_device *in_dev;
-		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
+		in_dev = inetdev_by_index(t->net, t->mlink);
 		if (in_dev)
 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 	}
@@ -478,7 +478,7 @@ static void __gre_tunnel_init(struct net_device *dev)
 	dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
 	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
 
-	dev->features |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
+	dev->features |= GRE_FEATURES;
 	dev->hw_features |= GRE_FEATURES;
 
 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
@@ -649,6 +649,7 @@ static void ipgre_tap_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 	dev->netdev_ops = &gre_tap_netdev_ops;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	ip_tunnel_setup(dev, gre_tap_net_id);
 }
 
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f4ab72e19af9..5e7aecea05cd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -364,7 +364,7 @@ int ip_options_compile(struct net *net,
 			}
 			if (optptr[2] <= optlen) {
 				unsigned char *timeptr = NULL;
-				if (optptr[2]+3 > optptr[1]) {
+				if (optptr[2]+3 > optlen) {
 					pp_ptr = optptr + 2;
 					goto error;
 				}
@@ -376,7 +376,7 @@ int ip_options_compile(struct net *net,
 					optptr[2] += 4;
 					break;
 				case IPOPT_TS_TSANDADDR:
-					if (optptr[2]+7 > optptr[1]) {
+					if (optptr[2]+7 > optlen) {
 						pp_ptr = optptr + 2;
 						goto error;
 					}
@@ -390,7 +390,7 @@ int ip_options_compile(struct net *net,
 					optptr[2] += 8;
 					break;
 				case IPOPT_TS_PRESPEC:
-					if (optptr[2]+7 > optptr[1]) {
+					if (optptr[2]+7 > optlen) {
 						pp_ptr = optptr + 2;
 						goto error;
 					}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a52f50187b54..8d3b6b0e9857 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 	iph->saddr = saddr;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(skb, &rt->dst, sk);
+	ip_select_ident(skb, sk);
 
 	if (opt && opt->opt.optlen) {
 		iph->ihl += opt->opt.optlen>>2;
@@ -415,7 +415,7 @@ packet_routed:
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
+	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
@@ -430,8 +430,7 @@ packet_routed:
 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_more(skb, &rt->dst, sk,
-			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+	ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
 
 	/* TODO : should we use skb->sk here instead of sk ? */
 	skb->priority = sk->sk_priority;
@@ -501,7 +500,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	iph = ip_hdr(skb);
 
 	mtu = ip_skb_dst_mtu(skb);
-	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
 		     (IPCB(skb)->frag_max_size &&
 		      IPCB(skb)->frag_max_size > mtu))) {
 		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -866,7 +865,7 @@ static int __ip_append_data(struct sock *sk,
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-	maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
 	if (cork->length + length > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1189,7 +1188,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-	maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
 	if (cork->length + size > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1350,10 +1349,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	 * to fragment the frame generated here. No matter, what transforms
 	 * how transforms change size of the packet, it will come out.
 	 */
-	skb->local_df = ip_sk_local_df(sk);
+	skb->ignore_df = ip_sk_ignore_df(sk);
 
 	/* DF bit is set when we want to see DF on outgoing frames.
-	 * If local_df is set too, we still allow to fragment this frame
+	 * If ignore_df is set too, we still allow to fragment this frame
 	 * locally. */
 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
 	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
@@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	ip_copy_addrs(iph, fl4);
-	ip_select_ident(skb, &rt->dst, sk);
+	ip_select_ident(skb, sk);
 
 	if (opt) {
 		iph->ihl += opt->optlen>>2;
@@ -1546,7 +1545,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 		daddr = replyopts.opt.opt.faddr;
 	}
 
-	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+	flowi4_init_output(&fl4, arg->bound_dev_if,
+			   IP4_REPLY_MARK(net, skb->mark),
 			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 2acc2337d38b..097b3e7c1e8f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -268,6 +268,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
 	__be32 key = parms->i_key;
+	__be16 flags = parms->i_flags;
 	int link = parms->link;
 	struct ip_tunnel *t = NULL;
 	struct hlist_head *head = ip_bucket(itn, parms);
@@ -275,9 +276,9 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
 	hlist_for_each_entry_rcu(t, head, hash_node) {
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr &&
-		    key == t->parms.i_key &&
 		    link == t->parms.link &&
-		    type == t->dev->type)
+		    type == t->dev->type &&
+		    ip_tunnel_key_match(&t->parms, flags, key))
 			break;
 	}
 	return t;
@@ -395,11 +396,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
 					  struct ip_tunnel_net *itn,
 					  struct ip_tunnel_parm *parms)
 {
-	struct ip_tunnel *nt, *fbt;
+	struct ip_tunnel *nt;
 	struct net_device *dev;
 
 	BUG_ON(!itn->fb_tunnel_dev);
-	fbt = netdev_priv(itn->fb_tunnel_dev);
 	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
 	if (IS_ERR(dev))
 		return ERR_CAST(dev);
@@ -668,6 +668,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		dev->needed_headroom = max_headroom;
 
 	if (skb_cow_head(skb, dev->needed_headroom)) {
+		ip_rt_put(rt);
 		dev->stats.tx_dropped++;
 		kfree_skb(skb);
 		return;
@@ -747,19 +748,19 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 			goto done;
 		if (p->iph.ttl)
 			p->iph.frag_off |= htons(IP_DF);
-		if (!(p->i_flags&TUNNEL_KEY))
-			p->i_key = 0;
-		if (!(p->o_flags&TUNNEL_KEY))
-			p->o_key = 0;
+		if (!(p->i_flags & VTI_ISVTI)) {
+			if (!(p->i_flags & TUNNEL_KEY))
+				p->i_key = 0;
+			if (!(p->o_flags & TUNNEL_KEY))
+				p->o_key = 0;
+		}
 
 		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 
 		if (!t && (cmd == SIOCADDTUNNEL)) {
 			t = ip_tunnel_create(net, itn, p);
-			if (IS_ERR(t)) {
-				err = PTR_ERR(t);
-				break;
-			}
+			err = PTR_ERR_OR_ZERO(t);
+			break;
 		}
 		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 			if (t != NULL) {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index bcf206c79005..f4c987bb7e94 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	iph->daddr = dst;
 	iph->saddr = src;
 	iph->ttl = ttl;
-	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
 
 	err = ip_local_out_sk(sk, skb);
 	if (unlikely(net_xmit_eval(err)))
@@ -135,6 +135,14 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 		return skb;
 	}
 
+	/* If packet is not gso and we are resolving any partial checksum,
+	 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
+	 * on the outer header without confusing devices that implement
+	 * NETIF_F_IP_CSUM with encapsulation.
+	 */
+	if (csum_help)
+		skb->encapsulation = 0;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
 		err = skb_checksum_help(skb);
 		if (unlikely(err))
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 13ef00f1e17b..b8960f3527f3 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -313,7 +313,13 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			return -EINVAL;
 	}
 
-	p.i_flags |= VTI_ISVTI;
+	if (!(p.i_flags & GRE_KEY))
+		p.i_key = 0;
+	if (!(p.o_flags & GRE_KEY))
+		p.o_key = 0;
+
+	p.i_flags = VTI_ISVTI;
+
 	err = ip_tunnel_ioctl(dev, &p, cmd);
 	if (err)
 		return err;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 812b18351462..62eaa005e146 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
 
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->dev->ifindex, 0, IPPROTO_IPIP, 0);
+				 t->parms.link, 0, IPPROTO_IPIP, 0);
 		err = 0;
 		goto out;
 	}
 
 	if (type == ICMP_REDIRECT) {
-		ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
 			      IPPROTO_IPIP, 0);
 		err = 0;
 		goto out;
@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
 MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index d84dc8d4c916..65bcaa789043 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -484,7 +484,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->type = ARPHRD_PIMREG;
 	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
 	dev->flags = IFF_NOARP;
-	dev->netdev_ops = &reg_vif_netdev_ops,
+	dev->netdev_ops = &reg_vif_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->features |= NETIF_F_NETNS_LOCAL;
 }
@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 	iph->protocol = IPPROTO_IPIP;
 	iph->ihl = 5;
 	iph->tot_len = htons(skb->len);
-	ip_select_ident(skb, skb_dst(skb), NULL);
+	ip_select_ident(skb, NULL);
 	ip_send_check(iph);
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index ee2886126e3d..f1787c04a4dd 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -91,17 +91,9 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
 	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
-	nat = nfct_nat(ct);
-	if (!nat) {
-		/* NAT module was loaded late. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL) {
-			pr_debug("failed to add NAT extension\n");
-			return NF_ACCEPT;
-		}
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index f40f321b41fc..b8f6381c7d0b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -34,7 +34,7 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 
 	if (!err) {
 		ip_send_check(ip_hdr(skb));
-		skb->local_df = 1;
+		skb->ignore_df = 1;
 	}
 
 	return err;
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index b5b256d45e67..3964157d826c 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -48,15 +48,9 @@ static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
 
 	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
 
-	nat = nfct_nat(ct);
-	if (nat == NULL) {
-		/* Conntrack module was loaded late, can't add extension. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL)
-			return NF_ACCEPT;
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ad737fad6d8b..ae0af9386f7c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " Out%s", icmpmibmap[i].name);
 	seq_printf(seq, "\nIcmp: %lu %lu %lu",
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			   atomic_long_read(ptr + icmpmibmap[i].index));
 	seq_printf(seq, " %lu %lu",
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
 	for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
 		seq_printf(seq, " %llu",
-			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));
 
@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 		/* MaxConn field is signed, RFC 2012 */
 		if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
 			seq_printf(seq, " %ld",
-				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
		else
			seq_printf(seq, " %lu",
-				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
	}
 
@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nUdp:");
 	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.udp_statistics,
+			   snmp_fold_field(net->mib.udp_statistics,
					   snmp4_udp_list[i].entry));
 
 	/* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nUdpLite:");
 	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
+			   snmp_fold_field(net->mib.udplite_statistics,
					   snmp4_udp_list[i].entry));
 
 	seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nTcpExt:");
 	for (i = 0; snmp4_net_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.net_statistics,
+			   snmp_fold_field(net->mib.net_statistics,
					   snmp4_net_list[i].entry));
 
 	seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nIpExt:");
 	for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
 		seq_printf(seq, " %llu",
-			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipextstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a9dbe58bdfe7..2c65160565e1 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 		iph->check = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(skb, &rt->dst, NULL);
+			ip_select_ident(skb, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5e676be3daeb..082239ffe34a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -89,6 +89,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/jhash.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -456,39 +457,19 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-/*
- * Peer allocation may fail only in serious out-of-memory conditions. However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
- */
-static void ip_select_fb_ident(struct iphdr *iph)
-{
-	static DEFINE_SPINLOCK(ip_fb_id_lock);
-	static u32 ip_fallback_id;
-	u32 salt;
-
-	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
-	iph->id = htons(salt & 0xFFFF);
-	ip_fallback_id = salt;
-	spin_unlock_bh(&ip_fb_id_lock);
-}
+atomic_t *ip_idents __read_mostly;
+EXPORT_SYMBOL(ip_idents);
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+void __ip_select_ident(struct iphdr *iph, int segs)
 {
-	struct net *net = dev_net(dst->dev);
-	struct inet_peer *peer;
+	static u32 ip_idents_hashrnd __read_mostly;
+	u32 hash, id;
 
-	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
-	if (peer) {
-		iph->id = htons(inet_getid(peer, more));
-		inet_putpeer(peer);
-		return;
-	}
+	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-	ip_select_fb_ident(iph);
+	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	id = ip_idents_reserve(hash, segs);
+	iph->id = htons(id);
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
@@ -993,6 +974,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 	struct flowi4 fl4;
 	struct rtable *rt;
 
+	if (!mark)
+		mark = IP4_REPLY_MARK(net, skb->mark);
+
 	__build_flow_key(&fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +994,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	struct rtable *rt;
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+	if (!fl4.flowi4_mark)
+		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
 	rt = __ip_route_output_key(sock_net(sk), &fl4);
 	if (!IS_ERR(rt)) {
 		__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -2704,6 +2692,12 @@ int __init ip_rt_init(void)
 {
 	int rc = 0;
 
+	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+	if (!ip_idents)
+		panic("IP: failed to allocate ip_idents\n");
+
+	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
 	if (!ip_rt_acct)
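
The reworked __ip_select_ident() above hashes the destination address into
the new ip_idents array and reserves one IP ID per GSO segment.
ip_idents_reserve() itself is defined elsewhere in this series; a minimal
sketch consistent with the call site and the allocation in ip_rt_init()
(the table size is an assumption):

	#define IP_IDENTS_SZ 2048u		/* assumed size of ip_idents[] */

	static u32 ip_idents_reserve(u32 hash, int segs)
	{
		atomic_t *id = ip_idents + hash % IP_IDENTS_SZ;

		/* Claim 'segs' consecutive IDs; return the first one. */
		return atomic_add_return(segs, id) - segs;
	}
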
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index f2ed13c2125f..c86624b36a62 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -303,6 +303,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->ir_rmt_port = th->source;
 	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
 	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+	ireq->ir_mark = inet_request_mark(sk, skb);
 	ireq->ecn_ok = ecn_ok;
 	ireq->snd_wscale = tcp_opt.snd_wscale;
 	ireq->sack_ok = tcp_opt.sack_ok;
@@ -339,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
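
inet_request_mark(), used above to choose ir_mark, is also added elsewhere
in this series. A plausible definition, matching the tcp_fwmark_accept
sysctl introduced below (a sketch under that assumption):

	static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
	{
		/* Inherit the SYN's mark unless the listener carries its own. */
		if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
			return skb->mark;
		return sk->sk_mark;
	}
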
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5cde8f263d40..79a007c52558 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -437,13 +437,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "ip_local_reserved_ports",
-		.data		= NULL, /* initialized in sysctl_ipv4_init */
-		.maxlen		= 65536,
-		.mode		= 0644,
-		.proc_handler	= proc_do_large_bitmap,
-	},
-	{
 		.procname	= "igmp_max_memberships",
 		.data		= &sysctl_igmp_max_memberships,
 		.maxlen		= sizeof(int),
@@ -825,6 +818,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.proc_handler	= ipv4_local_port_range,
 	},
 	{
+		.procname	= "ip_local_reserved_ports",
+		.data		= &init_net.ipv4.sysctl_local_reserved_ports,
+		.maxlen		= 65536,
+		.mode		= 0644,
+		.proc_handler	= proc_do_large_bitmap,
+	},
+	{
 		.procname	= "ip_no_pmtu_disc",
 		.data		= &init_net.ipv4.sysctl_ip_no_pmtu_disc,
 		.maxlen		= sizeof(int),
@@ -838,6 +838,20 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "fwmark_reflect",
+		.data		= &init_net.ipv4.sysctl_fwmark_reflect,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "tcp_fwmark_accept",
+		.data		= &init_net.ipv4.sysctl_tcp_fwmark_accept,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{ }
 };
 
@@ -862,8 +876,14 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
 
+	net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+	if (!net->ipv4.sysctl_local_reserved_ports)
+		goto err_ports;
+
 	return 0;
 
+err_ports:
+	unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 err_reg:
 	if (!net_eq(net, &init_net))
 		kfree(table);
@@ -875,6 +895,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
 {
 	struct ctl_table *table;
 
+	kfree(net->ipv4.sysctl_local_reserved_ports);
 	table = net->ipv4.ipv4_hdr->ctl_table_arg;
 	unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 	kfree(table);
@@ -888,16 +909,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
 static __init int sysctl_ipv4_init(void)
 {
 	struct ctl_table_header *hdr;
-	struct ctl_table *i;
-
-	for (i = ipv4_table; i->procname; i++) {
-		if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-			i->data = sysctl_local_reserved_ports;
-			break;
-		}
-	}
-	if (!i->procname)
-		return -EINVAL;
 
 	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
 	if (hdr == NULL)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4bd6d52eeffb..eb1dde37e678 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2916,6 +2916,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
+
+	case TCP_FASTOPEN:
+		if (icsk->icsk_accept_queue.fastopenq != NULL)
+			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
+		else
+			val = 0;
+		break;
+
 	case TCP_TIMESTAMP:
 		val = tcp_time_stamp + tp->tsoffset;
 		break;
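
With the hunk above, userspace can read back the Fast Open backlog it set
with setsockopt(). A hypothetical probe (the socket option is real; the
surrounding function is illustrative only):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>
	#include <stdio.h>

	static void print_fastopen_qlen(int listen_fd)
	{
		int qlen = 0;
		socklen_t len = sizeof(qlen);

		/* Returns max_qlen if TCP_FASTOPEN was enabled, else 0. */
		if (getsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
			       &qlen, &len) == 0)
			printf("fastopen backlog: %d\n", qlen);
	}
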
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 821846fb0a7e..d5de69bc04f5 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 2b9464c93b88..7b09d8b49fa5 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -276,26 +276,6 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 	return err;
 }
 
-/* RFC2861 Check whether we are limited by application or congestion window
- * This is the inverse of cwnd check in tcp_tso_should_defer
- */
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 left;
-
-	if (in_flight >= tp->snd_cwnd)
-		return true;
-
-	left = tp->snd_cwnd - in_flight;
-	if (sk_can_gso(sk) &&
-	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left < tp->xmit_size_goal_segs)
-		return true;
-	return left <= tcp_max_tso_deferred_mss(tp);
-}
-EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
-
 /* Slow start is used when congestion window is no greater than the slow start
  * threshold. We base on RFC2581 and also handle stretch ACKs properly.
  * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
@@ -337,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	/* In "safe" area, increase. */
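
Every cong_avoid() hunk in this series drops the in_flight argument because
tcp_is_cwnd_limited() no longer needs it: the RFC2861 test removed above is
replaced by state tracked at transmit time. Roughly, the new helper in
include/net/tcp.h looks like this (a sketch; the field names are assumed
from the rest of the series, not shown in this diff):

	static inline bool tcp_is_cwnd_limited(const struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);

		/* In slow start, allow cwnd to grow to twice what was ACKed. */
		if (tp->snd_cwnd <= tp->snd_ssthresh)
			return tp->snd_cwnd < 2 * tp->max_packets_out;

		return tp->is_cwnd_limited;	/* set by the output engine */
	}
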
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index b4f1b29b08bd..a9bd8a4828a9 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index f195d9316e55..62e48cf84e60 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -72,25 +72,224 @@ error: kfree(ctx);
72 return err; 72 return err;
73} 73}
74 74
75/* Computes the fastopen cookie for the IP path. 75static bool __tcp_fastopen_cookie_gen(const void *path,
76 * The path is a 128 bits long (pad with zeros for IPv4). 76 struct tcp_fastopen_cookie *foc)
77 *
78 * The caller must check foc->len to determine if a valid cookie
79 * has been generated successfully.
80*/
81void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
82 struct tcp_fastopen_cookie *foc)
83{ 77{
84 __be32 path[4] = { src, dst, 0, 0 };
85 struct tcp_fastopen_context *ctx; 78 struct tcp_fastopen_context *ctx;
79 bool ok = false;
86 80
87 tcp_fastopen_init_key_once(true); 81 tcp_fastopen_init_key_once(true);
88 82
89 rcu_read_lock(); 83 rcu_read_lock();
90 ctx = rcu_dereference(tcp_fastopen_ctx); 84 ctx = rcu_dereference(tcp_fastopen_ctx);
91 if (ctx) { 85 if (ctx) {
92 crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path); 86 crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
93 foc->len = TCP_FASTOPEN_COOKIE_SIZE; 87 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
88 ok = true;
94 } 89 }
95 rcu_read_unlock(); 90 rcu_read_unlock();
91 return ok;
92}
93
94/* Generate the fastopen cookie by doing aes128 encryption on both
95 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
96 * addresses. For the longer IPv6 addresses use CBC-MAC.
97 *
98 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
99 */
100static bool tcp_fastopen_cookie_gen(struct request_sock *req,
101 struct sk_buff *syn,
102 struct tcp_fastopen_cookie *foc)
103{
104 if (req->rsk_ops->family == AF_INET) {
105 const struct iphdr *iph = ip_hdr(syn);
106
107 __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
108 return __tcp_fastopen_cookie_gen(path, foc);
109 }
110
111#if IS_ENABLED(CONFIG_IPV6)
112 if (req->rsk_ops->family == AF_INET6) {
113 const struct ipv6hdr *ip6h = ipv6_hdr(syn);
114 struct tcp_fastopen_cookie tmp;
115
116 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
117 struct in6_addr *buf = (struct in6_addr *) tmp.val;
118 int i = 4;
119
120 for (i = 0; i < 4; i++)
121 buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
122 return __tcp_fastopen_cookie_gen(buf, foc);
123 }
124 }
125#endif
126 return false;
127}
128
129static bool tcp_fastopen_create_child(struct sock *sk,
130 struct sk_buff *skb,
131 struct dst_entry *dst,
132 struct request_sock *req)
133{
134 struct tcp_sock *tp = tcp_sk(sk);
135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
136 struct sock *child;
137
138 req->num_retrans = 0;
139 req->num_timeout = 0;
140 req->sk = NULL;
141
142 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
143 if (child == NULL)
144 return false;
145
146 spin_lock(&queue->fastopenq->lock);
147 queue->fastopenq->qlen++;
148 spin_unlock(&queue->fastopenq->lock);
149
150 /* Initialize the child socket. Have to fix some values to take
151 * into account the child is a Fast Open socket and is created
152 * only out of the bits carried in the SYN packet.
153 */
154 tp = tcp_sk(child);
155
156 tp->fastopen_rsk = req;
157 /* Do a hold on the listner sk so that if the listener is being
158 * closed, the child that has been accepted can live on and still
159 * access listen_lock.
160 */
161 sock_hold(sk);
162 tcp_rsk(req)->listener = sk;
163
164 /* RFC1323: The window in SYN & SYN/ACK segments is never
165 * scaled. So correct it appropriately.
166 */
167 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
168
169 /* Activate the retrans timer so that SYNACK can be retransmitted.
170 * The request socket is not added to the SYN table of the parent
171 * because it's been added to the accept queue directly.
172 */
173 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
174 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
175
176 /* Add the child socket directly into the accept queue */
177 inet_csk_reqsk_queue_add(sk, req, child);
178
179 /* Now finish processing the fastopen child socket. */
180 inet_csk(child)->icsk_af_ops->rebuild_header(child);
181 tcp_init_congestion_control(child);
182 tcp_mtup_init(child);
183 tcp_init_metrics(child);
184 tcp_init_buffer_space(child);
185
186 /* Queue the data carried in the SYN packet. We need to first
187 * bump skb's refcnt because the caller will attempt to free it.
188 *
189 * XXX (TFO) - we honor a zero-payload TFO request for now,
190 * (any reason not to?) but no need to queue the skb since
191 * there is no data. How about SYN+FIN?
192 */
193 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
194 skb = skb_get(skb);
195 skb_dst_drop(skb);
196 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
197 skb_set_owner_r(skb, child);
198 __skb_queue_tail(&child->sk_receive_queue, skb);
199 tp->syn_data_acked = 1;
200 }
201 tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
202 sk->sk_data_ready(sk);
203 bh_unlock_sock(child);
204 sock_put(child);
205 WARN_ON(req->sk == NULL);
206 return true;
207}
208EXPORT_SYMBOL(tcp_fastopen_create_child);
209
210static bool tcp_fastopen_queue_check(struct sock *sk)
211{
212 struct fastopen_queue *fastopenq;
213
214 /* Make sure the listener has enabled fastopen, and we don't
215 * exceed the max # of pending TFO requests allowed before trying
216 * to validating the cookie in order to avoid burning CPU cycles
217 * unnecessarily.
218 *
219 * XXX (TFO) - The implication of checking the max_qlen before
220 * processing a cookie request is that clients can't differentiate
221 * between qlen overflow causing Fast Open to be disabled
222 * temporarily vs a server not supporting Fast Open at all.
223 */
224 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
225 if (fastopenq == NULL || fastopenq->max_qlen == 0)
226 return false;
227
228 if (fastopenq->qlen >= fastopenq->max_qlen) {
229 struct request_sock *req1;
230 spin_lock(&fastopenq->lock);
231 req1 = fastopenq->rskq_rst_head;
232 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
233 spin_unlock(&fastopenq->lock);
234 NET_INC_STATS_BH(sock_net(sk),
235 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
236 return false;
237 }
238 fastopenq->rskq_rst_head = req1->dl_next;
239 fastopenq->qlen--;
240 spin_unlock(&fastopenq->lock);
241 reqsk_free(req1);
242 }
243 return true;
244}
245
246/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
247 * may be updated and returned to the client in the SYN-ACK later, e.g. for a
248 * Fast Open cookie request (foc->len == 0).
249 */
250bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
251 struct request_sock *req,
252 struct tcp_fastopen_cookie *foc,
253 struct dst_entry *dst)
254{
255 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
256 bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
257
258 if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
259 (syn_data || foc->len >= 0) &&
260 tcp_fastopen_queue_check(sk))) {
261 foc->len = -1;
262 return false;
263 }
264
265 if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
266 goto fastopen;
267
268 if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
269 foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
270 foc->len == valid_foc.len &&
271 !memcmp(foc->val, valid_foc.val, foc->len)) {
272 /* Cookie is valid. Create a (full) child socket to accept
273 * the data in SYN before returning a SYN-ACK to ack the
274 * data. If we fail to create the socket, fall back and
275 * ack the ISN only but include the same cookie.
276 *
277 * Note: Data-less SYN with valid cookie is allowed to send
278 * data in SYN_RECV state.
279 */
280fastopen:
281 if (tcp_fastopen_create_child(sk, skb, dst, req)) {
282 foc->len = -1;
283 NET_INC_STATS_BH(sock_net(sk),
284 LINUX_MIB_TCPFASTOPENPASSIVE);
285 return true;
286 }
287 }
288
289 NET_INC_STATS_BH(sock_net(sk), foc->len ?
290 LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
291 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
292 *foc = valid_foc;
293 return false;
96} 294}
295EXPORT_SYMBOL(tcp_try_fastopen);
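
The server-side path above is driven by two userspace-visible knobs: the TFO_* bits tested in tcp_try_fastopen() come from the net.ipv4.tcp_fastopen sysctl, and fastopenq->max_qlen, which tcp_fastopen_queue_check() compares against qlen, is set by the TCP_FASTOPEN listener socket option. A minimal userspace sketch of both ends follows; the port, queue length, and payload are illustrative only and error handling is omitted.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23			/* from include/uapi/linux/tcp.h */
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000		/* carry data in the SYN */
#endif

static void fastopen_demo(void)
{
	struct sockaddr_in addr;
	int srv = socket(AF_INET, SOCK_STREAM, 0);
	int cli = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;	/* becomes fastopenq->max_qlen on the listener */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(8080);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	setsockopt(srv, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
	listen(srv, 128);

	/* The first call only fetches a cookie via the SYN-ACK; once one
	 * is cached, the payload rides in the SYN and reaches
	 * tcp_try_fastopen() on the server.
	 */
	sendto(cli, "hello", 5, MSG_FASTOPEN,
	       (struct sockaddr *)&addr, sizeof(addr));
}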
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 8b9e7bad77c0..1c4908280d92 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); 109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
110} 110}
111 111
112static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 112static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
113{ 113{
114 struct tcp_sock *tp = tcp_sk(sk); 114 struct tcp_sock *tp = tcp_sk(sk);
115 struct hstcp *ca = inet_csk_ca(sk); 115 struct hstcp *ca = inet_csk_ca(sk);
116 116
117 if (!tcp_is_cwnd_limited(sk, in_flight)) 117 if (!tcp_is_cwnd_limited(sk))
118 return; 118 return;
119 119
120 if (tp->snd_cwnd <= tp->snd_ssthresh) 120 if (tp->snd_cwnd <= tp->snd_ssthresh)
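
The same mechanical change repeats in every congestion-control module that follows: cong_avoid() drops the in_flight argument, and tcp_is_cwnd_limited() is called with the socket alone, the limited state now being cached at transmit time (see the tcp_output.c hunks below). A minimal sketch of a module under the new prototype, mirroring the Reno-style pattern in these files; the "example" names are placeholders, while the helpers are the existing kernel ones.

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* The cwnd-limited decision now comes from state cached at
	 * transmit time rather than from the removed in_flight argument.
	 */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
}

static struct tcp_congestion_ops example_ops __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= example_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "example",
};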
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4a194acfd923..031361311a8b 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
227 return max((tp->snd_cwnd * ca->beta) >> 7, 2U); 227 return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
228} 228}
229 229
230static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 230static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
231{ 231{
232 struct tcp_sock *tp = tcp_sk(sk); 232 struct tcp_sock *tp = tcp_sk(sk);
233 struct htcp *ca = inet_csk_ca(sk); 233 struct htcp *ca = inet_csk_ca(sk);
234 234
235 if (!tcp_is_cwnd_limited(sk, in_flight)) 235 if (!tcp_is_cwnd_limited(sk))
236 return; 236 return;
237 237
238 if (tp->snd_cwnd <= tp->snd_ssthresh) 238 if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index a15a799bf768..d8f8f05a4951 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
87 * o Give cwnd a new value based on the model proposed 87 * o Give cwnd a new value based on the model proposed
88 * o remember increments <1 88 * o remember increments <1
89 */ 89 */
90static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked, 90static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
91 u32 in_flight)
92{ 91{
93 struct tcp_sock *tp = tcp_sk(sk); 92 struct tcp_sock *tp = tcp_sk(sk);
94 struct hybla *ca = inet_csk_ca(sk); 93 struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
101 ca->minrtt_us = tp->srtt_us; 100 ca->minrtt_us = tp->srtt_us;
102 } 101 }
103 102
104 if (!tcp_is_cwnd_limited(sk, in_flight)) 103 if (!tcp_is_cwnd_limited(sk))
105 return; 104 return;
106 105
107 if (!ca->hybla_en) { 106 if (!ca->hybla_en) {
108 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 107 tcp_reno_cong_avoid(sk, ack, acked);
109 return; 108 return;
110 } 109 }
111 110
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 863d105e3015..5999b3972e64 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
255/* 255/*
256 * Increase window in response to successful acknowledgment. 256 * Increase window in response to successful acknowledgment.
257 */ 257 */
258static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked, 258static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
259 u32 in_flight)
260{ 259{
261 struct tcp_sock *tp = tcp_sk(sk); 260 struct tcp_sock *tp = tcp_sk(sk);
262 struct illinois *ca = inet_csk_ca(sk); 261 struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
265 update_params(sk); 264 update_params(sk);
266 265
267 /* RFC2861 only increase cwnd if fully utilized */ 266 /* RFC2861 only increase cwnd if fully utilized */
268 if (!tcp_is_cwnd_limited(sk, in_flight)) 267 if (!tcp_is_cwnd_limited(sk))
269 return; 268 return;
270 269
271 /* In slow start */ 270 /* In slow start */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3a26b3b23f16..40661fc1e233 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1167,7 +1167,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1167 } 1167 }
1168 pkt_len = new_len; 1168 pkt_len = new_len;
1169 } 1169 }
1170 err = tcp_fragment(sk, skb, pkt_len, mss); 1170 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
1171 if (err < 0) 1171 if (err < 0)
1172 return err; 1172 return err;
1173 } 1173 }
@@ -2241,7 +2241,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2241 break; 2241 break;
2242 2242
2243 mss = skb_shinfo(skb)->gso_size; 2243 mss = skb_shinfo(skb)->gso_size;
2244 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss); 2244 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
2245 mss, GFP_ATOMIC);
2245 if (err < 0) 2246 if (err < 0)
2246 break; 2247 break;
2247 cnt = packets; 2248 cnt = packets;
@@ -2937,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
2937 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); 2938 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
2938} 2939}
2939 2940
2940static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 2941static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2941{ 2942{
2942 const struct inet_connection_sock *icsk = inet_csk(sk); 2943 const struct inet_connection_sock *icsk = inet_csk(sk);
2943 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight); 2944
2945 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
2944 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 2946 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
2945} 2947}
2946 2948
@@ -3363,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3363 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3365 u32 ack_seq = TCP_SKB_CB(skb)->seq;
3364 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3366 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3365 bool is_dupack = false; 3367 bool is_dupack = false;
3366 u32 prior_in_flight;
3367 u32 prior_fackets; 3368 u32 prior_fackets;
3368 int prior_packets = tp->packets_out; 3369 int prior_packets = tp->packets_out;
3369 const int prior_unsacked = tp->packets_out - tp->sacked_out; 3370 const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3396,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3396 flag |= FLAG_SND_UNA_ADVANCED; 3397 flag |= FLAG_SND_UNA_ADVANCED;
3397 3398
3398 prior_fackets = tp->fackets_out; 3399 prior_fackets = tp->fackets_out;
3399 prior_in_flight = tcp_packets_in_flight(tp);
3400 3400
3401 /* ts_recent update must be made after we are sure that the packet 3401 /* ts_recent update must be made after we are sure that the packet
3402 * is in window. 3402 * is in window.
@@ -3451,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3451 3451
3452 /* Advance cwnd if state allows */ 3452 /* Advance cwnd if state allows */
3453 if (tcp_may_raise_cwnd(sk, flag)) 3453 if (tcp_may_raise_cwnd(sk, flag))
3454 tcp_cong_avoid(sk, ack, acked, prior_in_flight); 3454 tcp_cong_avoid(sk, ack, acked);
3455 3455
3456 if (tcp_ack_is_dubious(sk, flag)) { 3456 if (tcp_ack_is_dubious(sk, flag)) {
3457 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3457 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -4702,28 +4702,6 @@ static int tcp_prune_queue(struct sock *sk)
4702 return -1; 4702 return -1;
4703} 4703}
4704 4704
4705/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
4706 * As additional protections, we do not touch cwnd in retransmission phases,
4707 * and if application hit its sndbuf limit recently.
4708 */
4709void tcp_cwnd_application_limited(struct sock *sk)
4710{
4711 struct tcp_sock *tp = tcp_sk(sk);
4712
4713 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
4714 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
4715 /* Limited by application or receiver window. */
4716 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
4717 u32 win_used = max(tp->snd_cwnd_used, init_win);
4718 if (win_used < tp->snd_cwnd) {
4719 tp->snd_ssthresh = tcp_current_ssthresh(sk);
4720 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
4721 }
4722 tp->snd_cwnd_used = 0;
4723 }
4724 tp->snd_cwnd_stamp = tcp_time_stamp;
4725}
4726
4727static bool tcp_should_expand_sndbuf(const struct sock *sk) 4705static bool tcp_should_expand_sndbuf(const struct sock *sk)
4728{ 4706{
4729 const struct tcp_sock *tp = tcp_sk(sk); 4707 const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 438f3b95143d..77cccda1ad0c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -336,8 +336,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
336 const int code = icmp_hdr(icmp_skb)->code; 336 const int code = icmp_hdr(icmp_skb)->code;
337 struct sock *sk; 337 struct sock *sk;
338 struct sk_buff *skb; 338 struct sk_buff *skb;
339 struct request_sock *req; 339 struct request_sock *fastopen;
340 __u32 seq; 340 __u32 seq, snd_una;
341 __u32 remaining; 341 __u32 remaining;
342 int err; 342 int err;
343 struct net *net = dev_net(icmp_skb->dev); 343 struct net *net = dev_net(icmp_skb->dev);
@@ -378,12 +378,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
378 378
379 icsk = inet_csk(sk); 379 icsk = inet_csk(sk);
380 tp = tcp_sk(sk); 380 tp = tcp_sk(sk);
381 req = tp->fastopen_rsk;
382 seq = ntohl(th->seq); 381 seq = ntohl(th->seq);
382 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
383 if (sk->sk_state != TCP_LISTEN && 385 if (sk->sk_state != TCP_LISTEN &&
384 !between(seq, tp->snd_una, tp->snd_nxt) && 386 !between(seq, snd_una, tp->snd_nxt)) {
385 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
386 /* For a Fast Open socket, allow seq to be snt_isn. */
387 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 387 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
388 goto out; 388 goto out;
389 } 389 }
@@ -426,11 +426,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) 426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 break; 427 break;
428 if (seq != tp->snd_una || !icsk->icsk_retransmits || 428 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 !icsk->icsk_backoff) 429 !icsk->icsk_backoff || fastopen)
430 break; 430 break;
431 431
432 /* XXX (TFO) - revisit the following logic for TFO */
433
434 if (sock_owned_by_user(sk)) 432 if (sock_owned_by_user(sk))
435 break; 433 break;
436 434
@@ -462,14 +460,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
462 goto out; 460 goto out;
463 } 461 }
464 462
465 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
466 * than following the TCP_SYN_RECV case and closing the socket,
467 * we ignore the ICMP error and keep trying like a fully established
468 * socket. Is this the right thing to do?
469 */
470 if (req && req->sk == NULL)
471 goto out;
472
473 switch (sk->sk_state) { 463 switch (sk->sk_state) {
474 struct request_sock *req, **prev; 464 struct request_sock *req, **prev;
475 case TCP_LISTEN: 465 case TCP_LISTEN:
@@ -502,10 +492,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
502 goto out; 492 goto out;
503 493
504 case TCP_SYN_SENT: 494 case TCP_SYN_SENT:
505 case TCP_SYN_RECV: /* Cannot happen. 495 case TCP_SYN_RECV:
506 It can f.e. if SYNs crossed, 496 /* Only in fast or simultaneous open. If a fast open socket is
507 or Fast Open. 497 * is already accepted it is treated as a connected one below.
508 */ 498 */
499 if (fastopen && fastopen->sk == NULL)
500 break;
501
509 if (!sock_owned_by_user(sk)) { 502 if (!sock_owned_by_user(sk)) {
510 sk->sk_err = err; 503 sk->sk_err = err;
511 504
@@ -822,7 +815,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
822 */ 815 */
823static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 816static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
824 struct request_sock *req, 817 struct request_sock *req,
825 u16 queue_mapping) 818 u16 queue_mapping,
819 struct tcp_fastopen_cookie *foc)
826{ 820{
827 const struct inet_request_sock *ireq = inet_rsk(req); 821 const struct inet_request_sock *ireq = inet_rsk(req);
828 struct flowi4 fl4; 822 struct flowi4 fl4;
@@ -833,7 +827,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
833 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 827 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 return -1; 828 return -1;
835 829
836 skb = tcp_make_synack(sk, dst, req, NULL); 830 skb = tcp_make_synack(sk, dst, req, foc);
837 831
838 if (skb) { 832 if (skb) {
839 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 833 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +846,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
852 846
853static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req) 847static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
854{ 848{
855 int res = tcp_v4_send_synack(sk, NULL, req, 0); 849 int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
856 850
857 if (!res) { 851 if (!res) {
858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 852 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1260,187 +1254,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1260}; 1254};
1261#endif 1255#endif
1262 1256
1263static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1264 struct request_sock *req,
1265 struct tcp_fastopen_cookie *foc,
1266 struct tcp_fastopen_cookie *valid_foc)
1267{
1268 bool skip_cookie = false;
1269 struct fastopen_queue *fastopenq;
1270
1271 if (likely(!fastopen_cookie_present(foc))) {
1272 /* See include/net/tcp.h for the meaning of these knobs */
1273 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1274 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1275 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1276 skip_cookie = true; /* no cookie to validate */
1277 else
1278 return false;
1279 }
1280 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1281 /* A FO option is present; bump the counter. */
1282 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1283
1284 /* Make sure the listener has enabled fastopen, and we don't
1285 * exceed the max # of pending TFO requests allowed before trying
1286 * to validate the cookie, in order to avoid burning CPU cycles
1287 * unnecessarily.
1288 *
1289 * XXX (TFO) - The implication of checking the max_qlen before
1290 * processing a cookie request is that clients can't differentiate
1291 * between qlen overflow causing Fast Open to be disabled
1292 * temporarily vs a server not supporting Fast Open at all.
1293 */
1294 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1295 fastopenq == NULL || fastopenq->max_qlen == 0)
1296 return false;
1297
1298 if (fastopenq->qlen >= fastopenq->max_qlen) {
1299 struct request_sock *req1;
1300 spin_lock(&fastopenq->lock);
1301 req1 = fastopenq->rskq_rst_head;
1302 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1303 spin_unlock(&fastopenq->lock);
1304 NET_INC_STATS_BH(sock_net(sk),
1305 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1306 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1307 foc->len = -1;
1308 return false;
1309 }
1310 fastopenq->rskq_rst_head = req1->dl_next;
1311 fastopenq->qlen--;
1312 spin_unlock(&fastopenq->lock);
1313 reqsk_free(req1);
1314 }
1315 if (skip_cookie) {
1316 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1317 return true;
1318 }
1319
1320 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1321 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1322 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1323 ip_hdr(skb)->daddr, valid_foc);
1324 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1325 memcmp(&foc->val[0], &valid_foc->val[0],
1326 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1327 return false;
1328 valid_foc->len = -1;
1329 }
1330 /* Acknowledge the data received from the peer. */
1331 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1332 return true;
1333 } else if (foc->len == 0) { /* Client requesting a cookie */
1334 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1335 ip_hdr(skb)->daddr, valid_foc);
1336 NET_INC_STATS_BH(sock_net(sk),
1337 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1338 } else {
1339 /* Client sent a cookie with wrong size. Treat it
1340 * the same as invalid and return a valid one.
1341 */
1342 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1343 ip_hdr(skb)->daddr, valid_foc);
1344 }
1345 return false;
1346}
1347
1348static int tcp_v4_conn_req_fastopen(struct sock *sk,
1349 struct sk_buff *skb,
1350 struct sk_buff *skb_synack,
1351 struct request_sock *req)
1352{
1353 struct tcp_sock *tp = tcp_sk(sk);
1354 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1355 const struct inet_request_sock *ireq = inet_rsk(req);
1356 struct sock *child;
1357 int err;
1358
1359 req->num_retrans = 0;
1360 req->num_timeout = 0;
1361 req->sk = NULL;
1362
1363 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1364 if (child == NULL) {
1365 NET_INC_STATS_BH(sock_net(sk),
1366 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1367 kfree_skb(skb_synack);
1368 return -1;
1369 }
1370 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1371 ireq->ir_rmt_addr, ireq->opt);
1372 err = net_xmit_eval(err);
1373 if (!err)
1374 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1375 /* XXX (TFO) - is it ok to ignore error and continue? */
1376
1377 spin_lock(&queue->fastopenq->lock);
1378 queue->fastopenq->qlen++;
1379 spin_unlock(&queue->fastopenq->lock);
1380
1381 /* Initialize the child socket. Have to fix some values to take
1382 * into account that the child is a Fast Open socket and is created
1383 * only out of the bits carried in the SYN packet.
1384 */
1385 tp = tcp_sk(child);
1386
1387 tp->fastopen_rsk = req;
1388 /* Do a hold on the listener sk so that if the listener is being
1389 * closed, the child that has been accepted can live on and still
1390 * access listen_lock.
1391 */
1392 sock_hold(sk);
1393 tcp_rsk(req)->listener = sk;
1394
1395 /* RFC1323: The window in SYN & SYN/ACK segments is never
1396 * scaled. So correct it appropriately.
1397 */
1398 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1399
1400 /* Activate the retrans timer so that SYNACK can be retransmitted.
1401 * The request socket is not added to the SYN table of the parent
1402 * because it's been added to the accept queue directly.
1403 */
1404 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1405 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1406
1407 /* Add the child socket directly into the accept queue */
1408 inet_csk_reqsk_queue_add(sk, req, child);
1409
1410 /* Now finish processing the fastopen child socket. */
1411 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1412 tcp_init_congestion_control(child);
1413 tcp_mtup_init(child);
1414 tcp_init_metrics(child);
1415 tcp_init_buffer_space(child);
1416
1417 /* Queue the data carried in the SYN packet. We need to first
1418 * bump skb's refcnt because the caller will attempt to free it.
1419 *
1420 * XXX (TFO) - we honor a zero-payload TFO request for now.
1421 * (Any reason not to?)
1422 */
1423 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1424 /* Don't queue the skb if there is no payload in SYN.
1425 * XXX (TFO) - How about SYN+FIN?
1426 */
1427 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1428 } else {
1429 skb = skb_get(skb);
1430 skb_dst_drop(skb);
1431 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1432 skb_set_owner_r(skb, child);
1433 __skb_queue_tail(&child->sk_receive_queue, skb);
1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435 tp->syn_data_acked = 1;
1436 }
1437 sk->sk_data_ready(sk);
1438 bh_unlock_sock(child);
1439 sock_put(child);
1440 WARN_ON(req->sk == NULL);
1441 return 0;
1442}
1443
1444int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1257int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1445{ 1258{
1446 struct tcp_options_received tmp_opt; 1259 struct tcp_options_received tmp_opt;
@@ -1451,12 +1264,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1451 __be32 saddr = ip_hdr(skb)->saddr; 1264 __be32 saddr = ip_hdr(skb)->saddr;
1452 __be32 daddr = ip_hdr(skb)->daddr; 1265 __be32 daddr = ip_hdr(skb)->daddr;
1453 __u32 isn = TCP_SKB_CB(skb)->when; 1266 __u32 isn = TCP_SKB_CB(skb)->when;
1454 bool want_cookie = false; 1267 bool want_cookie = false, fastopen;
1455 struct flowi4 fl4; 1268 struct flowi4 fl4;
1456 struct tcp_fastopen_cookie foc = { .len = -1 }; 1269 struct tcp_fastopen_cookie foc = { .len = -1 };
1457 struct tcp_fastopen_cookie valid_foc = { .len = -1 }; 1270 int err;
1458 struct sk_buff *skb_synack;
1459 int do_fastopen;
1460 1271
1461 /* Never answer SYNs sent to broadcast or multicast */ 1272 /* Never answer SYNs sent to broadcast or multicast */
1462 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1273 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1507,6 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1507 ireq->ir_rmt_addr = saddr; 1318 ireq->ir_rmt_addr = saddr;
1508 ireq->no_srccheck = inet_sk(sk)->transparent; 1319 ireq->no_srccheck = inet_sk(sk)->transparent;
1509 ireq->opt = tcp_v4_save_options(skb); 1320 ireq->opt = tcp_v4_save_options(skb);
1321 ireq->ir_mark = inet_request_mark(sk, skb);
1510 1322
1511 if (security_inet_conn_request(sk, skb, req)) 1323 if (security_inet_conn_request(sk, skb, req))
1512 goto drop_and_free; 1324 goto drop_and_free;
@@ -1555,52 +1367,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1555 1367
1556 isn = tcp_v4_init_sequence(skb); 1368 isn = tcp_v4_init_sequence(skb);
1557 } 1369 }
1558 tcp_rsk(req)->snt_isn = isn; 1370 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1559
1560 if (dst == NULL) {
1561 dst = inet_csk_route_req(sk, &fl4, req);
1562 if (dst == NULL)
1563 goto drop_and_free;
1564 }
1565 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1566
1567 /* We don't call tcp_v4_send_synack() directly because we need
1568 * to make sure a child socket can be created successfully before
1569 * sending back synack!
1570 *
1571 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1572 * (or better yet, call tcp_send_synack() in the child context
1573 * directly, but will have to fix bunch of other code first)
1574 * after syn_recv_sock() except one will need to first fix the
1575 * latter to remove its dependency on the current implementation
1576 * of tcp_v4_send_synack()->tcp_select_initial_window().
1577 */
1578 skb_synack = tcp_make_synack(sk, dst, req,
1579 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1580
1581 if (skb_synack) {
1582 __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1583 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1584 } else
1585 goto drop_and_free; 1371 goto drop_and_free;
1586 1372
1587 if (likely(!do_fastopen)) { 1373 tcp_rsk(req)->snt_isn = isn;
1588 int err; 1374 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1589 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr, 1375 tcp_openreq_init_rwin(req, sk, dst);
1590 ireq->ir_rmt_addr, ireq->opt); 1376 fastopen = !want_cookie &&
1591 err = net_xmit_eval(err); 1377 tcp_try_fastopen(sk, skb, req, &foc, dst);
1378 err = tcp_v4_send_synack(sk, dst, req,
1379 skb_get_queue_mapping(skb), &foc);
1380 if (!fastopen) {
1592 if (err || want_cookie) 1381 if (err || want_cookie)
1593 goto drop_and_free; 1382 goto drop_and_free;
1594 1383
1595 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1384 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1596 tcp_rsk(req)->listener = NULL; 1385 tcp_rsk(req)->listener = NULL;
1597 /* Add the request_sock to the SYN table */
1598 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1386 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1599 if (fastopen_cookie_present(&foc) && foc.len != 0) 1387 }
1600 NET_INC_STATS_BH(sock_net(sk),
1601 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1602 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1603 goto drop_and_free;
1604 1388
1605 return 0; 1389 return 0;
1606 1390
@@ -1744,28 +1528,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1744 return sk; 1528 return sk;
1745} 1529}
1746 1530
1747static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1748{
1749 const struct iphdr *iph = ip_hdr(skb);
1750
1751 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1752 if (!tcp_v4_check(skb->len, iph->saddr,
1753 iph->daddr, skb->csum)) {
1754 skb->ip_summed = CHECKSUM_UNNECESSARY;
1755 return 0;
1756 }
1757 }
1758
1759 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1760 skb->len, IPPROTO_TCP, 0);
1761
1762 if (skb->len <= 76) {
1763 return __skb_checksum_complete(skb);
1764 }
1765 return 0;
1766}
1767
1768
1769/* The socket must have its spinlock held when we get 1531/* The socket must have its spinlock held when we get
1770 * here. 1532 * here.
1771 * 1533 *
@@ -1960,7 +1722,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
1960 * Packet length and doff are validated by header prediction, 1722 * Packet length and doff are validated by header prediction,
1961 * provided case of th->doff==0 is eliminated. 1723 * provided case of th->doff==0 is eliminated.
1962 * So, we defer the checks. */ 1724 * So, we defer the checks. */
1963 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb)) 1725
1726 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1964 goto csum_error; 1727 goto csum_error;
1965 1728
1966 th = tcp_hdr(skb); 1729 th = tcp_hdr(skb);
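
With the open-coded tcp_v4_checksum_init() gone, the receive path relies on the generic skb_checksum_init() plus the inet_compute_pseudo callback, neither of which is part of this diff. A hedged sketch of the behaviour they fold together, reconstructed from the removed body above; the _sketch suffixes mark these as reconstructions, not the actual helper definitions.

static inline __wsum inet_compute_pseudo_sketch(struct sk_buff *skb, int proto)
{
	/* IPv4 pseudo-header sum, as the removed code built by hand */
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

static __sum16 tcp_v4_checksum_init_sketch(struct sk_buff *skb)
{
	/* A completed hardware sum can be verified right away... */
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(inet_compute_pseudo_sketch(skb, IPPROTO_TCP),
				skb->csum))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	/* ...otherwise seed skb->csum with the pseudo-header so that
	 * __skb_checksum_complete() can finish validation later.
	 */
	skb->csum = inet_compute_pseudo_sketch(skb, IPPROTO_TCP);

	/* The removed code eagerly checked tiny packets; nonzero = bad. */
	if (skb->len <= 76)
		return __skb_checksum_complete(skb);
	return 0;
}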
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index c9aecae31327..1e70fa8fa793 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
115 * Will only call newReno CA when away from inference. 115 * Will only call newReno CA when away from inference.
116 * From TCP-LP's paper, this will be handled in additive increase. 116 * From TCP-LP's paper, this will be handled in additive increase.
117 */ 117 */
118static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked, 118static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
119 u32 in_flight)
120{ 119{
121 struct lp *lp = inet_csk_ca(sk); 120 struct lp *lp = inet_csk_ca(sk);
122 121
123 if (!(lp->flag & LP_WITHIN_INF)) 122 if (!(lp->flag & LP_WITHIN_INF))
124 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 tcp_reno_cong_avoid(sk, ack, acked);
125} 124}
126 125
127/** 126/**
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index dcaf72f10216..4fe041805989 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1159,10 +1159,7 @@ static void __net_exit tcp_net_metrics_exit(struct net *net)
1159 tm = next; 1159 tm = next;
1160 } 1160 }
1161 } 1161 }
1162 if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) 1162 kvfree(net->ipv4.tcp_metrics_hash);
1163 vfree(net->ipv4.tcp_metrics_hash);
1164 else
1165 kfree(net->ipv4.tcp_metrics_hash);
1166} 1163}
1167 1164
1168static __net_initdata struct pernet_operations tcp_net_metrics_ops = { 1165static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
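
kvfree() collapses the vmalloc-or-kmalloc branch the removed lines spelled out. A sketch of its behaviour, equivalent to what this hunk deletes:

/* Free memory that may have come from either vmalloc() or kmalloc()
 * without the caller having to track which allocator was used.
 */
static void kvfree_sketch(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}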
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 05c1b155251d..e68e0d4af6c9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -362,6 +362,37 @@ void tcp_twsk_destructor(struct sock *sk)
362} 362}
363EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 363EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
364 364
365void tcp_openreq_init_rwin(struct request_sock *req,
366 struct sock *sk, struct dst_entry *dst)
367{
368 struct inet_request_sock *ireq = inet_rsk(req);
369 struct tcp_sock *tp = tcp_sk(sk);
370 __u8 rcv_wscale;
371 int mss = dst_metric_advmss(dst);
372
373 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
374 mss = tp->rx_opt.user_mss;
375
376 /* Set this up on the first call only */
377 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
378
379 /* limit the window selection if the user enforces a smaller rx buffer */
380 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
381 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
382 req->window_clamp = tcp_full_space(sk);
383
384 /* tcp_full_space because it is guaranteed to be the first packet */
385 tcp_select_initial_window(tcp_full_space(sk),
386 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
387 &req->rcv_wnd,
388 &req->window_clamp,
389 ireq->wscale_ok,
390 &rcv_wscale,
391 dst_metric(dst, RTAX_INITRWND));
392 ireq->rcv_wscale = rcv_wscale;
393}
394EXPORT_SYMBOL(tcp_openreq_init_rwin);
395
365static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, 396static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
366 struct request_sock *req) 397 struct request_sock *req)
367{ 398{
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b92b81718ca4..4e86c59ec7f7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -57,10 +57,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
57 SKB_GSO_TCP_ECN | 57 SKB_GSO_TCP_ECN |
58 SKB_GSO_TCPV6 | 58 SKB_GSO_TCPV6 |
59 SKB_GSO_GRE | 59 SKB_GSO_GRE |
60 SKB_GSO_GRE_CSUM |
60 SKB_GSO_IPIP | 61 SKB_GSO_IPIP |
61 SKB_GSO_SIT | 62 SKB_GSO_SIT |
62 SKB_GSO_MPLS | 63 SKB_GSO_MPLS |
63 SKB_GSO_UDP_TUNNEL | 64 SKB_GSO_UDP_TUNNEL |
65 SKB_GSO_UDP_TUNNEL_CSUM |
64 0) || 66 0) ||
65 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) 67 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
66 goto out; 68 goto out;
@@ -97,9 +99,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
97 th->check = newcheck; 99 th->check = newcheck;
98 100
99 if (skb->ip_summed != CHECKSUM_PARTIAL) 101 if (skb->ip_summed != CHECKSUM_PARTIAL)
100 th->check = 102 th->check = gso_make_checksum(skb, ~th->check);
101 csum_fold(csum_partial(skb_transport_header(skb),
102 thlen, skb->csum));
103 103
104 seq += mss; 104 seq += mss;
105 if (copy_destructor) { 105 if (copy_destructor) {
@@ -133,8 +133,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
133 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 133 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
134 (__force u32)delta)); 134 (__force u32)delta));
135 if (skb->ip_summed != CHECKSUM_PARTIAL) 135 if (skb->ip_summed != CHECKSUM_PARTIAL)
136 th->check = csum_fold(csum_partial(skb_transport_header(skb), 136 th->check = gso_make_checksum(skb, ~th->check);
137 thlen, skb->csum));
138out: 137out:
139 return segs; 138 return segs;
140} 139}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2d340bd2cd3d..d92bce0ea24e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -627,7 +627,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
627 if (unlikely(!ireq->tstamp_ok)) 627 if (unlikely(!ireq->tstamp_ok))
628 remaining -= TCPOLEN_SACKPERM_ALIGNED; 628 remaining -= TCPOLEN_SACKPERM_ALIGNED;
629 } 629 }
630 if (foc != NULL) { 630 if (foc != NULL && foc->len >= 0) {
631 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; 631 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
632 need = (need + 3) & ~3U; /* Align to 32 bits */ 632 need = (need + 3) & ~3U; /* Align to 32 bits */
633 if (remaining >= need) { 633 if (remaining >= need) {
@@ -878,15 +878,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
878 BUG_ON(!skb || !tcp_skb_pcount(skb)); 878 BUG_ON(!skb || !tcp_skb_pcount(skb));
879 879
880 if (clone_it) { 880 if (clone_it) {
881 const struct sk_buff *fclone = skb + 1;
882
883 skb_mstamp_get(&skb->skb_mstamp); 881 skb_mstamp_get(&skb->skb_mstamp);
884 882
885 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
886 fclone->fclone == SKB_FCLONE_CLONE))
887 NET_INC_STATS(sock_net(sk),
888 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
889
890 if (unlikely(skb_cloned(skb))) 883 if (unlikely(skb_cloned(skb)))
891 skb = pskb_copy(skb, gfp_mask); 884 skb = pskb_copy(skb, gfp_mask);
892 else 885 else
@@ -1081,7 +1074,7 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
1081 * Remember, these are still headerless SKBs at this point. 1074 * Remember, these are still headerless SKBs at this point.
1082 */ 1075 */
1083int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 1076int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1084 unsigned int mss_now) 1077 unsigned int mss_now, gfp_t gfp)
1085{ 1078{
1086 struct tcp_sock *tp = tcp_sk(sk); 1079 struct tcp_sock *tp = tcp_sk(sk);
1087 struct sk_buff *buff; 1080 struct sk_buff *buff;
@@ -1096,11 +1089,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1096 if (nsize < 0) 1089 if (nsize < 0)
1097 nsize = 0; 1090 nsize = 0;
1098 1091
1099 if (skb_unclone(skb, GFP_ATOMIC)) 1092 if (skb_unclone(skb, gfp))
1100 return -ENOMEM; 1093 return -ENOMEM;
1101 1094
1102 /* Get a new skb... force flag on. */ 1095 /* Get a new skb... force flag on. */
1103 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 1096 buff = sk_stream_alloc_skb(sk, nsize, gfp);
1104 if (buff == NULL) 1097 if (buff == NULL)
1105 return -ENOMEM; /* We'll just try again later. */ 1098 return -ENOMEM; /* We'll just try again later. */
1106 1099
@@ -1387,12 +1380,43 @@ unsigned int tcp_current_mss(struct sock *sk)
1387 return mss_now; 1380 return mss_now;
1388} 1381}
1389 1382
1390/* Congestion window validation. (RFC2861) */ 1383/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1391static void tcp_cwnd_validate(struct sock *sk) 1384 * As additional protections, we do not touch cwnd in retransmission phases,
1385 * and if application hit its sndbuf limit recently.
1386 */
1387static void tcp_cwnd_application_limited(struct sock *sk)
1388{
1389 struct tcp_sock *tp = tcp_sk(sk);
1390
1391 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1392 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1393 /* Limited by application or receiver window. */
1394 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1395 u32 win_used = max(tp->snd_cwnd_used, init_win);
1396 if (win_used < tp->snd_cwnd) {
1397 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1398 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1399 }
1400 tp->snd_cwnd_used = 0;
1401 }
1402 tp->snd_cwnd_stamp = tcp_time_stamp;
1403}
1404
1405static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1392{ 1406{
1393 struct tcp_sock *tp = tcp_sk(sk); 1407 struct tcp_sock *tp = tcp_sk(sk);
1394 1408
1395 if (tp->packets_out >= tp->snd_cwnd) { 1409 /* Track the maximum number of outstanding packets in each
1410 * window, and remember whether we were cwnd-limited then.
1411 */
1412 if (!before(tp->snd_una, tp->max_packets_seq) ||
1413 tp->packets_out > tp->max_packets_out) {
1414 tp->max_packets_out = tp->packets_out;
1415 tp->max_packets_seq = tp->snd_nxt;
1416 tp->is_cwnd_limited = is_cwnd_limited;
1417 }
1418
1419 if (tcp_is_cwnd_limited(sk)) {
1396 /* Network is fed fully. */ 1420 /* Network is fed fully. */
1397 tp->snd_cwnd_used = 0; 1421 tp->snd_cwnd_used = 0;
1398 tp->snd_cwnd_stamp = tcp_time_stamp; 1422 tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1601,7 +1625,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1601 1625
1602 /* All of a TSO frame must be composed of paged data. */ 1626 /* All of a TSO frame must be composed of paged data. */
1603 if (skb->len != skb->data_len) 1627 if (skb->len != skb->data_len)
1604 return tcp_fragment(sk, skb, len, mss_now); 1628 return tcp_fragment(sk, skb, len, mss_now, gfp);
1605 1629
1606 buff = sk_stream_alloc_skb(sk, 0, gfp); 1630 buff = sk_stream_alloc_skb(sk, 0, gfp);
1607 if (unlikely(buff == NULL)) 1631 if (unlikely(buff == NULL))
@@ -1644,7 +1668,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1644 * 1668 *
1645 * This algorithm is from John Heffner. 1669 * This algorithm is from John Heffner.
1646 */ 1670 */
1647static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1671static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1672 bool *is_cwnd_limited)
1648{ 1673{
1649 struct tcp_sock *tp = tcp_sk(sk); 1674 struct tcp_sock *tp = tcp_sk(sk);
1650 const struct inet_connection_sock *icsk = inet_csk(sk); 1675 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1708,6 +1733,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1708 if (!tp->tso_deferred) 1733 if (!tp->tso_deferred)
1709 tp->tso_deferred = 1 | (jiffies << 1); 1734 tp->tso_deferred = 1 | (jiffies << 1);
1710 1735
1736 if (cong_win < send_win && cong_win < skb->len)
1737 *is_cwnd_limited = true;
1738
1711 return true; 1739 return true;
1712 1740
1713send_now: 1741send_now:
@@ -1868,6 +1896,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1868 unsigned int tso_segs, sent_pkts; 1896 unsigned int tso_segs, sent_pkts;
1869 int cwnd_quota; 1897 int cwnd_quota;
1870 int result; 1898 int result;
1899 bool is_cwnd_limited = false;
1871 1900
1872 sent_pkts = 0; 1901 sent_pkts = 0;
1873 1902
@@ -1892,6 +1921,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1892 1921
1893 cwnd_quota = tcp_cwnd_test(tp, skb); 1922 cwnd_quota = tcp_cwnd_test(tp, skb);
1894 if (!cwnd_quota) { 1923 if (!cwnd_quota) {
1924 is_cwnd_limited = true;
1895 if (push_one == 2) 1925 if (push_one == 2)
1896 /* Force out a loss probe pkt. */ 1926 /* Force out a loss probe pkt. */
1897 cwnd_quota = 1; 1927 cwnd_quota = 1;
@@ -1908,7 +1938,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1908 nonagle : TCP_NAGLE_PUSH)))) 1938 nonagle : TCP_NAGLE_PUSH))))
1909 break; 1939 break;
1910 } else { 1940 } else {
1911 if (!push_one && tcp_tso_should_defer(sk, skb)) 1941 if (!push_one &&
1942 tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
1912 break; 1943 break;
1913 } 1944 }
1914 1945
@@ -1973,7 +2004,7 @@ repair:
1973 /* Send one loss probe per tail loss episode. */ 2004 /* Send one loss probe per tail loss episode. */
1974 if (push_one != 2) 2005 if (push_one != 2)
1975 tcp_schedule_loss_probe(sk); 2006 tcp_schedule_loss_probe(sk);
1976 tcp_cwnd_validate(sk); 2007 tcp_cwnd_validate(sk, is_cwnd_limited);
1977 return false; 2008 return false;
1978 } 2009 }
1979 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 2010 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
@@ -2037,6 +2068,25 @@ bool tcp_schedule_loss_probe(struct sock *sk)
2037 return true; 2068 return true;
2038} 2069}
2039 2070
2071/* Thanks to skb fast clones, we can detect if a prior transmit of
2072 * a packet is still in a qdisc or driver queue.
2073 * In this case, there is very little point in doing a retransmit!
2074 * Note: This is called from BH context only.
2075 */
2076static bool skb_still_in_host_queue(const struct sock *sk,
2077 const struct sk_buff *skb)
2078{
2079 const struct sk_buff *fclone = skb + 1;
2080
2081 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
2082 fclone->fclone == SKB_FCLONE_CLONE)) {
2083 NET_INC_STATS_BH(sock_net(sk),
2084 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2085 return true;
2086 }
2087 return false;
2088}
2089
2040/* When probe timeout (PTO) fires, send a new segment if one exists, else 2090/* When probe timeout (PTO) fires, send a new segment if one exists, else
2041 * retransmit the last segment. 2091 * retransmit the last segment.
2042 */ 2092 */
@@ -2062,12 +2112,16 @@ void tcp_send_loss_probe(struct sock *sk)
2062 if (WARN_ON(!skb)) 2112 if (WARN_ON(!skb))
2063 goto rearm_timer; 2113 goto rearm_timer;
2064 2114
2115 if (skb_still_in_host_queue(sk, skb))
2116 goto rearm_timer;
2117
2065 pcount = tcp_skb_pcount(skb); 2118 pcount = tcp_skb_pcount(skb);
2066 if (WARN_ON(!pcount)) 2119 if (WARN_ON(!pcount))
2067 goto rearm_timer; 2120 goto rearm_timer;
2068 2121
2069 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 2122 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2070 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 2123 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
2124 GFP_ATOMIC)))
2071 goto rearm_timer; 2125 goto rearm_timer;
2072 skb = tcp_write_queue_tail(sk); 2126 skb = tcp_write_queue_tail(sk);
2073 } 2127 }
@@ -2075,9 +2129,7 @@ void tcp_send_loss_probe(struct sock *sk)
2075 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 2129 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2076 goto rearm_timer; 2130 goto rearm_timer;
2077 2131
2078 /* Probe with zero data doesn't trigger fast recovery. */ 2132 err = __tcp_retransmit_skb(sk, skb);
2079 if (skb->len > 0)
2080 err = __tcp_retransmit_skb(sk, skb);
2081 2133
2082 /* Record snd_nxt for loss detection. */ 2134 /* Record snd_nxt for loss detection. */
2083 if (likely(!err)) 2135 if (likely(!err))
@@ -2383,6 +2435,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2383 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2435 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2384 return -EAGAIN; 2436 return -EAGAIN;
2385 2437
2438 if (skb_still_in_host_queue(sk, skb))
2439 return -EBUSY;
2440
2386 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2441 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2387 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2442 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2388 BUG(); 2443 BUG();
@@ -2405,7 +2460,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2405 return -EAGAIN; 2460 return -EAGAIN;
2406 2461
2407 if (skb->len > cur_mss) { 2462 if (skb->len > cur_mss) {
2408 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 2463 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
2409 return -ENOMEM; /* We'll try again later. */ 2464 return -ENOMEM; /* We'll try again later. */
2410 } else { 2465 } else {
2411 int oldpcount = tcp_skb_pcount(skb); 2466 int oldpcount = tcp_skb_pcount(skb);
@@ -2476,7 +2531,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2476 * see tcp_input.c tcp_sacktag_write_queue(). 2531 * see tcp_input.c tcp_sacktag_write_queue().
2477 */ 2532 */
2478 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2533 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2479 } else { 2534 } else if (err != -EBUSY) {
2480 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2481 } 2536 }
2482 return err; 2537 return err;
@@ -2754,27 +2809,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2754 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2809 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2755 mss = tp->rx_opt.user_mss; 2810 mss = tp->rx_opt.user_mss;
2756 2811
2757 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2758 __u8 rcv_wscale;
2759 /* Set this up on the first call only */
2760 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2761
2762 /* limit the window selection if the user enforces a smaller rx buffer */
2763 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2764 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2765 req->window_clamp = tcp_full_space(sk);
2766
2767 /* tcp_full_space because it is guaranteed to be the first packet */
2768 tcp_select_initial_window(tcp_full_space(sk),
2769 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2770 &req->rcv_wnd,
2771 &req->window_clamp,
2772 ireq->wscale_ok,
2773 &rcv_wscale,
2774 dst_metric(dst, RTAX_INITRWND));
2775 ireq->rcv_wscale = rcv_wscale;
2776 }
2777
2778 memset(&opts, 0, sizeof(opts)); 2812 memset(&opts, 0, sizeof(opts));
2779#ifdef CONFIG_SYN_COOKIES 2813#ifdef CONFIG_SYN_COOKIES
2780 if (unlikely(req->cookie_ts)) 2814 if (unlikely(req->cookie_ts))
@@ -3207,7 +3241,7 @@ int tcp_write_wakeup(struct sock *sk)
3207 skb->len > mss) { 3241 skb->len > mss) {
3208 seg_size = min(seg_size, mss); 3242 seg_size = min(seg_size, mss);
3209 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3243 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3210 if (tcp_fragment(sk, skb, seg_size, mss)) 3244 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
3211 return -1; 3245 return -1;
3212 } else if (!tcp_skb_pcount(skb)) 3246 } else if (!tcp_skb_pcount(skb))
3213 tcp_set_skb_tso_segs(sk, skb, mss); 3247 tcp_set_skb_tso_segs(sk, skb, mss);
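
tcp_cwnd_validate() now snapshots, per window, the largest packets_out and whether the flow was cwnd-limited at transmit time; the parameterless tcp_is_cwnd_limited() used throughout this patch only has to consult that cache. Its definition lives in include/net/tcp.h and is not part of this hunk; what follows is a hedged reconstruction from the fields set above, with a _sketch suffix to mark it as such.

static inline bool tcp_is_cwnd_limited_sketch(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* In slow start, keep growing cwnd until it reaches twice the
	 * largest burst actually put on the wire.
	 */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	/* Afterwards, trust the flag cached by tcp_cwnd_validate(). */
	return tp->is_cwnd_limited;
}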
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 0ac50836da4d..8250949b8853 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,12 +15,11 @@
15#define TCP_SCALABLE_AI_CNT 50U 15#define TCP_SCALABLE_AI_CNT 50U
16#define TCP_SCALABLE_MD_SCALE 3 16#define TCP_SCALABLE_MD_SCALE 3
17 17
18static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked, 18static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
19 u32 in_flight)
20{ 19{
21 struct tcp_sock *tp = tcp_sk(sk); 20 struct tcp_sock *tp = tcp_sk(sk);
22 21
23 if (!tcp_is_cwnd_limited(sk, in_flight)) 22 if (!tcp_is_cwnd_limited(sk))
24 return; 23 return;
25 24
26 if (tp->snd_cwnd <= tp->snd_ssthresh) 25 if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 48539fff6357..9a5e05f27f4f 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
163 return min(tp->snd_ssthresh, tp->snd_cwnd-1); 163 return min(tp->snd_ssthresh, tp->snd_cwnd-1);
164} 164}
165 165
166static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, 166static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
167 u32 in_flight)
168{ 167{
169 struct tcp_sock *tp = tcp_sk(sk); 168 struct tcp_sock *tp = tcp_sk(sk);
170 struct vegas *vegas = inet_csk_ca(sk); 169 struct vegas *vegas = inet_csk_ca(sk);
171 170
172 if (!vegas->doing_vegas_now) { 171 if (!vegas->doing_vegas_now) {
173 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 172 tcp_reno_cong_avoid(sk, ack, acked);
174 return; 173 return;
175 } 174 }
176 175
@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
195 /* We don't have enough RTT samples to do the Vegas 194 /* We don't have enough RTT samples to do the Vegas
196 * calculation, so we'll behave like Reno. 195 * calculation, so we'll behave like Reno.
197 */ 196 */
198 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 197 tcp_reno_cong_avoid(sk, ack, acked);
199 } else { 198 } else {
200 u32 rtt, diff; 199 u32 rtt, diff;
201 u64 target_cwnd; 200 u64 target_cwnd;
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 1b8e28fcd7e1..27b9825753d1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
114 tcp_veno_init(sk); 114 tcp_veno_init(sk);
115} 115}
116 116
117static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, 117static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
118 u32 in_flight)
119{ 118{
120 struct tcp_sock *tp = tcp_sk(sk); 119 struct tcp_sock *tp = tcp_sk(sk);
121 struct veno *veno = inet_csk_ca(sk); 120 struct veno *veno = inet_csk_ca(sk);
122 121
123 if (!veno->doing_veno_now) { 122 if (!veno->doing_veno_now) {
124 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 tcp_reno_cong_avoid(sk, ack, acked);
125 return; 124 return;
126 } 125 }
127 126
128 /* limited by applications */ 127 /* limited by applications */
129 if (!tcp_is_cwnd_limited(sk, in_flight)) 128 if (!tcp_is_cwnd_limited(sk))
130 return; 129 return;
131 130
132 /* We do the Veno calculations only if we got enough rtt samples */ 131 /* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
134 /* We don't have enough rtt samples to do the Veno 133 /* We don't have enough rtt samples to do the Veno
135 * calculation, so we'll behave like Reno. 134 * calculation, so we'll behave like Reno.
136 */ 135 */
137 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 136 tcp_reno_cong_avoid(sk, ack, acked);
138 } else { 137 } else {
139 u64 target_cwnd; 138 u64 target_cwnd;
140 u32 rtt; 139 u32 rtt;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 5ede0e727945..599b79b8eac0 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
69 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); 69 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
70} 70}
71 71
72static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked, 72static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
73 u32 in_flight)
74{ 73{
75 struct tcp_sock *tp = tcp_sk(sk); 74 struct tcp_sock *tp = tcp_sk(sk);
76 struct yeah *yeah = inet_csk_ca(sk); 75 struct yeah *yeah = inet_csk_ca(sk);
77 76
78 if (!tcp_is_cwnd_limited(sk, in_flight)) 77 if (!tcp_is_cwnd_limited(sk))
79 return; 78 return;
80 79
81 if (tp->snd_cwnd <= tp->snd_ssthresh) 80 if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4468e1adc094..185ed3e59802 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
246 do { 246 do {
247 if (low <= snum && snum <= high && 247 if (low <= snum && snum <= high &&
248 !test_bit(snum >> udptable->log, bitmap) && 248 !test_bit(snum >> udptable->log, bitmap) &&
249 !inet_is_reserved_local_port(snum)) 249 !inet_is_local_reserved_port(net, snum))
250 goto found; 250 goto found;
251 snum += rand; 251 snum += rand;
252 } while (snum != first); 252 } while (snum != first);
@@ -727,13 +727,12 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
727void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) 727void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
728{ 728{
729 struct udphdr *uh = udp_hdr(skb); 729 struct udphdr *uh = udp_hdr(skb);
730 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
731 int offset = skb_transport_offset(skb); 730 int offset = skb_transport_offset(skb);
732 int len = skb->len - offset; 731 int len = skb->len - offset;
733 int hlen = len; 732 int hlen = len;
734 __wsum csum = 0; 733 __wsum csum = 0;
735 734
736 if (!frags) { 735 if (!skb_has_frag_list(skb)) {
737 /* 736 /*
738 * Only one fragment on the socket. 737 * Only one fragment on the socket.
739 */ 738 */
@@ -742,15 +741,17 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
742 uh->check = ~csum_tcpudp_magic(src, dst, len, 741 uh->check = ~csum_tcpudp_magic(src, dst, len,
743 IPPROTO_UDP, 0); 742 IPPROTO_UDP, 0);
744 } else { 743 } else {
744 struct sk_buff *frags;
745
745 /* 746 /*
746 * HW-checksum won't work as there are two or more 747 * HW-checksum won't work as there are two or more
747 * fragments on the socket, so the csums of all sk_buffs 748 * fragments on the socket, so the csums of all sk_buffs
748 * must be combined 749 * must be combined
749 */ 750 */
750 do { 751 skb_walk_frags(skb, frags) {
751 csum = csum_add(csum, frags->csum); 752 csum = csum_add(csum, frags->csum);
752 hlen -= frags->len; 753 hlen -= frags->len;
753 } while ((frags = frags->next)); 754 }
754 755
755 csum = skb_checksum(skb, offset, hlen, csum); 756 csum = skb_checksum(skb, offset, hlen, csum);
756 skb->ip_summed = CHECKSUM_NONE; 757 skb->ip_summed = CHECKSUM_NONE;
@@ -762,6 +763,43 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
762} 763}
763EXPORT_SYMBOL_GPL(udp4_hwcsum); 764EXPORT_SYMBOL_GPL(udp4_hwcsum);
764 765
766/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
767 * for simple cases, such as setting the checksum for a UDP tunnel.
768 */
769void udp_set_csum(bool nocheck, struct sk_buff *skb,
770 __be32 saddr, __be32 daddr, int len)
771{
772 struct udphdr *uh = udp_hdr(skb);
773
774 if (nocheck)
775 uh->check = 0;
776 else if (skb_is_gso(skb))
777 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
778 else if (skb_dst(skb) && skb_dst(skb)->dev &&
779 (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
780
781 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
782
783 skb->ip_summed = CHECKSUM_PARTIAL;
784 skb->csum_start = skb_transport_header(skb) - skb->head;
785 skb->csum_offset = offsetof(struct udphdr, check);
786 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
787 } else {
788 __wsum csum;
789
790 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
791
792 uh->check = 0;
793 csum = skb_checksum(skb, 0, len, 0);
794 uh->check = udp_v4_check(len, saddr, daddr, csum);
795 if (uh->check == 0)
796 uh->check = CSUM_MANGLED_0;
797
798 skb->ip_summed = CHECKSUM_UNNECESSARY;
799 }
800}
801EXPORT_SYMBOL(udp_set_csum);
802
765static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) 803static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
766{ 804{
767 struct sock *sk = skb->sk; 805 struct sock *sk = skb->sk;
@@ -785,7 +823,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
785 if (is_udplite) /* UDP-Lite */ 823 if (is_udplite) /* UDP-Lite */
786 csum = udplite_csum(skb); 824 csum = udplite_csum(skb);
787 825
788 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ 826 else if (sk->sk_no_check_tx) { /* UDP csum disabled */
789 827
790 skb->ip_summed = CHECKSUM_NONE; 828 skb->ip_summed = CHECKSUM_NONE;
791 goto send; 829 goto send;
@@ -1495,6 +1533,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1495 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { 1533 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
1496 int ret; 1534 int ret;
1497 1535
1536 /* Verify checksum before giving to encap */
1537 if (udp_lib_checksum_complete(skb))
1538 goto csum_error;
1539
1498 ret = encap_rcv(sk, skb); 1540 ret = encap_rcv(sk, skb);
1499 if (ret <= 0) { 1541 if (ret <= 0) {
1500 UDP_INC_STATS_BH(sock_net(sk), 1542 UDP_INC_STATS_BH(sock_net(sk),
@@ -1672,7 +1714,6 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1672static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, 1714static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1673 int proto) 1715 int proto)
1674{ 1716{
1675 const struct iphdr *iph;
1676 int err; 1717 int err;
1677 1718
1678 UDP_SKB_CB(skb)->partial_cov = 0; 1719 UDP_SKB_CB(skb)->partial_cov = 0;
@@ -1684,22 +1725,8 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1684 return err; 1725 return err;
1685 } 1726 }
1686 1727
 1687 iph = ip_hdr(skb);
 1688 if (uh->check == 0) {
 1689 skb->ip_summed = CHECKSUM_UNNECESSARY;
 1690 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
 1691 if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
 1692 proto, skb->csum))
 1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
 1694 }
 1695 if (!skb_csum_unnecessary(skb))
 1696 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
 1697 skb->len, proto, 0);
 1698 /* Probably, we should checksum udp header (it should be in cache
 1699 * in any case) and data in tiny packets (< rx copybreak).
 1700 */
 1701
 1702 return 0;
 1728 return skb_checksum_init_zero_check(skb, proto, uh->check,
 1729 inet_compute_pseudo);
1703} 1730}
1704 1731
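
udp4_csum_init() now delegates to a shared helper; the removed lines spell out the decision table that helper has to encode. A compact reconstruction (simplified, not the helper's actual kernel source):

	#include <stdint.h>

	enum rx_csum { RX_CSUM_OK, RX_CSUM_CHECK_LATER };

	/* check == 0 disables the checksum for UDP/IPv4 (RFC 768); a valid
	 * CHECKSUM_COMPLETE value from the NIC also settles the question;
	 * anything else is seeded with the pseudo-header and verified when
	 * the data is copied out to the user.
	 */
	static enum rx_csum udp4_rx_csum_verdict(uint16_t check,
						 int hw_full_sum_matches)
	{
		if (check == 0)
			return RX_CSUM_OK;
		if (hw_full_sum_matches)
			return RX_CSUM_OK;
		return RX_CSUM_CHECK_LATER;
	}
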
1705/* 1732/*
@@ -1886,7 +1913,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
1886 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); 1913 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
1887 unsigned int slot2 = hash2 & udp_table.mask; 1914 unsigned int slot2 = hash2 & udp_table.mask;
1888 struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; 1915 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1889 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr) 1916 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
1890 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); 1917 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1891 1918
1892 rcu_read_lock(); 1919 rcu_read_lock();
@@ -1979,7 +2006,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1979 int (*push_pending_frames)(struct sock *)) 2006 int (*push_pending_frames)(struct sock *))
1980{ 2007{
1981 struct udp_sock *up = udp_sk(sk); 2008 struct udp_sock *up = udp_sk(sk);
1982 int val; 2009 int val, valbool;
1983 int err = 0; 2010 int err = 0;
1984 int is_udplite = IS_UDPLITE(sk); 2011 int is_udplite = IS_UDPLITE(sk);
1985 2012
@@ -1989,6 +2016,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1989 if (get_user(val, (int __user *)optval)) 2016 if (get_user(val, (int __user *)optval))
1990 return -EFAULT; 2017 return -EFAULT;
1991 2018
2019 valbool = val ? 1 : 0;
2020
1992 switch (optname) { 2021 switch (optname) {
1993 case UDP_CORK: 2022 case UDP_CORK:
1994 if (val != 0) { 2023 if (val != 0) {
@@ -2018,6 +2047,14 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2018 } 2047 }
2019 break; 2048 break;
2020 2049
2050 case UDP_NO_CHECK6_TX:
2051 up->no_check6_tx = valbool;
2052 break;
2053
2054 case UDP_NO_CHECK6_RX:
2055 up->no_check6_rx = valbool;
2056 break;
2057
2021 /* 2058 /*
2022 * UDP-Lite's partial checksum coverage (RFC 3828). 2059 * UDP-Lite's partial checksum coverage (RFC 3828).
2023 */ 2060 */
@@ -2100,6 +2137,14 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2100 val = up->encap_type; 2137 val = up->encap_type;
2101 break; 2138 break;
2102 2139
2140 case UDP_NO_CHECK6_TX:
2141 val = up->no_check6_tx;
2142 break;
2143
2144 case UDP_NO_CHECK6_RX:
2145 val = up->no_check6_rx;
2146 break;
2147
2103 /* The following two cannot be changed on UDP sockets, the return is 2148 /* The following two cannot be changed on UDP sockets, the return is
2104 * always 0 (which corresponds to the full checksum coverage of UDP). */ 2149 * always 0 (which corresponds to the full checksum coverage of UDP). */
2105 case UDPLITE_SEND_CSCOV: 2150 case UDPLITE_SEND_CSCOV:
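
From user space the two new options are ordinary socket options at the UDP level. A hypothetical usage sketch (assuming the constants are exported through <linux/udp.h>, as in this series; error handling elided):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/udp.h>

	static int make_zero_csum_udp6_socket(void)
	{
		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
		int one = 1;

		if (fd < 0)
			return -1;
		/* RFC 6936: send zero checksums, and accept them on receive */
		setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one));
		setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one));
		return fd;
	}
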
@@ -2484,7 +2529,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2484 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); 2529 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
2485 __be16 protocol = skb->protocol; 2530 __be16 protocol = skb->protocol;
2486 netdev_features_t enc_features; 2531 netdev_features_t enc_features;
2487 int outer_hlen; 2532 int udp_offset, outer_hlen;
2533 unsigned int oldlen;
2534 bool need_csum;
2535
2536 oldlen = (u16)~skb->len;
2488 2537
2489 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) 2538 if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
2490 goto out; 2539 goto out;
@@ -2496,6 +2545,10 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2496 skb->mac_len = skb_inner_network_offset(skb); 2545 skb->mac_len = skb_inner_network_offset(skb);
2497 skb->protocol = htons(ETH_P_TEB); 2546 skb->protocol = htons(ETH_P_TEB);
2498 2547
2548 need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
2549 if (need_csum)
2550 skb->encap_hdr_csum = 1;
2551
2499 /* segment inner packet. */ 2552 /* segment inner packet. */
2500 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 2553 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
2501 segs = skb_mac_gso_segment(skb, enc_features); 2554 segs = skb_mac_gso_segment(skb, enc_features);
@@ -2506,10 +2559,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2506 } 2559 }
2507 2560
2508 outer_hlen = skb_tnl_header_len(skb); 2561 outer_hlen = skb_tnl_header_len(skb);
2562 udp_offset = outer_hlen - tnl_hlen;
2509 skb = segs; 2563 skb = segs;
2510 do { 2564 do {
2511 struct udphdr *uh; 2565 struct udphdr *uh;
2512 int udp_offset = outer_hlen - tnl_hlen; 2566 int len;
2513 2567
2514 skb_reset_inner_headers(skb); 2568 skb_reset_inner_headers(skb);
2515 skb->encapsulation = 1; 2569 skb->encapsulation = 1;
@@ -2520,31 +2574,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2520 skb_reset_mac_header(skb); 2574 skb_reset_mac_header(skb);
2521 skb_set_network_header(skb, mac_len); 2575 skb_set_network_header(skb, mac_len);
2522 skb_set_transport_header(skb, udp_offset); 2576 skb_set_transport_header(skb, udp_offset);
2577 len = skb->len - udp_offset;
2523 uh = udp_hdr(skb); 2578 uh = udp_hdr(skb);
 2524 uh->len = htons(skb->len - udp_offset);
 2525
 2526 /* csum segment if tunnel sets skb with csum. */
 2527 if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) {
 2528 struct iphdr *iph = ip_hdr(skb);
 2529
 2530 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 2531 skb->len - udp_offset,
 2532 IPPROTO_UDP, 0);
 2533 uh->check = csum_fold(skb_checksum(skb, udp_offset,
 2534 skb->len - udp_offset, 0));
 2535 if (uh->check == 0)
 2536 uh->check = CSUM_MANGLED_0;
 2537
 2538 } else if (protocol == htons(ETH_P_IPV6)) {
 2539 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 2540 u32 len = skb->len - udp_offset;
 2541
 2542 uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
 2543 len, IPPROTO_UDP, 0);
 2544 uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0));
 2545 if (uh->check == 0)
 2546 uh->check = CSUM_MANGLED_0;
 2547 skb->ip_summed = CHECKSUM_NONE;
 2548 }
 2579 uh->len = htons(len);
 2580
 2581 if (need_csum) {
 2582 __be32 delta = htonl(oldlen + len);
 2583
 2584 uh->check = ~csum_fold((__force __wsum)
 2585 ((__force u32)uh->check +
 2586 (__force u32)delta));
 2587 uh->check = gso_make_checksum(skb, ~uh->check);
 2588
 2589 if (uh->check == 0)
 2590 uh->check = CSUM_MANGLED_0;
 2591 }
2549 2592
2550 skb->protocol = protocol; 2593 skb->protocol = protocol;
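
The rewrite stops recomputing each segment's UDP checksum from scratch. Since uh->check already holds a pseudo-header seed and only the length fields change per segment, oldlen = (u16)~skb->len is captured before segmentation, each segment is patched with delta = htonl(oldlen + len), and gso_make_checksum() finishes the job from the inner checksum state. This is the incremental update of RFC 1624 (HC' = ~(~HC + ~m + m')); a standalone sketch in plain C:

	#include <stdint.h>

	/* Patch a one's-complement checksum when a 16-bit field changes
	 * from old_val to new_val, without re-reading the payload.
	 */
	static uint16_t csum_update16(uint16_t check, uint16_t old_val,
				      uint16_t new_val)
	{
		uint32_t sum = (uint16_t)~check + (uint16_t)~old_val + new_val;

		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}
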
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 88b4023ecfcf..546d2d439dda 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -56,7 +56,8 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
56 __wsum csum; 56 __wsum csum;
57 57
58 if (skb->encapsulation && 58 if (skb->encapsulation &&
59 skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) { 59 (skb_shinfo(skb)->gso_type &
60 (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
60 segs = skb_udp_tunnel_segment(skb, features); 61 segs = skb_udp_tunnel_segment(skb, features);
61 goto out; 62 goto out;
62 } 63 }
@@ -71,8 +72,10 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
71 72
72 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | 73 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
73 SKB_GSO_UDP_TUNNEL | 74 SKB_GSO_UDP_TUNNEL |
75 SKB_GSO_UDP_TUNNEL_CSUM |
74 SKB_GSO_IPIP | 76 SKB_GSO_IPIP |
75 SKB_GSO_GRE | SKB_GSO_MPLS) || 77 SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
78 SKB_GSO_MPLS) ||
76 !(type & (SKB_GSO_UDP)))) 79 !(type & (SKB_GSO_UDP))))
77 goto out; 80 goto out;
78 81
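
udp4_ufo_fragment() whitelists GSO feature bits, so the new checksum-carrying tunnel flags have to be added to the mask or such skbs would be rejected here. The shape of the check, with illustrative flag names and values:

	#define F_UDP		(1u << 0)	/* illustrative bits */
	#define F_TUNNEL	(1u << 1)
	#define F_TUNNEL_CSUM	(1u << 2)

	/* reject any type bit outside the allowed set, and require UDP */
	static int gso_type_supported(unsigned int type)
	{
		const unsigned int allowed = F_UDP | F_TUNNEL | F_TUNNEL_CSUM;

		return !(type & ~allowed) && (type & F_UDP);
	}
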
@@ -197,6 +200,7 @@ unflush:
197 } 200 }
198 201
199 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ 202 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
203 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
200 pp = uo_priv->offload->callbacks.gro_receive(head, skb); 204 pp = uo_priv->offload->callbacks.gro_receive(head, skb);
201 205
202out_unlock: 206out_unlock:
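
skb_gro_postpull_rcsum() keeps a CHECKSUM_COMPLETE value coherent: once the outer UDP header has been pulled, the sum of those bytes must be subtracted from skb->csum or later validation would fail. One's-complement subtraction is just addition of the complement; a self-contained sketch:

	#include <stdint.h>

	static uint16_t csum16_add(uint16_t a, uint16_t b)
	{
		uint32_t s = (uint32_t)a + b;

		return (uint16_t)((s & 0xffff) + (s >> 16));
	}

	/* csum_sub(a, b) == csum_add(a, ~b) in one's-complement arithmetic */
	static uint16_t csum16_sub(uint16_t a, uint16_t b)
	{
		return csum16_add(a, (uint16_t)~b);
	}
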
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 2c46acd4cc36..3b3efbda48e1 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -70,7 +70,6 @@ static struct inet_protosw udplite4_protosw = {
70 .protocol = IPPROTO_UDPLITE, 70 .protocol = IPPROTO_UDPLITE,
71 .prot = &udplite_prot, 71 .prot = &udplite_prot,
72 .ops = &inet_dgram_ops, 72 .ops = &inet_dgram_ops,
73 .no_check = 0, /* must checksum (RFC 3828) */
74 .flags = INET_PROTOSW_PERMANENT, 73 .flags = INET_PROTOSW_PERMANENT,
75}; 74};
76 75
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 05f2b484954f..91771a7c802f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -58,12 +58,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
58 58
59 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 59 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
60 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); 60 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
61 ip_select_ident(skb, dst->child, NULL);
62 61
63 top_iph->ttl = ip4_dst_hoplimit(dst->child); 62 top_iph->ttl = ip4_dst_hoplimit(dst->child);
64 63
65 top_iph->saddr = x->props.saddr.a4; 64 top_iph->saddr = x->props.saddr.a4;
66 top_iph->daddr = x->id.daddr.a4; 65 top_iph->daddr = x->id.daddr.a4;
66 ip_select_ident(skb, NULL);
67 67
68 return 0; 68 return 0;
69} 69}
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 186a8ecf92fa..d5f6bd9a210a 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -25,7 +25,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) 25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
26 goto out; 26 goto out;
27 27
28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
29 goto out; 29 goto out;
30 30
31 mtu = dst_mtu(skb_dst(skb)); 31 mtu = dst_mtu(skb_dst(skb));
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6c7fa0853fc7..5667b3003af9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -275,19 +275,14 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
275{ 275{
276 int i; 276 int i;
277 277
278 if (snmp_mib_init((void __percpu **)idev->stats.ipv6, 278 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
279 sizeof(struct ipstats_mib), 279 if (!idev->stats.ipv6)
280 __alignof__(struct ipstats_mib)) < 0)
281 goto err_ip; 280 goto err_ip;
282 281
283 for_each_possible_cpu(i) { 282 for_each_possible_cpu(i) {
284 struct ipstats_mib *addrconf_stats; 283 struct ipstats_mib *addrconf_stats;
285 addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i); 284 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
286 u64_stats_init(&addrconf_stats->syncp); 285 u64_stats_init(&addrconf_stats->syncp);
287#if SNMP_ARRAY_SZ == 2
288 addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
289 u64_stats_init(&addrconf_stats->syncp);
290#endif
291 } 286 }
292 287
293 288
@@ -305,7 +300,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
305err_icmpmsg: 300err_icmpmsg:
306 kfree(idev->stats.icmpv6dev); 301 kfree(idev->stats.icmpv6dev);
307err_icmp: 302err_icmp:
308 snmp_mib_free((void __percpu **)idev->stats.ipv6); 303 free_percpu(idev->stats.ipv6);
309err_ip: 304err_ip:
310 return -ENOMEM; 305 return -ENOMEM;
311} 306}
@@ -2504,8 +2499,8 @@ static int inet6_addr_add(struct net *net, int ifindex,
2504 return PTR_ERR(ifp); 2499 return PTR_ERR(ifp);
2505} 2500}
2506 2501
2507static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx, 2502static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2508 unsigned int plen) 2503 const struct in6_addr *pfx, unsigned int plen)
2509{ 2504{
2510 struct inet6_ifaddr *ifp; 2505 struct inet6_ifaddr *ifp;
2511 struct inet6_dev *idev; 2506 struct inet6_dev *idev;
@@ -2528,7 +2523,12 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p
2528 in6_ifa_hold(ifp); 2523 in6_ifa_hold(ifp);
2529 read_unlock_bh(&idev->lock); 2524 read_unlock_bh(&idev->lock);
2530 2525
2526 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2527 (ifa_flags & IFA_F_MANAGETEMPADDR))
2528 manage_tempaddrs(idev, ifp, 0, 0, false,
2529 jiffies);
2531 ipv6_del_addr(ifp); 2530 ipv6_del_addr(ifp);
2531 addrconf_verify_rtnl();
2532 return 0; 2532 return 0;
2533 } 2533 }
2534 } 2534 }
@@ -2568,7 +2568,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
2568 return -EFAULT; 2568 return -EFAULT;
2569 2569
2570 rtnl_lock(); 2570 rtnl_lock();
2571 err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, 2571 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2572 ireq.ifr6_prefixlen); 2572 ireq.ifr6_prefixlen);
2573 rtnl_unlock(); 2573 rtnl_unlock();
2574 return err; 2574 return err;
@@ -2813,18 +2813,6 @@ static void addrconf_gre_config(struct net_device *dev)
2813} 2813}
2814#endif 2814#endif
2815 2815
2816static inline int
2817ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
2818{
2819 struct in6_addr lladdr;
2820
2821 if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
2822 addrconf_add_linklocal(idev, &lladdr);
2823 return 0;
2824 }
2825 return -1;
2826}
2827
2828static int addrconf_notify(struct notifier_block *this, unsigned long event, 2816static int addrconf_notify(struct notifier_block *this, unsigned long event,
2829 void *ptr) 2817 void *ptr)
2830{ 2818{
@@ -3743,6 +3731,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3743 struct ifaddrmsg *ifm; 3731 struct ifaddrmsg *ifm;
3744 struct nlattr *tb[IFA_MAX+1]; 3732 struct nlattr *tb[IFA_MAX+1];
3745 struct in6_addr *pfx, *peer_pfx; 3733 struct in6_addr *pfx, *peer_pfx;
3734 u32 ifa_flags;
3746 int err; 3735 int err;
3747 3736
3748 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3737 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3754,7 +3743,13 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3754 if (pfx == NULL) 3743 if (pfx == NULL)
3755 return -EINVAL; 3744 return -EINVAL;
3756 3745
3757 return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen); 3746 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
3747
3748 /* We ignore other flags so far. */
3749 ifa_flags &= IFA_F_MANAGETEMPADDR;
3750
3751 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
3752 ifm->ifa_prefixlen);
3758} 3753}
3759 3754
3760static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, 3755static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
@@ -4363,7 +4358,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
4363 memset(&stats[items], 0, pad); 4358 memset(&stats[items], 0, pad);
4364} 4359}
4365 4360
4366static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib, 4361static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
4367 int items, int bytes, size_t syncpoff) 4362 int items, int bytes, size_t syncpoff)
4368{ 4363{
4369 int i; 4364 int i;
@@ -4383,7 +4378,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
4383{ 4378{
4384 switch (attrtype) { 4379 switch (attrtype) {
4385 case IFLA_INET6_STATS: 4380 case IFLA_INET6_STATS:
4386 __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6, 4381 __snmp6_fill_stats64(stats, idev->stats.ipv6,
4387 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp)); 4382 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
4388 break; 4383 break;
4389 case IFLA_INET6_ICMP6STATS: 4384 case IFLA_INET6_ICMP6STATS:
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 4c11cbcf8308..e6960457f625 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -123,7 +123,7 @@ static void snmp6_free_dev(struct inet6_dev *idev)
123{ 123{
124 kfree(idev->stats.icmpv6msgdev); 124 kfree(idev->stats.icmpv6msgdev);
125 kfree(idev->stats.icmpv6dev); 125 kfree(idev->stats.icmpv6dev);
126 snmp_mib_free((void __percpu **)idev->stats.ipv6); 126 free_percpu(idev->stats.ipv6);
127} 127}
128 128
129/* Nobody refers to this device, we may destroy it. */ 129/* Nobody refers to this device, we may destroy it. */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d935889f1008..7cb4392690dd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -106,7 +106,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
106 struct inet_protosw *answer; 106 struct inet_protosw *answer;
107 struct proto *answer_prot; 107 struct proto *answer_prot;
108 unsigned char answer_flags; 108 unsigned char answer_flags;
109 char answer_no_check;
110 int try_loading_module = 0; 109 int try_loading_module = 0;
111 int err; 110 int err;
112 111
@@ -162,7 +161,6 @@ lookup_protocol:
162 161
163 sock->ops = answer->ops; 162 sock->ops = answer->ops;
164 answer_prot = answer->prot; 163 answer_prot = answer->prot;
165 answer_no_check = answer->no_check;
166 answer_flags = answer->flags; 164 answer_flags = answer->flags;
167 rcu_read_unlock(); 165 rcu_read_unlock();
168 166
@@ -176,7 +174,6 @@ lookup_protocol:
176 sock_init_data(sock, sk); 174 sock_init_data(sock, sk);
177 175
178 err = 0; 176 err = 0;
179 sk->sk_no_check = answer_no_check;
180 if (INET_PROTOSW_REUSE & answer_flags) 177 if (INET_PROTOSW_REUSE & answer_flags)
181 sk->sk_reuse = SK_CAN_REUSE; 178 sk->sk_reuse = SK_CAN_REUSE;
182 179
@@ -715,33 +712,25 @@ static int __net_init ipv6_init_mibs(struct net *net)
715{ 712{
716 int i; 713 int i;
717 714
718 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, 715 net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
719 sizeof(struct udp_mib), 716 if (!net->mib.udp_stats_in6)
720 __alignof__(struct udp_mib)) < 0)
721 return -ENOMEM; 717 return -ENOMEM;
722 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, 718 net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
723 sizeof(struct udp_mib), 719 if (!net->mib.udplite_stats_in6)
724 __alignof__(struct udp_mib)) < 0)
725 goto err_udplite_mib; 720 goto err_udplite_mib;
726 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, 721 net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
727 sizeof(struct ipstats_mib), 722 if (!net->mib.ipv6_statistics)
728 __alignof__(struct ipstats_mib)) < 0)
729 goto err_ip_mib; 723 goto err_ip_mib;
730 724
731 for_each_possible_cpu(i) { 725 for_each_possible_cpu(i) {
732 struct ipstats_mib *af_inet6_stats; 726 struct ipstats_mib *af_inet6_stats;
733 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i); 727 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i);
734 u64_stats_init(&af_inet6_stats->syncp); 728 u64_stats_init(&af_inet6_stats->syncp);
735#if SNMP_ARRAY_SZ == 2
736 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
737 u64_stats_init(&af_inet6_stats->syncp);
738#endif
739 } 729 }
740 730
741 731
742 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, 732 net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
743 sizeof(struct icmpv6_mib), 733 if (!net->mib.icmpv6_statistics)
744 __alignof__(struct icmpv6_mib)) < 0)
745 goto err_icmp_mib; 734 goto err_icmp_mib;
746 net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib), 735 net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
747 GFP_KERNEL); 736 GFP_KERNEL);
@@ -750,22 +739,22 @@ static int __net_init ipv6_init_mibs(struct net *net)
750 return 0; 739 return 0;
751 740
752err_icmpmsg_mib: 741err_icmpmsg_mib:
753 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 742 free_percpu(net->mib.icmpv6_statistics);
754err_icmp_mib: 743err_icmp_mib:
755 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 744 free_percpu(net->mib.ipv6_statistics);
756err_ip_mib: 745err_ip_mib:
757 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 746 free_percpu(net->mib.udplite_stats_in6);
758err_udplite_mib: 747err_udplite_mib:
759 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 748 free_percpu(net->mib.udp_stats_in6);
760 return -ENOMEM; 749 return -ENOMEM;
761} 750}
762 751
763static void ipv6_cleanup_mibs(struct net *net) 752static void ipv6_cleanup_mibs(struct net *net)
764{ 753{
765 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 754 free_percpu(net->mib.udp_stats_in6);
766 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 755 free_percpu(net->mib.udplite_stats_in6);
767 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 756 free_percpu(net->mib.ipv6_statistics);
768 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 757 free_percpu(net->mib.icmpv6_statistics);
769 kfree(net->mib.icmpv6msg_statistics); 758 kfree(net->mib.icmpv6msg_statistics);
770} 759}
771 760
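
The MIB conversion in this file and in addrconf replaces the old snmp_mib_init()/snmp_mib_free() pair-of-arrays scheme with a single alloc_percpu() allocation plus per-CPU seqcount init. The recurring pattern, as a kernel-style sketch (the percpu and u64_stats APIs are real; the struct is illustrative):

	struct my_mib {
		u64 mibs[8];			/* counter slots, illustrative */
		struct u64_stats_sync syncp;
	};

	static struct my_mib __percpu *alloc_my_mib(void)
	{
		struct my_mib __percpu *mib = alloc_percpu(struct my_mib);
		int cpu;

		if (!mib)
			return NULL;
		for_each_possible_cpu(cpu)
			u64_stats_init(&per_cpu_ptr(mib, cpu)->syncp);
		return mib;
	}

Teardown is the matching free_percpu(mib), as in ipv6_cleanup_mibs() above.
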
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7b326529e6a2..f6c84a6eb238 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
400 int len; 400 int len;
401 int hlimit; 401 int hlimit;
402 int err = 0; 402 int err = 0;
403 u32 mark = IP6_REPLY_MARK(net, skb->mark);
403 404
404 if ((u8 *)hdr < skb->head || 405 if ((u8 *)hdr < skb->head ||
405 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) 406 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
@@ -466,6 +467,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
466 fl6.daddr = hdr->saddr; 467 fl6.daddr = hdr->saddr;
467 if (saddr) 468 if (saddr)
468 fl6.saddr = *saddr; 469 fl6.saddr = *saddr;
470 fl6.flowi6_mark = mark;
469 fl6.flowi6_oif = iif; 471 fl6.flowi6_oif = iif;
470 fl6.fl6_icmp_type = type; 472 fl6.fl6_icmp_type = type;
471 fl6.fl6_icmp_code = code; 473 fl6.fl6_icmp_code = code;
@@ -474,6 +476,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
474 sk = icmpv6_xmit_lock(net); 476 sk = icmpv6_xmit_lock(net);
475 if (sk == NULL) 477 if (sk == NULL)
476 return; 478 return;
479 sk->sk_mark = mark;
477 np = inet6_sk(sk); 480 np = inet6_sk(sk);
478 481
479 if (!icmpv6_xrlim_allow(sk, type, &fl6)) 482 if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -493,12 +496,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
493 if (IS_ERR(dst)) 496 if (IS_ERR(dst))
494 goto out; 497 goto out;
495 498
 496 if (ipv6_addr_is_multicast(&fl6.daddr))
 497 hlimit = np->mcast_hops;
 498 else
 499 hlimit = np->hop_limit;
 500 if (hlimit < 0)
 501 hlimit = ip6_dst_hoplimit(dst);
 499 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
502 500
503 msg.skb = skb; 501 msg.skb = skb;
504 msg.offset = skb_network_offset(skb); 502 msg.offset = skb_network_offset(skb);
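
The hop-limit selection removed here was open-coded identically in icmp, ping, raw and udp; ip6_sk_dst_hoplimit() centralizes it. Its decision logic, reconstructed from the removed copies (simplified; not the kernel source):

	/* -1 in the socket fields means "not set, use the route default" */
	static int sk_dst_hoplimit(int hop_limit, int mcast_hops,
				   int dst_default, int daddr_is_multicast)
	{
		int hlimit = daddr_is_multicast ? mcast_hops : hop_limit;

		return hlimit < 0 ? dst_default : hlimit;
	}
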
@@ -556,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
556 int err = 0; 554 int err = 0;
557 int hlimit; 555 int hlimit;
558 u8 tclass; 556 u8 tclass;
557 u32 mark = IP6_REPLY_MARK(net, skb->mark);
559 558
560 saddr = &ipv6_hdr(skb)->daddr; 559 saddr = &ipv6_hdr(skb)->daddr;
561 560
@@ -574,11 +573,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
574 fl6.saddr = *saddr; 573 fl6.saddr = *saddr;
575 fl6.flowi6_oif = skb->dev->ifindex; 574 fl6.flowi6_oif = skb->dev->ifindex;
576 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; 575 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
576 fl6.flowi6_mark = mark;
577 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 577 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
578 578
579 sk = icmpv6_xmit_lock(net); 579 sk = icmpv6_xmit_lock(net);
580 if (sk == NULL) 580 if (sk == NULL)
581 return; 581 return;
582 sk->sk_mark = mark;
582 np = inet6_sk(sk); 583 np = inet6_sk(sk);
583 584
584 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 585 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -593,12 +594,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
593 if (IS_ERR(dst)) 594 if (IS_ERR(dst))
594 goto out; 595 goto out;
595 596
 596 if (ipv6_addr_is_multicast(&fl6.daddr))
 597 hlimit = np->mcast_hops;
 598 else
 599 hlimit = np->hop_limit;
 600 if (hlimit < 0)
 601 hlimit = ip6_dst_hoplimit(dst);
 597 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
602 598
603 idev = __in6_dev_get(skb->dev); 599 idev = __in6_dev_get(skb->dev);
604 600
@@ -702,22 +698,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
702 saddr = &ipv6_hdr(skb)->saddr; 698 saddr = &ipv6_hdr(skb)->saddr;
703 daddr = &ipv6_hdr(skb)->daddr; 699 daddr = &ipv6_hdr(skb)->daddr;
704 700
 705 /* Perform checksum. */
 706 switch (skb->ip_summed) {
 707 case CHECKSUM_COMPLETE:
 708 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
 709 skb->csum))
 710 break;
 711 /* fall through */
 712 case CHECKSUM_NONE:
 713 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
 714 IPPROTO_ICMPV6, 0));
 715 if (__skb_checksum_complete(skb)) {
 716 LIMIT_NETDEBUG(KERN_DEBUG
 717 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
 718 saddr, daddr);
 719 goto csum_error;
 720 }
 721 }
 701 if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
 702 LIMIT_NETDEBUG(KERN_DEBUG
 703 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
 704 saddr, daddr);
 705 goto csum_error;
 706 }
722 707
723 if (!pskb_pull(skb, sizeof(*hdr))) 708 if (!pskb_pull(skb, sizeof(*hdr)))
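
skb_checksum_validate() folds the removed CHECKSUM_COMPLETE/CHECKSUM_NONE switch into one helper parameterized by a pseudo-header routine (ip6_compute_pseudo here); the GRE path later in this section gets the same treatment via skb_checksum_simple_validate(). The core test for a hardware-reported full sum, in miniature:

	#include <stdint.h>

	static uint32_t fold16(uint32_t s)
	{
		s = (s & 0xffff) + (s >> 16);
		return (s & 0xffff) + (s >> 16);
	}

	/* With CHECKSUM_COMPLETE the NIC reports the one's-complement sum
	 * of the whole packet; it is valid iff adding the pseudo-header
	 * sum folds to 0xffff.  Otherwise fall back to software summing.
	 */
	static int hw_full_sum_ok(uint32_t pseudo_hdr_sum, uint32_t reported)
	{
		return fold16(pseudo_hdr_sum + reported) == 0xffff;
	}
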
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index d4ade34ab375..a245e5ddffbd 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
81 final_p = fl6_update_dst(fl6, np->opt, &final); 81 final_p = fl6_update_dst(fl6, np->opt, &final);
82 fl6->saddr = ireq->ir_v6_loc_addr; 82 fl6->saddr = ireq->ir_v6_loc_addr;
83 fl6->flowi6_oif = ireq->ir_iif; 83 fl6->flowi6_oif = ireq->ir_iif;
84 fl6->flowi6_mark = sk->sk_mark; 84 fl6->flowi6_mark = ireq->ir_mark;
85 fl6->fl6_dport = ireq->ir_rmt_port; 85 fl6->fl6_dport = ireq->ir_rmt_port;
86 fl6->fl6_sport = htons(ireq->ir_num); 86 fl6->fl6_sport = htons(ireq->ir_num);
87 security_req_classify_flow(req, flowi6_to_flowi(fl6)); 87 security_req_classify_flow(req, flowi6_to_flowi(fl6));
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ee7a97f510cb..9a4d7322fb22 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -75,25 +75,50 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
75 return err; 75 return err;
76 } 76 }
77 77
 78 if (uh->check == 0) {
 79 /* RFC 2460 section 8.1 says that we SHOULD log
 80 this error. Well, it is reasonable.
 81 */
 82 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
 83 &ipv6_hdr(skb)->saddr, ntohs(uh->source),
 84 &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
 85 return 1;
 86 }
 87 if (skb->ip_summed == CHECKSUM_COMPLETE &&
 88 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 89 skb->len, proto, skb->csum))
 90 skb->ip_summed = CHECKSUM_UNNECESSARY;
 91
 92 if (!skb_csum_unnecessary(skb))
 93 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 94 &ipv6_hdr(skb)->daddr,
 95 skb->len, proto, 0));
 96
 97 return 0;
 98}
 99EXPORT_SYMBOL(udp6_csum_init);
 78 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
 79 * we accept a checksum of zero here. When we find the socket
 80 * for the UDP packet we'll check if that socket allows zero checksum
 81 * for IPv6 (set by socket option).
 82 */
 83 return skb_checksum_init_zero_check(skb, proto, uh->check,
 84 ip6_compute_pseudo);
 85}
 86EXPORT_SYMBOL(udp6_csum_init);
 87
 88/* Function to set UDP checksum for an IPv6 UDP packet. This is intended
 89 * for the simple case like when setting the checksum for a UDP tunnel.
 90 */
 91void udp6_set_csum(bool nocheck, struct sk_buff *skb,
 92 const struct in6_addr *saddr,
 93 const struct in6_addr *daddr, int len)
 94{
 95 struct udphdr *uh = udp_hdr(skb);
 96
 97 if (nocheck)
 98 uh->check = 0;
 99 else if (skb_is_gso(skb))
 100 uh->check = ~udp_v6_check(len, saddr, daddr, 0);
 101 else if (skb_dst(skb) && skb_dst(skb)->dev &&
 102 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
 103
 104 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
 105
 106 skb->ip_summed = CHECKSUM_PARTIAL;
 107 skb->csum_start = skb_transport_header(skb) - skb->head;
 108 skb->csum_offset = offsetof(struct udphdr, check);
 109 uh->check = ~udp_v6_check(len, saddr, daddr, 0);
 110 } else {
 111 __wsum csum;
 112
 113 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
 114
 115 uh->check = 0;
 116 csum = skb_checksum(skb, 0, len, 0);
 117 uh->check = udp_v6_check(len, saddr, daddr, csum);
 118 if (uh->check == 0)
 119 uh->check = CSUM_MANGLED_0;
 120
 121 skb->ip_summed = CHECKSUM_UNNECESSARY;
 122 }
 123}
 124EXPORT_SYMBOL(udp6_set_csum);
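
udp6_set_csum() mirrors the IPv4 variant, keyed on NETIF_F_IPV6_CSUM, and udp6_csum_init() now tolerates a zero checksum instead of dropping at this layer. RFC 6936 leaves the final accept/reject decision to per-socket policy; in miniature (illustrative field name mirroring up->no_check6_rx above):

	#include <stdint.h>

	/* A zero UDP/IPv6 checksum is only acceptable when the receiving
	 * socket opted in; nonzero checksums are always verified normally.
	 */
	static int udp6_zero_csum_acceptable(uint16_t check,
					     int sock_no_check6_rx)
	{
		return check != 0 || sock_no_check6_rx;
	}
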
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 87891f5f57b5..cb4459bd1d29 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -71,8 +71,7 @@ static DEFINE_RWLOCK(fib6_walker_lock);
71#define FWS_INIT FWS_L 71#define FWS_INIT FWS_L
72#endif 72#endif
73 73
74static void fib6_prune_clones(struct net *net, struct fib6_node *fn, 74static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
75 struct rt6_info *rt);
76static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn); 75static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
77static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn); 76static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
78static int fib6_walk(struct fib6_walker_t *w); 77static int fib6_walk(struct fib6_walker_t *w);
@@ -941,7 +940,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
941 if (!err) { 940 if (!err) {
942 fib6_start_gc(info->nl_net, rt); 941 fib6_start_gc(info->nl_net, rt);
943 if (!(rt->rt6i_flags & RTF_CACHE)) 942 if (!(rt->rt6i_flags & RTF_CACHE))
944 fib6_prune_clones(info->nl_net, pn, rt); 943 fib6_prune_clones(info->nl_net, pn);
945 } 944 }
946 945
947out: 946out:
@@ -1375,7 +1374,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1375 pn = pn->parent; 1374 pn = pn->parent;
1376 } 1375 }
1377#endif 1376#endif
1378 fib6_prune_clones(info->nl_net, pn, rt); 1377 fib6_prune_clones(info->nl_net, pn);
1379 } 1378 }
1380 1379
1381 /* 1380 /*
@@ -1601,10 +1600,9 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1601 return 0; 1600 return 0;
1602} 1601}
1603 1602
1604static void fib6_prune_clones(struct net *net, struct fib6_node *fn, 1603static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
1605 struct rt6_info *rt)
1606{ 1604{
1607 fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt); 1605 fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
1608} 1606}
1609 1607
1610/* 1608/*
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 0961b5ef866d..4052694c6f2c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -26,7 +26,6 @@
26#include <net/sock.h> 26#include <net/sock.h>
27 27
28#include <net/ipv6.h> 28#include <net/ipv6.h>
29#include <net/addrconf.h>
30#include <net/rawv6.h> 29#include <net/rawv6.h>
31#include <net/transp_v6.h> 30#include <net/transp_v6.h>
32 31
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 9d921462b57f..3873181ed856 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -72,6 +72,7 @@ struct ip6gre_net {
72}; 72};
73 73
74static struct rtnl_link_ops ip6gre_link_ops __read_mostly; 74static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
75static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
75static int ip6gre_tunnel_init(struct net_device *dev); 76static int ip6gre_tunnel_init(struct net_device *dev);
76static void ip6gre_tunnel_setup(struct net_device *dev); 77static void ip6gre_tunnel_setup(struct net_device *dev);
77static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); 78static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -353,10 +354,10 @@ failed_free:
353 354
354static void ip6gre_tunnel_uninit(struct net_device *dev) 355static void ip6gre_tunnel_uninit(struct net_device *dev)
355{ 356{
356 struct net *net = dev_net(dev); 357 struct ip6_tnl *t = netdev_priv(dev);
357 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 358 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
358 359
359 ip6gre_tunnel_unlink(ign, netdev_priv(dev)); 360 ip6gre_tunnel_unlink(ign, t);
360 dev_put(dev); 361 dev_put(dev);
361} 362}
362 363
@@ -467,17 +468,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
467 goto drop; 468 goto drop;
468 469
469 if (flags&GRE_CSUM) { 470 if (flags&GRE_CSUM) {
 470 switch (skb->ip_summed) {
 471 case CHECKSUM_COMPLETE:
 472 csum = csum_fold(skb->csum);
 473 if (!csum)
 474 break;
 475 /* fall through */
 476 case CHECKSUM_NONE:
 477 skb->csum = 0;
 478 csum = __skb_checksum_complete(skb);
 479 skb->ip_summed = CHECKSUM_COMPLETE;
 480 }
 471 csum = skb_checksum_simple_validate(skb);
481 offset += 4; 472 offset += 4;
482 } 473 }
483 if (flags&GRE_KEY) { 474 if (flags&GRE_KEY) {
@@ -611,8 +602,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
611 int encap_limit, 602 int encap_limit,
612 __u32 *pmtu) 603 __u32 *pmtu)
613{ 604{
614 struct net *net = dev_net(dev);
615 struct ip6_tnl *tunnel = netdev_priv(dev); 605 struct ip6_tnl *tunnel = netdev_priv(dev);
606 struct net *net = tunnel->net;
616 struct net_device *tdev; /* Device to other host */ 607 struct net_device *tdev; /* Device to other host */
617 struct ipv6hdr *ipv6h; /* Our new IP header */ 608 struct ipv6hdr *ipv6h; /* Our new IP header */
618 unsigned int max_headroom = 0; /* The extra header space needed */ 609 unsigned int max_headroom = 0; /* The extra header space needed */
@@ -979,7 +970,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
979 int strict = (ipv6_addr_type(&p->raddr) & 970 int strict = (ipv6_addr_type(&p->raddr) &
980 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 971 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
981 972
982 struct rt6_info *rt = rt6_lookup(dev_net(dev), 973 struct rt6_info *rt = rt6_lookup(t->net,
983 &p->raddr, &p->laddr, 974 &p->raddr, &p->laddr,
984 p->link, strict); 975 p->link, strict);
985 976
@@ -1063,13 +1054,12 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1063 int err = 0; 1054 int err = 0;
1064 struct ip6_tnl_parm2 p; 1055 struct ip6_tnl_parm2 p;
1065 struct __ip6_tnl_parm p1; 1056 struct __ip6_tnl_parm p1;
1066 struct ip6_tnl *t; 1057 struct ip6_tnl *t = netdev_priv(dev);
1067 struct net *net = dev_net(dev); 1058 struct net *net = t->net;
1068 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1059 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1069 1060
1070 switch (cmd) { 1061 switch (cmd) {
1071 case SIOCGETTUNNEL: 1062 case SIOCGETTUNNEL:
1072 t = NULL;
1073 if (dev == ign->fb_tunnel_dev) { 1063 if (dev == ign->fb_tunnel_dev) {
1074 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 1064 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1075 err = -EFAULT; 1065 err = -EFAULT;
@@ -1077,9 +1067,9 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1077 } 1067 }
1078 ip6gre_tnl_parm_from_user(&p1, &p); 1068 ip6gre_tnl_parm_from_user(&p1, &p);
1079 t = ip6gre_tunnel_locate(net, &p1, 0); 1069 t = ip6gre_tunnel_locate(net, &p1, 0);
1070 if (t == NULL)
1071 t = netdev_priv(dev);
1080 } 1072 }
1081 if (t == NULL)
1082 t = netdev_priv(dev);
1083 memset(&p, 0, sizeof(p)); 1073 memset(&p, 0, sizeof(p));
1084 ip6gre_tnl_parm_to_user(&p, &t->parms); 1074 ip6gre_tnl_parm_to_user(&p, &t->parms);
1085 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1075 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -1242,7 +1232,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1242 dev->flags |= IFF_NOARP; 1232 dev->flags |= IFF_NOARP;
1243 dev->iflink = 0; 1233 dev->iflink = 0;
1244 dev->addr_len = sizeof(struct in6_addr); 1234 dev->addr_len = sizeof(struct in6_addr);
1245 dev->features |= NETIF_F_NETNS_LOCAL;
1246 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1235 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1247} 1236}
1248 1237
@@ -1297,11 +1286,17 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
1297 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1286 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1298}; 1287};
1299 1288
1300static void ip6gre_destroy_tunnels(struct ip6gre_net *ign, 1289static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1301 struct list_head *head)
1302{ 1290{
1291 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1292 struct net_device *dev, *aux;
1303 int prio; 1293 int prio;
1304 1294
1295 for_each_netdev_safe(net, dev, aux)
1296 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1297 dev->rtnl_link_ops == &ip6gre_tap_ops)
1298 unregister_netdevice_queue(dev, head);
1299
1305 for (prio = 0; prio < 4; prio++) { 1300 for (prio = 0; prio < 4; prio++) {
1306 int h; 1301 int h;
1307 for (h = 0; h < HASH_SIZE; h++) { 1302 for (h = 0; h < HASH_SIZE; h++) {
@@ -1310,7 +1305,12 @@ static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
1310 t = rtnl_dereference(ign->tunnels[prio][h]); 1305 t = rtnl_dereference(ign->tunnels[prio][h]);
1311 1306
1312 while (t != NULL) { 1307 while (t != NULL) {
1313 unregister_netdevice_queue(t->dev, head); 1308 /* If dev is in the same netns, it has already
1309 * been added to the list by the previous loop.
1310 */
1311 if (!net_eq(dev_net(t->dev), net))
1312 unregister_netdevice_queue(t->dev,
1313 head);
1314 t = rtnl_dereference(t->next); 1314 t = rtnl_dereference(t->next);
1315 } 1315 }
1316 } 1316 }
@@ -1329,6 +1329,11 @@ static int __net_init ip6gre_init_net(struct net *net)
1329 goto err_alloc_dev; 1329 goto err_alloc_dev;
1330 } 1330 }
1331 dev_net_set(ign->fb_tunnel_dev, net); 1331 dev_net_set(ign->fb_tunnel_dev, net);
1332 /* FB netdevice is special: we have one, and only one per netns.
1333 * Allowing to move it to another netns is clearly unsafe.
1334 */
1335 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1336
1332 1337
1333 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev); 1338 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1334 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops; 1339 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1349,12 +1354,10 @@ err_alloc_dev:
1349 1354
1350static void __net_exit ip6gre_exit_net(struct net *net) 1355static void __net_exit ip6gre_exit_net(struct net *net)
1351{ 1356{
1352 struct ip6gre_net *ign;
1353 LIST_HEAD(list); 1357 LIST_HEAD(list);
1354 1358
1355 ign = net_generic(net, ip6gre_net_id);
1356 rtnl_lock(); 1359 rtnl_lock();
1357 ip6gre_destroy_tunnels(ign, &list); 1360 ip6gre_destroy_tunnels(net, &list);
1358 unregister_netdevice_many(&list); 1361 unregister_netdevice_many(&list);
1359 rtnl_unlock(); 1362 rtnl_unlock();
1360} 1363}
@@ -1531,15 +1534,14 @@ out:
1531static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], 1534static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1532 struct nlattr *data[]) 1535 struct nlattr *data[])
1533{ 1536{
1534 struct ip6_tnl *t, *nt; 1537 struct ip6_tnl *t, *nt = netdev_priv(dev);
1535 struct net *net = dev_net(dev); 1538 struct net *net = nt->net;
1536 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1539 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1537 struct __ip6_tnl_parm p; 1540 struct __ip6_tnl_parm p;
1538 1541
1539 if (dev == ign->fb_tunnel_dev) 1542 if (dev == ign->fb_tunnel_dev)
1540 return -EINVAL; 1543 return -EINVAL;
1541 1544
1542 nt = netdev_priv(dev);
1543 ip6gre_netlink_parms(data, &p); 1545 ip6gre_netlink_parms(data, &p);
1544 1546
1545 t = ip6gre_tunnel_locate(net, &p, 0); 1547 t = ip6gre_tunnel_locate(net, &p, 0);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b2f091566f88..65eda2a8af48 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -97,9 +97,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
97 SKB_GSO_DODGY | 97 SKB_GSO_DODGY |
98 SKB_GSO_TCP_ECN | 98 SKB_GSO_TCP_ECN |
99 SKB_GSO_GRE | 99 SKB_GSO_GRE |
100 SKB_GSO_GRE_CSUM |
100 SKB_GSO_IPIP | 101 SKB_GSO_IPIP |
101 SKB_GSO_SIT | 102 SKB_GSO_SIT |
102 SKB_GSO_UDP_TUNNEL | 103 SKB_GSO_UDP_TUNNEL |
104 SKB_GSO_UDP_TUNNEL_CSUM |
103 SKB_GSO_MPLS | 105 SKB_GSO_MPLS |
104 SKB_GSO_TCPV6 | 106 SKB_GSO_TCPV6 |
105 0))) 107 0)))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index fbf11562b54c..cb9df0eb4023 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
219 skb->mark = sk->sk_mark; 219 skb->mark = sk->sk_mark;
220 220
221 mtu = dst_mtu(dst); 221 mtu = dst_mtu(dst);
222 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { 222 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
223 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), 223 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
224 IPSTATS_MIB_OUT, skb->len); 224 IPSTATS_MIB_OUT, skb->len);
225 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 225 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@ -347,11 +347,11 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
347 if (skb->len <= mtu) 347 if (skb->len <= mtu)
348 return false; 348 return false;
349 349
350 /* ipv6 conntrack defrag sets max_frag_size + local_df */ 350 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
352 return true; 352 return true;
353 353
354 if (skb->local_df) 354 if (skb->ignore_df)
355 return false; 355 return false;
356 356
357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -537,6 +537,18 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
537 skb_copy_secmark(to, from); 537 skb_copy_secmark(to, from);
538} 538}
539 539
540static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
541{
542 static u32 ip6_idents_hashrnd __read_mostly;
543 u32 hash, id;
544
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 id = ip_idents_reserve(hash, 1);
549 fhdr->identification = htonl(id);
550}
551
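
Fragment IDs for IPv6 are now drawn from the shared hash-bucketed ident table instead of the per-destination inetpeer scheme (the old ipv6_select_ident() is deleted from output_core.c later in this section). The reservation step, reconstructed in rough shape (the real ip_idents_reserve() lives in net/ipv4/route.c and also mixes in the time since a bucket was last used):

	static atomic_t id_buckets[2048];	/* illustrative table size */

	static u32 ident_reserve(u32 hash, int segs)
	{
		atomic_t *bucket = &id_buckets[hash % ARRAY_SIZE(id_buckets)];

		/* hand out 'segs' consecutive IDs, return the first */
		return (u32)atomic_add_return(segs, bucket) - segs;
	}
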
540int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 552int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
541{ 553{
542 struct sk_buff *frag; 554 struct sk_buff *frag;
@@ -559,7 +571,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
559 /* We must not fragment if the socket is set to force MTU discovery 571 /* We must not fragment if the socket is set to force MTU discovery
 560 * or if the skb is not generated by a local socket. 572 * or if the skb is not generated by a local socket.
561 */ 573 */
562 if (unlikely(!skb->local_df && skb->len > mtu) || 574 if (unlikely(!skb->ignore_df && skb->len > mtu) ||
563 (IP6CB(skb)->frag_max_size && 575 (IP6CB(skb)->frag_max_size &&
564 IP6CB(skb)->frag_max_size > mtu)) { 576 IP6CB(skb)->frag_max_size > mtu)) {
565 if (skb->sk && dst_allfrag(skb_dst(skb))) 577 if (skb->sk && dst_allfrag(skb_dst(skb)))
@@ -1234,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1234 sizeof(struct frag_hdr) : 0) + 1246 sizeof(struct frag_hdr) : 0) +
1235 rt->rt6i_nfheader_len; 1247 rt->rt6i_nfheader_len;
1236 1248
1237 if (ip6_sk_local_df(sk)) 1249 if (ip6_sk_ignore_df(sk))
1238 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN; 1250 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1239 else 1251 else
1240 maxnonfragsize = mtu; 1252 maxnonfragsize = mtu;
@@ -1544,7 +1556,7 @@ int ip6_push_pending_frames(struct sock *sk)
1544 } 1556 }
1545 1557
1546 /* Allow local fragmentation. */ 1558 /* Allow local fragmentation. */
1547 skb->local_df = ip6_sk_local_df(sk); 1559 skb->ignore_df = ip6_sk_ignore_df(sk);
1548 1560
1549 *final_dst = fl6->daddr; 1561 *final_dst = fl6->daddr;
1550 __skb_pull(skb, skb_network_header_len(skb)); 1562 __skb_pull(skb, skb_network_header_len(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f6a66bb4114d..afa082458360 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -61,6 +61,7 @@
61MODULE_AUTHOR("Ville Nuorvala"); 61MODULE_AUTHOR("Ville Nuorvala");
62MODULE_DESCRIPTION("IPv6 tunneling device"); 62MODULE_DESCRIPTION("IPv6 tunneling device");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_ALIAS_RTNL_LINK("ip6tnl");
64MODULE_ALIAS_NETDEV("ip6tnl0"); 65MODULE_ALIAS_NETDEV("ip6tnl0");
65 66
66#ifdef IP6_TNL_DEBUG 67#ifdef IP6_TNL_DEBUG
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 6cc9f9371cc5..9aaa6bb229e4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -795,15 +795,12 @@ static const struct net_device_ops vti6_netdev_ops = {
795 **/ 795 **/
796static void vti6_dev_setup(struct net_device *dev) 796static void vti6_dev_setup(struct net_device *dev)
797{ 797{
798 struct ip6_tnl *t;
799
800 dev->netdev_ops = &vti6_netdev_ops; 798 dev->netdev_ops = &vti6_netdev_ops;
801 dev->destructor = vti6_dev_free; 799 dev->destructor = vti6_dev_free;
802 800
803 dev->type = ARPHRD_TUNNEL6; 801 dev->type = ARPHRD_TUNNEL6;
804 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); 802 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
805 dev->mtu = ETH_DATA_LEN; 803 dev->mtu = ETH_DATA_LEN;
806 t = netdev_priv(dev);
807 dev->flags |= IFF_NOARP; 804 dev->flags |= IFF_NOARP;
808 dev->addr_len = sizeof(struct in6_addr); 805 dev->addr_len = sizeof(struct in6_addr);
809 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 806 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 84c7f33d0cf8..387d8b8fc18d 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -90,17 +90,9 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
90 if (nf_ct_is_untracked(ct)) 90 if (nf_ct_is_untracked(ct))
91 return NF_ACCEPT; 91 return NF_ACCEPT;
92 92
 93 nat = nfct_nat(ct);
 94 if (!nat) {
 95 /* NAT module was loaded late. */
 96 if (nf_ct_is_confirmed(ct))
 97 return NF_ACCEPT;
 98 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
 99 if (nat == NULL) {
 100 pr_debug("failed to add NAT extension\n");
 101 return NF_ACCEPT;
 102 }
 103 }
 93 nat = nf_ct_nat_ext_add(ct);
 94 if (nat == NULL)
 95 return NF_ACCEPT;
104 96
105 switch (ctinfo) { 97 switch (ctinfo) {
106 case IP_CT_RELATED: 98 case IP_CT_RELATED:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 767ab8da8218..0d5279fd852a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
451 } 451 }
452 sub_frag_mem_limit(&fq->q, head->truesize); 452 sub_frag_mem_limit(&fq->q, head->truesize);
453 453
454 head->local_df = 1; 454 head->ignore_df = 1;
455 head->next = NULL; 455 head->next = NULL;
456 head->dev = dev; 456 head->dev = dev;
457 head->tstamp = fq->q.stamp; 457 head->tstamp = fq->q.stamp;
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 9c3297a768fd..d189fcb437fe 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -47,15 +47,9 @@ static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
47 if (ct == NULL || nf_ct_is_untracked(ct)) 47 if (ct == NULL || nf_ct_is_untracked(ct))
48 return NF_ACCEPT; 48 return NF_ACCEPT;
49 49
 50 nat = nfct_nat(ct);
 51 if (nat == NULL) {
 52 /* Conntrack module was loaded late, can't add extension. */
 53 if (nf_ct_is_confirmed(ct))
 54 return NF_ACCEPT;
 55 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
 56 if (nat == NULL)
 57 return NF_ACCEPT;
 58 }
 50 nat = nf_ct_nat_ext_add(ct);
 51 if (nat == NULL)
 52 return NF_ACCEPT;
59 53
60 switch (ctinfo) { 54 switch (ctinfo) {
61 case IP_CT_RELATED: 55 case IP_CT_RELATED:
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 56596ce390a1..5ec867e4a8b7 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -8,32 +8,6 @@
8#include <net/addrconf.h> 8#include <net/addrconf.h>
9#include <net/secure_seq.h> 9#include <net/secure_seq.h>
10 10
11void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
12{
13 static atomic_t ipv6_fragmentation_id;
14 struct in6_addr addr;
15 int ident;
16
17#if IS_ENABLED(CONFIG_IPV6)
18 struct inet_peer *peer;
19 struct net *net;
20
21 net = dev_net(rt->dst.dev);
22 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
23 if (peer) {
24 fhdr->identification = htonl(inet_getid(peer, 0));
25 inet_putpeer(peer);
26 return;
27 }
28#endif
29 ident = atomic_inc_return(&ipv6_fragmentation_id);
30
31 addr = rt->rt6i_dst.addr;
32 addr.s6_addr32[0] ^= (__force __be32)ident;
33 fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
34}
35EXPORT_SYMBOL(ipv6_select_ident);
36
37int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 11int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
38{ 12{
39 u16 offset = sizeof(struct ipv6hdr); 13 u16 offset = sizeof(struct ipv6hdr);
@@ -104,6 +78,7 @@ int __ip6_local_out(struct sk_buff *skb)
104 if (len > IPV6_MAXPLEN) 78 if (len > IPV6_MAXPLEN)
105 len = 0; 79 len = 0;
106 ipv6_hdr(skb)->payload_len = htons(len); 80 ipv6_hdr(skb)->payload_len = htons(len);
81 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
107 82
108 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 83 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
109 skb_dst(skb)->dev, dst_output); 84 skb_dst(skb)->dev, dst_output);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bda74291c3e0..5b7a1ed2aba9 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -51,7 +51,6 @@ static struct inet_protosw pingv6_protosw = {
51 .protocol = IPPROTO_ICMPV6, 51 .protocol = IPPROTO_ICMPV6,
52 .prot = &pingv6_prot, 52 .prot = &pingv6_prot,
53 .ops = &inet6_dgram_ops, 53 .ops = &inet6_dgram_ops,
54 .no_check = UDP_CSUM_DEFAULT,
55 .flags = INET_PROTOSW_REUSE, 54 .flags = INET_PROTOSW_REUSE,
56}; 55};
57 56
@@ -168,12 +167,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
168 pfh.wcheck = 0; 167 pfh.wcheck = 0;
169 pfh.family = AF_INET6; 168 pfh.family = AF_INET6;
170 169
 171 if (ipv6_addr_is_multicast(&fl6.daddr))
 172 hlimit = np->mcast_hops;
 173 else
 174 hlimit = np->hop_limit;
 175 if (hlimit < 0)
 176 hlimit = ip6_dst_hoplimit(dst);
 170 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
177 171
178 lock_sock(sk); 172 lock_sock(sk);
179 err = ip6_append_data(sk, ping_getfrag, &pfh, len, 173 err = ip6_append_data(sk, ping_getfrag, &pfh, len,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 091d066a57b3..3317440ea341 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -186,7 +186,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
186/* can be called either with percpu mib (pcpumib != NULL), 186/* can be called either with percpu mib (pcpumib != NULL),
187 * or shared one (smib != NULL) 187 * or shared one (smib != NULL)
188 */ 188 */
189static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib, 189static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
190 atomic_long_t *smib, 190 atomic_long_t *smib,
191 const struct snmp_mib *itemlist) 191 const struct snmp_mib *itemlist)
192{ 192{
@@ -201,7 +201,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
201 } 201 }
202} 202}
203 203
204static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib, 204static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
205 const struct snmp_mib *itemlist, size_t syncpoff) 205 const struct snmp_mib *itemlist, size_t syncpoff)
206{ 206{
207 int i; 207 int i;
@@ -215,14 +215,14 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
215{ 215{
216 struct net *net = (struct net *)seq->private; 216 struct net *net = (struct net *)seq->private;
217 217
218 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics, 218 snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
219 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 219 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
220 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 220 snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
221 NULL, snmp6_icmp6_list); 221 NULL, snmp6_icmp6_list);
222 snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs); 222 snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
223 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, 223 snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
224 NULL, snmp6_udp6_list); 224 NULL, snmp6_udp6_list);
225 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, 225 snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
226 NULL, snmp6_udplite6_list); 226 NULL, snmp6_udplite6_list);
227 return 0; 227 return 0;
228} 228}
@@ -245,7 +245,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
245 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 245 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
246 246
247 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 247 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
248 snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6, 248 snmp6_seq_show_item64(seq, idev->stats.ipv6,
249 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 249 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
250 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs, 250 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
251 snmp6_icmp6_list); 251 snmp6_icmp6_list);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f29996e368a..b2dc60b0c764 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -873,14 +873,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
873 err = PTR_ERR(dst); 873 err = PTR_ERR(dst);
874 goto out; 874 goto out;
875 } 875 }
 876 if (hlimit < 0) {
 877 if (ipv6_addr_is_multicast(&fl6.daddr))
 878 hlimit = np->mcast_hops;
 879 else
 880 hlimit = np->hop_limit;
 881 if (hlimit < 0)
 882 hlimit = ip6_dst_hoplimit(dst);
 883 }
 876 if (hlimit < 0)
 877 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
884 878
885 if (tclass < 0) 879 if (tclass < 0)
886 tclass = np->tclass; 880 tclass = np->tclass;
@@ -1328,7 +1322,6 @@ static struct inet_protosw rawv6_protosw = {
1328 .protocol = IPPROTO_IP, /* wild card */ 1322 .protocol = IPPROTO_IP, /* wild card */
1329 .prot = &rawv6_prot, 1323 .prot = &rawv6_prot,
1330 .ops = &inet6_sockraw_ops, 1324 .ops = &inet6_sockraw_ops,
1331 .no_check = UDP_CSUM_DEFAULT,
1332 .flags = INET_PROTOSW_REUSE, 1325 .flags = INET_PROTOSW_REUSE,
1333}; 1326};
1334 1327
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6ebdb7b6744c..f23fbd28a501 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1176,7 +1176,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1176 1176
1177 memset(&fl6, 0, sizeof(fl6)); 1177 memset(&fl6, 0, sizeof(fl6));
1178 fl6.flowi6_oif = oif; 1178 fl6.flowi6_oif = oif;
1179 fl6.flowi6_mark = mark; 1179 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1180 fl6.daddr = iph->daddr; 1180 fl6.daddr = iph->daddr;
1181 fl6.saddr = iph->saddr; 1181 fl6.saddr = iph->saddr;
1182 fl6.flowlabel = ip6_flowinfo(iph); 1182 fl6.flowlabel = ip6_flowinfo(iph);
@@ -1455,7 +1455,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
1455 goto out; 1455 goto out;
1456 1456
1457 net->ipv6.ip6_rt_gc_expire++; 1457 net->ipv6.ip6_rt_gc_expire++;
1458 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size); 1458 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1459 entries = dst_entries_get_slow(ops); 1459 entries = dst_entries_get_slow(ops);
1460 if (entries < ops->gc_thresh) 1460 if (entries < ops->gc_thresh)
1461 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 1461 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e5a453ca302e..4f408176dc64 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -560,12 +560,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
560 560
561 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 561 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
562 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 562 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
563 t->dev->ifindex, 0, IPPROTO_IPV6, 0); 563 t->parms.link, 0, IPPROTO_IPV6, 0);
564 err = 0; 564 err = 0;
565 goto out; 565 goto out;
566 } 566 }
567 if (type == ICMP_REDIRECT) { 567 if (type == ICMP_REDIRECT) {
568 ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0, 568 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
569 IPPROTO_IPV6, 0); 569 IPPROTO_IPV6, 0);
570 err = 0; 570 err = 0;
571 goto out; 571 goto out;
@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
1828module_init(sit_init); 1828module_init(sit_init);
1829module_exit(sit_cleanup); 1829module_exit(sit_cleanup);
1830MODULE_LICENSE("GPL"); 1830MODULE_LICENSE("GPL");
1831MODULE_ALIAS_RTNL_LINK("sit");
1831MODULE_ALIAS_NETDEV("sit0"); 1832MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb53a5e73c1a..a822b880689b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -216,6 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
217 ireq->ir_iif = inet6_iif(skb); 217 ireq->ir_iif = inet6_iif(skb);
218 218
219 ireq->ir_mark = inet_request_mark(sk, skb);
220
219 req->expires = 0UL; 221 req->expires = 0UL;
220 req->num_retrans = 0; 222 req->num_retrans = 0;
221 ireq->ecn_ok = ecn_ok; 223 ireq->ecn_ok = ecn_ok;
@@ -242,7 +244,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
242 final_p = fl6_update_dst(&fl6, np->opt, &final); 244 final_p = fl6_update_dst(&fl6, np->opt, &final);
243 fl6.saddr = ireq->ir_v6_loc_addr; 245 fl6.saddr = ireq->ir_v6_loc_addr;
244 fl6.flowi6_oif = sk->sk_bound_dev_if; 246 fl6.flowi6_oif = sk->sk_bound_dev_if;
245 fl6.flowi6_mark = sk->sk_mark; 247 fl6.flowi6_mark = ireq->ir_mark;
246 fl6.fl6_dport = ireq->ir_rmt_port; 248 fl6.fl6_dport = ireq->ir_rmt_port;
247 fl6.fl6_sport = inet_sk(sk)->inet_sport; 249 fl6.fl6_sport = inet_sk(sk)->inet_sport;
248 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 250 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
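
ireq->ir_mark comes from inet_request_mark(), which must choose between the listener's own mark and the mark of the SYN that created the request. A sketch of the expected helper, assuming the companion fwmark_accept sysctl from the same patch series:

    static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
    {
            /* An explicit SO_MARK on the listener wins; otherwise,
             * optionally inherit the incoming skb's mark so that the
             * reply and the eventual child socket stay in the same
             * routing/firewall class as the SYN.
             */
            if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_fwmark_accept)
                    return skb->mark;
            return sk->sk_mark;
    }
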
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 7f405a168822..058f3eca2e53 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
38 .mode = 0644, 38 .mode = 0644,
39 .proc_handler = proc_dointvec 39 .proc_handler = proc_dointvec
40 }, 40 },
41 {
42 .procname = "fwmark_reflect",
43 .data = &init_net.ipv6.sysctl.fwmark_reflect,
44 .maxlen = sizeof(int),
45 .mode = 0644,
46 .proc_handler = proc_dointvec
47 },
41 { } 48 { }
42}; 49};
43 50
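
From userspace the new knob appears as net.ipv6.fwmark_reflect. A small illustrative C snippet that flips it on; the proc path is inferred from the .procname above:

    #include <stdio.h>

    int main(void)
    {
            /* 0 (default): replies to unowned packets carry mark 0.
             * 1: RSTs and ICMPv6 errors reflect the original skb->mark.
             */
            FILE *f = fopen("/proc/sys/net/ipv6/fwmark_reflect", "w");

            if (!f)
                    return 1;
            fputs("1", f);
            return fclose(f) ? 1 : 0;
    }
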
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e289830ed6e3..229239ad96b1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
340 struct sock *sk; 340 struct sock *sk;
341 int err; 341 int err;
342 struct tcp_sock *tp; 342 struct tcp_sock *tp;
343 __u32 seq; 343 struct request_sock *fastopen;
344 __u32 seq, snd_una;
344 struct net *net = dev_net(skb->dev); 345 struct net *net = dev_net(skb->dev);
345 346
346 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, 347 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
371 372
372 tp = tcp_sk(sk); 373 tp = tcp_sk(sk);
373 seq = ntohl(th->seq); 374 seq = ntohl(th->seq);
 375 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
376 fastopen = tp->fastopen_rsk;
377 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
374 if (sk->sk_state != TCP_LISTEN && 378 if (sk->sk_state != TCP_LISTEN &&
375 !between(seq, tp->snd_una, tp->snd_nxt)) { 379 !between(seq, snd_una, tp->snd_nxt)) {
376 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 380 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
377 goto out; 381 goto out;
378 } 382 }
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
436 goto out; 440 goto out;
437 441
438 case TCP_SYN_SENT: 442 case TCP_SYN_SENT:
439 case TCP_SYN_RECV: /* Cannot happen. 443 case TCP_SYN_RECV:
440 It can, it SYNs are crossed. --ANK */ 444 /* Only in fast or simultaneous open. If a fast open socket is
 445 * already accepted it is treated as a connected one below.
446 */
447 if (fastopen && fastopen->sk == NULL)
448 break;
449
441 if (!sock_owned_by_user(sk)) { 450 if (!sock_owned_by_user(sk)) {
442 sk->sk_err = err; 451 sk->sk_err = err;
443 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 452 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
@@ -463,7 +472,8 @@ out:
463static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, 472static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
464 struct flowi6 *fl6, 473 struct flowi6 *fl6,
465 struct request_sock *req, 474 struct request_sock *req,
466 u16 queue_mapping) 475 u16 queue_mapping,
476 struct tcp_fastopen_cookie *foc)
467{ 477{
468 struct inet_request_sock *ireq = inet_rsk(req); 478 struct inet_request_sock *ireq = inet_rsk(req);
469 struct ipv6_pinfo *np = inet6_sk(sk); 479 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
474 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 484 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
475 goto done; 485 goto done;
476 486
477 skb = tcp_make_synack(sk, dst, req, NULL); 487 skb = tcp_make_synack(sk, dst, req, foc);
478 488
479 if (skb) { 489 if (skb) {
480 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, 490 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
498 struct flowi6 fl6; 508 struct flowi6 fl6;
499 int res; 509 int res;
500 510
501 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0); 511 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
502 if (!res) { 512 if (!res) {
503 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 513 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
504 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -802,6 +812,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
802 fl6.flowi6_oif = inet6_iif(skb); 812 fl6.flowi6_oif = inet6_iif(skb);
803 else 813 else
804 fl6.flowi6_oif = oif; 814 fl6.flowi6_oif = oif;
815 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
805 fl6.fl6_dport = t1->dest; 816 fl6.fl6_dport = t1->dest;
806 fl6.fl6_sport = t1->source; 817 fl6.fl6_sport = t1->source;
807 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 818 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -917,7 +928,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
917static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 928static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
918 struct request_sock *req) 929 struct request_sock *req)
919{ 930{
920 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, 931 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
932 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
933 */
934 tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
935 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
936 tcp_rsk(req)->rcv_nxt,
921 req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, 937 req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
922 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 938 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
923 0, 0); 939 0, 0);
@@ -969,8 +985,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
969 struct tcp_sock *tp = tcp_sk(sk); 985 struct tcp_sock *tp = tcp_sk(sk);
970 __u32 isn = TCP_SKB_CB(skb)->when; 986 __u32 isn = TCP_SKB_CB(skb)->when;
971 struct dst_entry *dst = NULL; 987 struct dst_entry *dst = NULL;
988 struct tcp_fastopen_cookie foc = { .len = -1 };
989 bool want_cookie = false, fastopen;
972 struct flowi6 fl6; 990 struct flowi6 fl6;
973 bool want_cookie = false; 991 int err;
974 992
975 if (skb->protocol == htons(ETH_P_IP)) 993 if (skb->protocol == htons(ETH_P_IP))
976 return tcp_v4_conn_request(sk, skb); 994 return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1019,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1001 tcp_clear_options(&tmp_opt); 1019 tcp_clear_options(&tmp_opt);
1002 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1020 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1003 tmp_opt.user_mss = tp->rx_opt.user_mss; 1021 tmp_opt.user_mss = tp->rx_opt.user_mss;
1004 tcp_parse_options(skb, &tmp_opt, 0, NULL); 1022 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1005 1023
1006 if (want_cookie && !tmp_opt.saw_tstamp) 1024 if (want_cookie && !tmp_opt.saw_tstamp)
1007 tcp_clear_options(&tmp_opt); 1025 tcp_clear_options(&tmp_opt);
@@ -1016,6 +1034,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1016 TCP_ECN_create_request(req, skb, sock_net(sk)); 1034 TCP_ECN_create_request(req, skb, sock_net(sk));
1017 1035
1018 ireq->ir_iif = sk->sk_bound_dev_if; 1036 ireq->ir_iif = sk->sk_bound_dev_if;
1037 ireq->ir_mark = inet_request_mark(sk, skb);
1019 1038
1020 /* So that link locals have meaning */ 1039 /* So that link locals have meaning */
1021 if (!sk->sk_bound_dev_if && 1040 if (!sk->sk_bound_dev_if &&
@@ -1074,19 +1093,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1074 isn = tcp_v6_init_sequence(skb); 1093 isn = tcp_v6_init_sequence(skb);
1075 } 1094 }
1076have_isn: 1095have_isn:
1077 tcp_rsk(req)->snt_isn = isn;
1078 1096
1079 if (security_inet_conn_request(sk, skb, req)) 1097 if (security_inet_conn_request(sk, skb, req))
1080 goto drop_and_release; 1098 goto drop_and_release;
1081 1099
1082 if (tcp_v6_send_synack(sk, dst, &fl6, req, 1100 if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
1083 skb_get_queue_mapping(skb)) ||
1084 want_cookie)
1085 goto drop_and_free; 1101 goto drop_and_free;
1086 1102
1103 tcp_rsk(req)->snt_isn = isn;
1087 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1104 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1088 tcp_rsk(req)->listener = NULL; 1105 tcp_openreq_init_rwin(req, sk, dst);
1089 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1106 fastopen = !want_cookie &&
1107 tcp_try_fastopen(sk, skb, req, &foc, dst);
1108 err = tcp_v6_send_synack(sk, dst, &fl6, req,
1109 skb_get_queue_mapping(skb), &foc);
1110 if (!fastopen) {
1111 if (err || want_cookie)
1112 goto drop_and_free;
1113
1114 tcp_rsk(req)->listener = NULL;
1115 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1116 }
1090 return 0; 1117 return 0;
1091 1118
1092drop_and_release: 1119drop_and_release:
@@ -1294,25 +1321,6 @@ out:
1294 return NULL; 1321 return NULL;
1295} 1322}
1296 1323
1297static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1298{
1299 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1300 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1301 &ipv6_hdr(skb)->daddr, skb->csum)) {
1302 skb->ip_summed = CHECKSUM_UNNECESSARY;
1303 return 0;
1304 }
1305 }
1306
1307 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1308 &ipv6_hdr(skb)->saddr,
1309 &ipv6_hdr(skb)->daddr, 0));
1310
1311 if (skb->len <= 76)
1312 return __skb_checksum_complete(skb);
1313 return 0;
1314}
1315
1316/* The socket must have it's spinlock held when we get 1324/* The socket must have it's spinlock held when we get
1317 * here. 1325 * here.
1318 * 1326 *
@@ -1486,7 +1494,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1486 if (!pskb_may_pull(skb, th->doff*4)) 1494 if (!pskb_may_pull(skb, th->doff*4))
1487 goto discard_it; 1495 goto discard_it;
1488 1496
1489 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) 1497 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1490 goto csum_error; 1498 goto csum_error;
1491 1499
1492 th = tcp_hdr(skb); 1500 th = tcp_hdr(skb);
@@ -1779,6 +1787,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1779 const struct inet_sock *inet = inet_sk(sp); 1787 const struct inet_sock *inet = inet_sk(sp);
1780 const struct tcp_sock *tp = tcp_sk(sp); 1788 const struct tcp_sock *tp = tcp_sk(sp);
1781 const struct inet_connection_sock *icsk = inet_csk(sp); 1789 const struct inet_connection_sock *icsk = inet_csk(sp);
1790 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1782 1791
1783 dest = &sp->sk_v6_daddr; 1792 dest = &sp->sk_v6_daddr;
1784 src = &sp->sk_v6_rcv_saddr; 1793 src = &sp->sk_v6_rcv_saddr;
@@ -1821,7 +1830,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1821 jiffies_to_clock_t(icsk->icsk_ack.ato), 1830 jiffies_to_clock_t(icsk->icsk_ack.ato),
1822 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 1831 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1823 tp->snd_cwnd, 1832 tp->snd_cwnd,
1824 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh 1833 sp->sk_state == TCP_LISTEN ?
1834 (fastopenq ? fastopenq->max_qlen : 0) :
1835 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1825 ); 1836 );
1826} 1837}
1827 1838
@@ -1981,7 +1992,6 @@ static struct inet_protosw tcpv6_protosw = {
1981 .protocol = IPPROTO_TCP, 1992 .protocol = IPPROTO_TCP,
1982 .prot = &tcpv6_prot, 1993 .prot = &tcpv6_prot,
1983 .ops = &inet6_stream_ops, 1994 .ops = &inet6_stream_ops,
1984 .no_check = 0,
1985 .flags = INET_PROTOSW_PERMANENT | 1995 .flags = INET_PROTOSW_PERMANENT |
1986 INET_PROTOSW_ICSK, 1996 INET_PROTOSW_ICSK,
1987}; 1997};
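
With the above, IPv6 listeners gain the same Fast Open path that tcp_ipv4.c already had: tcp_try_fastopen() may create a child socket before the handshake completes, and only non-TFO requests are parked in the SYN queue via inet6_csk_reqsk_queue_hash_add(). Nothing IPv6-specific is needed from applications; a server opts in the usual way (sketch, assuming the toolchain headers expose TCP_FASTOPEN):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int enable_tfo(int listen_fd)
    {
            /* Upper bound on pending TFO requests; this is the
             * fastopenq->max_qlen value that get_tcp6_sock() now
             * reports for listening sockets in /proc/net/tcp6.
             */
            int qlen = 16;

            return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
                              &qlen, sizeof(qlen));
    }
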
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1e586d92260e..95c834799288 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -634,6 +634,10 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
634 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { 634 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
635 int ret; 635 int ret;
636 636
637 /* Verify checksum before giving to encap */
638 if (udp_lib_checksum_complete(skb))
639 goto csum_error;
640
637 ret = encap_rcv(sk, skb); 641 ret = encap_rcv(sk, skb);
638 if (ret <= 0) { 642 if (ret <= 0) {
639 UDP_INC_STATS_BH(sock_net(sk), 643 UDP_INC_STATS_BH(sock_net(sk),
@@ -701,17 +705,16 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
701 int dif) 705 int dif)
702{ 706{
703 struct hlist_nulls_node *node; 707 struct hlist_nulls_node *node;
704 struct sock *s = sk;
705 unsigned short num = ntohs(loc_port); 708 unsigned short num = ntohs(loc_port);
706 709
707 sk_nulls_for_each_from(s, node) { 710 sk_nulls_for_each_from(sk, node) {
708 struct inet_sock *inet = inet_sk(s); 711 struct inet_sock *inet = inet_sk(sk);
709 712
710 if (!net_eq(sock_net(s), net)) 713 if (!net_eq(sock_net(sk), net))
711 continue; 714 continue;
712 715
713 if (udp_sk(s)->udp_port_hash == num && 716 if (udp_sk(sk)->udp_port_hash == num &&
714 s->sk_family == PF_INET6) { 717 sk->sk_family == PF_INET6) {
715 if (inet->inet_dport) { 718 if (inet->inet_dport) {
716 if (inet->inet_dport != rmt_port) 719 if (inet->inet_dport != rmt_port)
717 continue; 720 continue;
@@ -720,16 +723,16 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
720 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) 723 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
721 continue; 724 continue;
722 725
723 if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif) 726 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
724 continue; 727 continue;
725 728
726 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { 729 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
727 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)) 730 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
728 continue; 731 continue;
729 } 732 }
730 if (!inet6_mc_check(s, loc_addr, rmt_addr)) 733 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
731 continue; 734 continue;
732 return s; 735 return sk;
733 } 736 }
734 } 737 }
735 return NULL; 738 return NULL;
@@ -760,6 +763,17 @@ static void flush_stack(struct sock **stack, unsigned int count,
760 if (unlikely(skb1)) 763 if (unlikely(skb1))
761 kfree_skb(skb1); 764 kfree_skb(skb1);
762} 765}
766
767static void udp6_csum_zero_error(struct sk_buff *skb)
768{
769 /* RFC 2460 section 8.1 says that we SHOULD log
770 * this error. Well, it is reasonable.
771 */
772 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
773 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
774 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
775}
776
763/* 777/*
764 * Note: called only from the BH handler context, 778 * Note: called only from the BH handler context,
765 * so we don't need to lock the hashes. 779 * so we don't need to lock the hashes.
@@ -779,7 +793,12 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
779 dif = inet6_iif(skb); 793 dif = inet6_iif(skb);
780 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); 794 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
781 while (sk) { 795 while (sk) {
 782 stack[count++] = sk; 796 /* If the checksum is zero and no_check is not on for
 797 * the socket, then skip it.
798 */
799 if (uh->check || udp_sk(sk)->no_check6_rx)
800 stack[count++] = sk;
801
783 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, 802 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
784 uh->source, saddr, dif); 803 uh->source, saddr, dif);
785 if (unlikely(count == ARRAY_SIZE(stack))) { 804 if (unlikely(count == ARRAY_SIZE(stack))) {
@@ -867,6 +886,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
867 if (sk != NULL) { 886 if (sk != NULL) {
868 int ret; 887 int ret;
869 888
889 if (!uh->check && !udp_sk(sk)->no_check6_rx) {
890 sock_put(sk);
891 udp6_csum_zero_error(skb);
892 goto csum_error;
893 }
894
870 ret = udpv6_queue_rcv_skb(sk, skb); 895 ret = udpv6_queue_rcv_skb(sk, skb);
871 sock_put(sk); 896 sock_put(sk);
872 897
@@ -879,6 +904,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
879 return 0; 904 return 0;
880 } 905 }
881 906
907 if (!uh->check) {
908 udp6_csum_zero_error(skb);
909 goto csum_error;
910 }
911
882 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 912 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
883 goto discard; 913 goto discard;
884 914
@@ -1006,7 +1036,10 @@ static int udp_v6_push_pending_frames(struct sock *sk)
1006 1036
1007 if (is_udplite) 1037 if (is_udplite)
1008 csum = udplite_csum_outgoing(sk, skb); 1038 csum = udplite_csum_outgoing(sk, skb);
1009 else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1039 else if (up->no_check6_tx) { /* UDP csum disabled */
1040 skb->ip_summed = CHECKSUM_NONE;
1041 goto send;
1042 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1010 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, 1043 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
1011 up->len); 1044 up->len);
1012 goto send; 1045 goto send;
@@ -1232,14 +1265,8 @@ do_udp_sendmsg:
1232 goto out; 1265 goto out;
1233 } 1266 }
1234 1267
1235 if (hlimit < 0) { 1268 if (hlimit < 0)
1236 if (ipv6_addr_is_multicast(&fl6.daddr)) 1269 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1237 hlimit = np->mcast_hops;
1238 else
1239 hlimit = np->hop_limit;
1240 if (hlimit < 0)
1241 hlimit = ip6_dst_hoplimit(dst);
1242 }
1243 1270
1244 if (tclass < 0) 1271 if (tclass < 0)
1245 tclass = np->tclass; 1272 tclass = np->tclass;
@@ -1479,7 +1506,6 @@ static struct inet_protosw udpv6_protosw = {
1479 .protocol = IPPROTO_UDP, 1506 .protocol = IPPROTO_UDP,
1480 .prot = &udpv6_prot, 1507 .prot = &udpv6_prot,
1481 .ops = &inet6_dgram_ops, 1508 .ops = &inet6_dgram_ops,
1482 .no_check = UDP_CSUM_DEFAULT,
1483 .flags = INET_PROTOSW_PERMANENT, 1509 .flags = INET_PROTOSW_PERMANENT,
1484}; 1510};
1485 1511
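
The receive-side checks above implement RFC 6935/6936 zero-checksum UDP over IPv6: a datagram with uh->check == 0 is only accepted if the destination socket opted in with no_check6_rx (otherwise udp6_csum_zero_error() fires), and no_check6_tx lets a sender emit such datagrams. A hedged userspace sketch; the UDP_NO_CHECK6_* names and values come from the same patch series and may need local defines on older headers:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef UDP_NO_CHECK6_TX
    #define UDP_NO_CHECK6_TX 101    /* send datagrams with a zero checksum */
    #define UDP_NO_CHECK6_RX 102    /* accept datagrams with a zero checksum */
    #endif

    static int allow_zero_csum6(int fd)
    {
            int one = 1;

            if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX,
                           &one, sizeof(one)) < 0)
                    return -1;
            return setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX,
                              &one, sizeof(one));
    }
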
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b261ee8b83fc..0ae3d98f83e0 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -63,7 +63,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
63 if (unlikely(type & ~(SKB_GSO_UDP | 63 if (unlikely(type & ~(SKB_GSO_UDP |
64 SKB_GSO_DODGY | 64 SKB_GSO_DODGY |
65 SKB_GSO_UDP_TUNNEL | 65 SKB_GSO_UDP_TUNNEL |
66 SKB_GSO_UDP_TUNNEL_CSUM |
66 SKB_GSO_GRE | 67 SKB_GSO_GRE |
68 SKB_GSO_GRE_CSUM |
67 SKB_GSO_IPIP | 69 SKB_GSO_IPIP |
68 SKB_GSO_SIT | 70 SKB_GSO_SIT |
69 SKB_GSO_MPLS) || 71 SKB_GSO_MPLS) ||
@@ -76,7 +78,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
76 goto out; 78 goto out;
77 } 79 }
78 80
79 if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) 81 if (skb->encapsulation && skb_shinfo(skb)->gso_type &
82 (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
80 segs = skb_udp_tunnel_segment(skb, features); 83 segs = skb_udp_tunnel_segment(skb, features);
81 else { 84 else {
82 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot 85 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index dfcc4be46898..9cf097e206e9 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -64,7 +64,6 @@ static struct inet_protosw udplite6_protosw = {
64 .protocol = IPPROTO_UDPLITE, 64 .protocol = IPPROTO_UDPLITE,
65 .prot = &udplitev6_prot, 65 .prot = &udplitev6_prot,
66 .ops = &inet6_dgram_ops, 66 .ops = &inet6_dgram_ops,
67 .no_check = 0,
68 .flags = INET_PROTOSW_PERMANENT, 67 .flags = INET_PROTOSW_PERMANENT,
69}; 68};
70 69
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index b930d080c66f..433672d07d0b 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
78 if (mtu < IPV6_MIN_MTU) 78 if (mtu < IPV6_MIN_MTU)
79 mtu = IPV6_MIN_MTU; 79 mtu = IPV6_MIN_MTU;
80 80
81 if (!skb->local_df && skb->len > mtu) { 81 if (!skb->ignore_df && skb->len > mtu) {
82 skb->dev = dst->dev; 82 skb->dev = dst->dev;
83 83
84 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
@@ -114,7 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 if (err) 114 if (err)
115 return err; 115 return err;
116 116
117 skb->local_df = 1; 117 skb->ignore_df = 1;
118 118
119 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
120} 120}
@@ -153,7 +153,7 @@ static int __xfrm6_output(struct sk_buff *skb)
153 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 153 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
154 xfrm6_local_rxpmtu(skb, mtu); 154 xfrm6_local_rxpmtu(skb, mtu);
155 return -EMSGSIZE; 155 return -EMSGSIZE;
156 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 156 } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
157 xfrm_local_error(skb, mtu); 157 xfrm_local_error(skb, mtu);
158 return -EMSGSIZE; 158 return -EMSGSIZE;
159 } 159 }
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 41e4e93cb3aa..91729b807c7d 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1353,7 +1353,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol,
1353 1353
1354 sk_refcnt_debug_inc(sk); 1354 sk_refcnt_debug_inc(sk);
1355 sock_init_data(sock, sk); 1355 sock_init_data(sock, sk);
1356 sk->sk_no_check = 1; /* Checksum off by default */ 1356 sk->sk_no_check_tx = 1; /* Checksum off by default */
1357 sock->ops = &ipx_dgram_ops; 1357 sock->ops = &ipx_dgram_ops;
1358 rc = 0; 1358 rc = 0;
1359out: 1359out:
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index c1f03185c5e1..67e7ad3d46b1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -236,7 +236,8 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
236 } 236 }
237 237
238 /* Apply checksum. Not allowed on 802.3 links. */ 238 /* Apply checksum. Not allowed on 802.3 links. */
239 if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) 239 if (sk->sk_no_check_tx ||
240 intrfc->if_dlink_type == htons(IPX_FRAME_8023))
240 ipx->ipx_checksum = htons(0xFFFF); 241 ipx->ipx_checksum = htons(0xFFFF);
241 else 242 else
242 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); 243 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 8c9d7302c846..7a95fa4a3de1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -682,6 +682,18 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
682 return NULL; 682 return NULL;
683} 683}
684 684
685static void __iucv_auto_name(struct iucv_sock *iucv)
686{
687 char name[12];
688
689 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
690 while (__iucv_get_sock_by_name(name)) {
691 sprintf(name, "%08x",
692 atomic_inc_return(&iucv_sk_list.autobind_name));
693 }
694 memcpy(iucv->src_name, name, 8);
695}
696
685/* Bind an unbound socket */ 697/* Bind an unbound socket */
686static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, 698static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
687 int addr_len) 699 int addr_len)
@@ -724,8 +736,12 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
724 rcu_read_lock(); 736 rcu_read_lock();
725 for_each_netdev_rcu(&init_net, dev) { 737 for_each_netdev_rcu(&init_net, dev) {
726 if (!memcmp(dev->perm_addr, uid, 8)) { 738 if (!memcmp(dev->perm_addr, uid, 8)) {
727 memcpy(iucv->src_name, sa->siucv_name, 8);
728 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); 739 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
 740 /* Check for uninitialized siucv_name */
 741 if (strncmp(sa->siucv_name, "        ", 8) == 0)
742 __iucv_auto_name(iucv);
743 else
744 memcpy(iucv->src_name, sa->siucv_name, 8);
729 sk->sk_bound_dev_if = dev->ifindex; 745 sk->sk_bound_dev_if = dev->ifindex;
730 iucv->hs_dev = dev; 746 iucv->hs_dev = dev;
731 dev_hold(dev); 747 dev_hold(dev);
@@ -763,7 +779,6 @@ done:
763static int iucv_sock_autobind(struct sock *sk) 779static int iucv_sock_autobind(struct sock *sk)
764{ 780{
765 struct iucv_sock *iucv = iucv_sk(sk); 781 struct iucv_sock *iucv = iucv_sk(sk);
766 char name[12];
767 int err = 0; 782 int err = 0;
768 783
769 if (unlikely(!pr_iucv)) 784 if (unlikely(!pr_iucv))
@@ -772,17 +787,9 @@ static int iucv_sock_autobind(struct sock *sk)
772 memcpy(iucv->src_user_id, iucv_userid, 8); 787 memcpy(iucv->src_user_id, iucv_userid, 8);
773 788
774 write_lock_bh(&iucv_sk_list.lock); 789 write_lock_bh(&iucv_sk_list.lock);
775 790 __iucv_auto_name(iucv);
776 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
777 while (__iucv_get_sock_by_name(name)) {
778 sprintf(name, "%08x",
779 atomic_inc_return(&iucv_sk_list.autobind_name));
780 }
781
782 write_unlock_bh(&iucv_sk_list.lock); 791 write_unlock_bh(&iucv_sk_list.lock);
783 792
784 memcpy(&iucv->src_name, name, 8);
785
786 if (!iucv->msglimit) 793 if (!iucv->msglimit)
787 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; 794 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
788 795
@@ -1936,11 +1943,10 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1936 sk_acceptq_is_full(sk) || 1943 sk_acceptq_is_full(sk) ||
1937 !nsk) { 1944 !nsk) {
1938 /* error on server socket - connection refused */ 1945 /* error on server socket - connection refused */
1939 if (nsk)
1940 sk_free(nsk);
1941 afiucv_swap_src_dest(skb); 1946 afiucv_swap_src_dest(skb);
1942 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; 1947 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1943 err = dev_queue_xmit(skb); 1948 err = dev_queue_xmit(skb);
1949 iucv_sock_kill(nsk);
1944 bh_unlock_sock(sk); 1950 bh_unlock_sock(sk);
1945 goto out; 1951 goto out;
1946 } 1952 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f3c83073afc4..ba2a2f95911c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1476,9 +1476,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
1476 else 1476 else
1477 err = xfrm_state_update(x); 1477 err = xfrm_state_update(x);
1478 1478
1479 xfrm_audit_state_add(x, err ? 0 : 1, 1479 xfrm_audit_state_add(x, err ? 0 : 1, true);
1480 audit_get_loginuid(current),
1481 audit_get_sessionid(current), 0);
1482 1480
1483 if (err < 0) { 1481 if (err < 0) {
1484 x->km.state = XFRM_STATE_DEAD; 1482 x->km.state = XFRM_STATE_DEAD;
@@ -1532,9 +1530,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
1532 c.event = XFRM_MSG_DELSA; 1530 c.event = XFRM_MSG_DELSA;
1533 km_state_notify(x, &c); 1531 km_state_notify(x, &c);
1534out: 1532out:
1535 xfrm_audit_state_delete(x, err ? 0 : 1, 1533 xfrm_audit_state_delete(x, err ? 0 : 1, true);
1536 audit_get_loginuid(current),
1537 audit_get_sessionid(current), 0);
1538 xfrm_state_put(x); 1534 xfrm_state_put(x);
1539 1535
1540 return err; 1536 return err;
@@ -1726,17 +1722,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1726 struct net *net = sock_net(sk); 1722 struct net *net = sock_net(sk);
1727 unsigned int proto; 1723 unsigned int proto;
1728 struct km_event c; 1724 struct km_event c;
1729 struct xfrm_audit audit_info;
1730 int err, err2; 1725 int err, err2;
1731 1726
1732 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1727 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1733 if (proto == 0) 1728 if (proto == 0)
1734 return -EINVAL; 1729 return -EINVAL;
1735 1730
1736 audit_info.loginuid = audit_get_loginuid(current); 1731 err = xfrm_state_flush(net, proto, true);
1737 audit_info.sessionid = audit_get_sessionid(current);
1738 audit_info.secid = 0;
1739 err = xfrm_state_flush(net, proto, &audit_info);
1740 err2 = unicast_flush_resp(sk, hdr); 1732 err2 = unicast_flush_resp(sk, hdr);
1741 if (err || err2) { 1733 if (err || err2) {
1742 if (err == -ESRCH) /* empty table - go quietly */ 1734 if (err == -ESRCH) /* empty table - go quietly */
@@ -2288,9 +2280,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2288 err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, 2280 err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
2289 hdr->sadb_msg_type != SADB_X_SPDUPDATE); 2281 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
2290 2282
2291 xfrm_audit_policy_add(xp, err ? 0 : 1, 2283 xfrm_audit_policy_add(xp, err ? 0 : 1, true);
2292 audit_get_loginuid(current),
2293 audit_get_sessionid(current), 0);
2294 2284
2295 if (err) 2285 if (err)
2296 goto out; 2286 goto out;
@@ -2372,9 +2362,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2372 if (xp == NULL) 2362 if (xp == NULL)
2373 return -ENOENT; 2363 return -ENOENT;
2374 2364
2375 xfrm_audit_policy_delete(xp, err ? 0 : 1, 2365 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
2376 audit_get_loginuid(current),
2377 audit_get_sessionid(current), 0);
2378 2366
2379 if (err) 2367 if (err)
2380 goto out; 2368 goto out;
@@ -2553,7 +2541,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2553 sel.sport_mask = htons(0xffff); 2541 sel.sport_mask = htons(0xffff);
2554 2542
2555 /* set destination address info of selector */ 2543 /* set destination address info of selector */
2556 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1], 2544 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
2557 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2545 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2558 sel.prefixlen_d = sa->sadb_address_prefixlen; 2546 sel.prefixlen_d = sa->sadb_address_prefixlen;
2559 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2547 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2622,9 +2610,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
2622 return -ENOENT; 2610 return -ENOENT;
2623 2611
2624 if (delete) { 2612 if (delete) {
2625 xfrm_audit_policy_delete(xp, err ? 0 : 1, 2613 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
2626 audit_get_loginuid(current),
2627 audit_get_sessionid(current), 0);
2628 2614
2629 if (err) 2615 if (err)
2630 goto out; 2616 goto out;
@@ -2733,13 +2719,9 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
2733{ 2719{
2734 struct net *net = sock_net(sk); 2720 struct net *net = sock_net(sk);
2735 struct km_event c; 2721 struct km_event c;
2736 struct xfrm_audit audit_info;
2737 int err, err2; 2722 int err, err2;
2738 2723
2739 audit_info.loginuid = audit_get_loginuid(current); 2724 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
2740 audit_info.sessionid = audit_get_sessionid(current);
2741 audit_info.secid = 0;
2742 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2743 err2 = unicast_flush_resp(sk, hdr); 2725 err2 = unicast_flush_resp(sk, hdr);
2744 if (err || err2) { 2726 if (err || err2) {
2745 if (err == -ESRCH) /* empty table - old silent behavior */ 2727 if (err == -ESRCH) /* empty table - old silent behavior */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a4e37d7158dc..bea259043205 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -495,52 +495,6 @@ out:
495 spin_unlock_bh(&session->reorder_q.lock); 495 spin_unlock_bh(&session->reorder_q.lock);
496} 496}
497 497
498static inline int l2tp_verify_udp_checksum(struct sock *sk,
499 struct sk_buff *skb)
500{
501 struct udphdr *uh = udp_hdr(skb);
502 u16 ulen = ntohs(uh->len);
503 __wsum psum;
504
505 if (sk->sk_no_check || skb_csum_unnecessary(skb))
506 return 0;
507
508#if IS_ENABLED(CONFIG_IPV6)
509 if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
510 if (!uh->check) {
511 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
512 return 1;
513 }
514 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
515 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
516 &ipv6_hdr(skb)->daddr, ulen,
517 IPPROTO_UDP, skb->csum)) {
518 skb->ip_summed = CHECKSUM_UNNECESSARY;
519 return 0;
520 }
521 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
522 &ipv6_hdr(skb)->daddr,
523 skb->len, IPPROTO_UDP,
524 0));
525 } else
526#endif
527 {
528 struct inet_sock *inet;
529 if (!uh->check)
530 return 0;
531 inet = inet_sk(sk);
532 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
533 ulen, IPPROTO_UDP, 0);
534
535 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
536 !csum_fold(csum_add(psum, skb->csum)))
537 return 0;
538 skb->csum = psum;
539 }
540
541 return __skb_checksum_complete(skb);
542}
543
544static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr) 498static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
545{ 499{
546 u32 nws; 500 u32 nws;
@@ -895,8 +849,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
895 u16 version; 849 u16 version;
896 int length; 850 int length;
897 851
 898 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 852 /* UDP has verified checksum */
899 goto discard_bad_csum;
900 853
901 /* UDP always verifies the packet length. */ 854 /* UDP always verifies the packet length. */
902 __skb_pull(skb, sizeof(struct udphdr)); 855 __skb_pull(skb, sizeof(struct udphdr));
@@ -979,14 +932,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
979 932
980 return 0; 933 return 0;
981 934
982discard_bad_csum:
983 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
984 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
985 atomic_long_inc(&tunnel->stats.rx_errors);
986 kfree_skb(skb);
987
988 return 0;
989
990error: 935error:
991 /* Put UDP header back */ 936 /* Put UDP header back */
992 __skb_push(skb, sizeof(struct udphdr)); 937 __skb_push(skb, sizeof(struct udphdr));
@@ -1128,7 +1073,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1128 } 1073 }
1129 1074
1130 /* Queue the packet to IP for output */ 1075 /* Queue the packet to IP for output */
1131 skb->local_df = 1; 1076 skb->ignore_df = 1;
1132#if IS_ENABLED(CONFIG_IPV6) 1077#if IS_ENABLED(CONFIG_IPV6)
1133 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1078 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
1134 error = inet6_csk_xmit(tunnel->sock, skb, NULL); 1079 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1150,31 +1095,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1150 return 0; 1095 return 0;
1151} 1096}
1152 1097
1153#if IS_ENABLED(CONFIG_IPV6)
1154static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1155 int udp_len)
1156{
1157 struct ipv6_pinfo *np = inet6_sk(sk);
1158 struct udphdr *uh = udp_hdr(skb);
1159
1160 if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1161 !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1162 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1164 uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
1165 IPPROTO_UDP, csum);
1166 if (uh->check == 0)
1167 uh->check = CSUM_MANGLED_0;
1168 } else {
1169 skb->ip_summed = CHECKSUM_PARTIAL;
1170 skb->csum_start = skb_transport_header(skb) - skb->head;
1171 skb->csum_offset = offsetof(struct udphdr, check);
1172 uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
1173 udp_len, IPPROTO_UDP, 0);
1174 }
1175}
1176#endif
1177
1178/* If caller requires the skb to have a ppp header, the header must be 1098/* If caller requires the skb to have a ppp header, the header must be
1179 * inserted in the skb data before calling this function. 1099 * inserted in the skb data before calling this function.
1180 */ 1100 */
@@ -1186,7 +1106,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1186 struct flowi *fl; 1106 struct flowi *fl;
1187 struct udphdr *uh; 1107 struct udphdr *uh;
1188 struct inet_sock *inet; 1108 struct inet_sock *inet;
1189 __wsum csum;
1190 int headroom; 1109 int headroom;
1191 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1110 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1192 int udp_len; 1111 int udp_len;
@@ -1235,33 +1154,17 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1235 uh->dest = inet->inet_dport; 1154 uh->dest = inet->inet_dport;
1236 udp_len = uhlen + hdr_len + data_len; 1155 udp_len = uhlen + hdr_len + data_len;
1237 uh->len = htons(udp_len); 1156 uh->len = htons(udp_len);
1238 uh->check = 0;
1239 1157
1240 /* Calculate UDP checksum if configured to do so */ 1158 /* Calculate UDP checksum if configured to do so */
1241#if IS_ENABLED(CONFIG_IPV6) 1159#if IS_ENABLED(CONFIG_IPV6)
1242 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1160 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1243 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1161 udp6_set_csum(udp_get_no_check6_tx(sk),
1162 skb, &inet6_sk(sk)->saddr,
1163 &sk->sk_v6_daddr, udp_len);
1244 else 1164 else
1245#endif 1165#endif
1246 if (sk->sk_no_check == UDP_CSUM_NOXMIT) 1166 udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1247 skb->ip_summed = CHECKSUM_NONE; 1167 inet->inet_daddr, udp_len);
1248 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1249 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1250 skb->ip_summed = CHECKSUM_COMPLETE;
1251 csum = skb_checksum(skb, 0, udp_len, 0);
1252 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1253 inet->inet_daddr,
1254 udp_len, IPPROTO_UDP, csum);
1255 if (uh->check == 0)
1256 uh->check = CSUM_MANGLED_0;
1257 } else {
1258 skb->ip_summed = CHECKSUM_PARTIAL;
1259 skb->csum_start = skb_transport_header(skb) - skb->head;
1260 skb->csum_offset = offsetof(struct udphdr, check);
1261 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1262 inet->inet_daddr,
1263 udp_len, IPPROTO_UDP, 0);
1264 }
1265 break; 1168 break;
1266 1169
1267 case L2TP_ENCAPTYPE_IP: 1170 case L2TP_ENCAPTYPE_IP:
@@ -1490,6 +1393,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
1490 sizeof(udp6_addr), 0); 1393 sizeof(udp6_addr), 0);
1491 if (err < 0) 1394 if (err < 0)
1492 goto out; 1395 goto out;
1396
1397 if (cfg->udp6_zero_tx_checksums)
1398 udp_set_no_check6_tx(sock->sk, true);
1399 if (cfg->udp6_zero_rx_checksums)
1400 udp_set_no_check6_rx(sock->sk, true);
1493 } else 1401 } else
1494#endif 1402#endif
1495 { 1403 {
@@ -1518,7 +1426,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
1518 } 1426 }
1519 1427
1520 if (!cfg->use_udp_checksums) 1428 if (!cfg->use_udp_checksums)
1521 sock->sk->sk_no_check = UDP_CSUM_NOXMIT; 1429 sock->sk->sk_no_check_tx = 1;
1522 1430
1523 break; 1431 break;
1524 1432
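
The three-way branch deleted from l2tp_xmit_skb() (checksum off, software checksum, or CHECKSUM_PARTIAL offload) is exactly what the new udp_set_csum() helper centralizes. Restated as a self-contained kernel-style sketch mirroring the removed lines, not quoting the helper's actual body:

    static void udp_set_csum_sketch(bool nocheck, struct sk_buff *skb,
                                    __be32 saddr, __be32 daddr, int len)
    {
            struct udphdr *uh = udp_hdr(skb);

            uh->check = 0;
            if (nocheck) {
                    skb->ip_summed = CHECKSUM_NONE;
            } else if (skb_dst(skb) && skb_dst(skb)->dev &&
                       !(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
                    /* No offload available: checksum in software. */
                    skb->ip_summed = CHECKSUM_COMPLETE;
                    uh->check = csum_tcpudp_magic(saddr, daddr, len,
                                                  IPPROTO_UDP,
                                                  skb_checksum(skb, 0, len, 0));
                    if (uh->check == 0)
                            uh->check = CSUM_MANGLED_0;
            } else {
                    /* Seed the pseudo-header; the NIC finishes the sum. */
                    skb->ip_summed = CHECKSUM_PARTIAL;
                    skb->csum_start = skb_transport_header(skb) - skb->head;
                    skb->csum_offset = offsetof(struct udphdr, check);
                    uh->check = ~csum_tcpudp_magic(saddr, daddr, len,
                                                   IPPROTO_UDP, 0);
            }
    }
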
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 3f93ccd6ba97..68aa9ffd4ae4 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -162,7 +162,9 @@ struct l2tp_tunnel_cfg {
162#endif 162#endif
163 u16 local_udp_port; 163 u16 local_udp_port;
164 u16 peer_udp_port; 164 u16 peer_udp_port;
165 unsigned int use_udp_checksums:1; 165 unsigned int use_udp_checksums:1,
166 udp6_zero_tx_checksums:1,
167 udp6_zero_rx_checksums:1;
166}; 168};
167 169
168struct l2tp_tunnel { 170struct l2tp_tunnel {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 3397fe6897c0..369a9822488c 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -606,7 +606,6 @@ static struct inet_protosw l2tp_ip_protosw = {
606 .protocol = IPPROTO_L2TP, 606 .protocol = IPPROTO_L2TP,
607 .prot = &l2tp_ip_prot, 607 .prot = &l2tp_ip_prot,
608 .ops = &l2tp_ip_ops, 608 .ops = &l2tp_ip_ops,
609 .no_check = 0,
610}; 609};
611 610
612static struct net_protocol l2tp_ip_protocol __read_mostly = { 611static struct net_protocol l2tp_ip_protocol __read_mostly = {
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7704ea9502fd..f3f98a156cee 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -605,14 +605,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
605 goto out; 605 goto out;
606 } 606 }
607 607
608 if (hlimit < 0) { 608 if (hlimit < 0)
609 if (ipv6_addr_is_multicast(&fl6.daddr)) 609 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
610 hlimit = np->mcast_hops;
611 else
612 hlimit = np->hop_limit;
613 if (hlimit < 0)
614 hlimit = ip6_dst_hoplimit(dst);
615 }
616 610
617 if (tclass < 0) 611 if (tclass < 0)
618 tclass = np->tclass; 612 tclass = np->tclass;
@@ -761,7 +755,6 @@ static struct inet_protosw l2tp_ip6_protosw = {
761 .protocol = IPPROTO_L2TP, 755 .protocol = IPPROTO_L2TP,
762 .prot = &l2tp_ip6_prot, 756 .prot = &l2tp_ip6_prot,
763 .ops = &l2tp_ip6_ops, 757 .ops = &l2tp_ip6_ops,
764 .no_check = 0,
765}; 758};
766 759
767static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { 760static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bd7387adea9e..0ac907adb2f4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -161,6 +161,13 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
161 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); 161 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
162 if (info->attrs[L2TP_ATTR_UDP_CSUM]) 162 if (info->attrs[L2TP_ATTR_UDP_CSUM])
163 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); 163 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
164
165#if IS_ENABLED(CONFIG_IPV6)
166 if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX])
167 cfg.udp6_zero_tx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
168 if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX])
169 cfg.udp6_zero_rx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
170#endif
164 } 171 }
165 172
166 if (info->attrs[L2TP_ATTR_DEBUG]) 173 if (info->attrs[L2TP_ATTR_DEBUG])
@@ -297,8 +304,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
297 case L2TP_ENCAPTYPE_UDP: 304 case L2TP_ENCAPTYPE_UDP:
298 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || 305 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
299 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || 306 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
300 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, 307 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
301 (sk->sk_no_check != UDP_CSUM_NOXMIT)))
302 goto nla_put_failure; 308 goto nla_put_failure;
303 /* NOBREAK */ 309 /* NOBREAK */
304 case L2TP_ENCAPTYPE_IP: 310 case L2TP_ENCAPTYPE_IP:
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 9d7d840aac6d..1e46ffa69167 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -25,7 +25,8 @@ mac80211-y := \
25 wme.o \ 25 wme.o \
26 event.o \ 26 event.o \
27 chan.o \ 27 chan.o \
28 trace.o mlme.o 28 trace.o mlme.o \
29 tdls.o
29 30
30mac80211-$(CONFIG_MAC80211_LEDS) += led.o 31mac80211-$(CONFIG_MAC80211_LEDS) += led.o
31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 32mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7c7df475a401..ec24378caaaf 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -23,12 +23,13 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
23 u8 *data, size_t data_len, u8 *mic) 23 u8 *data, size_t data_len, u8 *mic)
24{ 24{
25 struct scatterlist assoc, pt, ct[2]; 25 struct scatterlist assoc, pt, ct[2];
26 struct {
27 struct aead_request req;
28 u8 priv[crypto_aead_reqsize(tfm)];
29 } aead_req;
30 26
31 memset(&aead_req, 0, sizeof(aead_req)); 27 char aead_req_data[sizeof(struct aead_request) +
28 crypto_aead_reqsize(tfm)]
29 __aligned(__alignof__(struct aead_request));
30 struct aead_request *aead_req = (void *) aead_req_data;
31
32 memset(aead_req, 0, sizeof(aead_req_data));
32 33
33 sg_init_one(&pt, data, data_len); 34 sg_init_one(&pt, data, data_len);
34 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); 35 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -36,23 +37,23 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
36 sg_set_buf(&ct[0], data, data_len); 37 sg_set_buf(&ct[0], data, data_len);
37 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN); 38 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
38 39
39 aead_request_set_tfm(&aead_req.req, tfm); 40 aead_request_set_tfm(aead_req, tfm);
40 aead_request_set_assoc(&aead_req.req, &assoc, assoc.length); 41 aead_request_set_assoc(aead_req, &assoc, assoc.length);
41 aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0); 42 aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0);
42 43
43 crypto_aead_encrypt(&aead_req.req); 44 crypto_aead_encrypt(aead_req);
44} 45}
45 46
46int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 47int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
47 u8 *data, size_t data_len, u8 *mic) 48 u8 *data, size_t data_len, u8 *mic)
48{ 49{
49 struct scatterlist assoc, pt, ct[2]; 50 struct scatterlist assoc, pt, ct[2];
50 struct { 51 char aead_req_data[sizeof(struct aead_request) +
51 struct aead_request req; 52 crypto_aead_reqsize(tfm)]
52 u8 priv[crypto_aead_reqsize(tfm)]; 53 __aligned(__alignof__(struct aead_request));
53 } aead_req; 54 struct aead_request *aead_req = (void *) aead_req_data;
54 55
55 memset(&aead_req, 0, sizeof(aead_req)); 56 memset(aead_req, 0, sizeof(aead_req_data));
56 57
57 sg_init_one(&pt, data, data_len); 58 sg_init_one(&pt, data, data_len);
58 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); 59 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -60,12 +61,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
60 sg_set_buf(&ct[0], data, data_len); 61 sg_set_buf(&ct[0], data, data_len);
61 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN); 62 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
62 63
63 aead_request_set_tfm(&aead_req.req, tfm); 64 aead_request_set_tfm(aead_req, tfm);
64 aead_request_set_assoc(&aead_req.req, &assoc, assoc.length); 65 aead_request_set_assoc(aead_req, &assoc, assoc.length);
65 aead_request_set_crypt(&aead_req.req, ct, &pt, 66 aead_request_set_crypt(aead_req, ct, &pt,
66 data_len + IEEE80211_CCMP_MIC_LEN, b_0); 67 data_len + IEEE80211_CCMP_MIC_LEN, b_0);
67 68
68 return crypto_aead_decrypt(&aead_req.req); 69 return crypto_aead_decrypt(aead_req);
69} 70}
70 71
71struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]) 72struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
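
The rewrite above replaces a struct containing a variable-length member (a GCC extension that is not valid ISO C and gives no alignment guarantee for the embedded request) with an explicitly aligned char buffer plus a cast. The pattern in isolation, as a sketch:

    void aead_on_stack_example(struct crypto_aead *tfm)
    {
            /* Room for the fixed request header plus the tfm's private
             * context, aligned as strictly as struct aead_request needs.
             */
            char req_data[sizeof(struct aead_request) +
                          crypto_aead_reqsize(tfm)]
                    __aligned(__alignof__(struct aead_request));
            struct aead_request *req = (void *)req_data;

            memset(req, 0, sizeof(req_data));
            aead_request_set_tfm(req, tfm);
            /* ...set assoc/crypt buffers and call crypto_aead_encrypt()... */
    }
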
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index aaa59d719592..d7513a503be1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -109,6 +109,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
109static int ieee80211_start_p2p_device(struct wiphy *wiphy, 109static int ieee80211_start_p2p_device(struct wiphy *wiphy,
110 struct wireless_dev *wdev) 110 struct wireless_dev *wdev)
111{ 111{
112 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
113 int ret;
114
115 mutex_lock(&sdata->local->chanctx_mtx);
116 ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
117 mutex_unlock(&sdata->local->chanctx_mtx);
118 if (ret < 0)
119 return ret;
120
112 return ieee80211_do_open(wdev, true); 121 return ieee80211_do_open(wdev, true);
113} 122}
114 123
@@ -463,8 +472,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
463{ 472{
464 struct ieee80211_sub_if_data *sdata = sta->sdata; 473 struct ieee80211_sub_if_data *sdata = sta->sdata;
465 struct ieee80211_local *local = sdata->local; 474 struct ieee80211_local *local = sdata->local;
475 struct rate_control_ref *ref = local->rate_ctrl;
466 struct timespec uptime; 476 struct timespec uptime;
467 u64 packets = 0; 477 u64 packets = 0;
478 u32 thr = 0;
468 int i, ac; 479 int i, ac;
469 480
470 sinfo->generation = sdata->local->sta_generation; 481 sinfo->generation = sdata->local->sta_generation;
@@ -578,6 +589,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
578 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 589 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
579 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 590 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
580 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 591 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
592
593 /* check if the driver has a SW RC implementation */
594 if (ref && ref->ops->get_expected_throughput)
595 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
596 else
597 thr = drv_get_expected_throughput(local, &sta->sta);
598
599 if (thr != 0) {
600 sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
601 sinfo->expected_throughput = thr;
602 }
581} 603}
582 604
583static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { 605static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
@@ -768,7 +790,7 @@ static void ieee80211_get_et_strings(struct wiphy *wiphy,
768} 790}
769 791
770static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, 792static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
771 int idx, u8 *mac, struct station_info *sinfo) 793 int idx, u8 *mac, struct station_info *sinfo)
772{ 794{
773 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 795 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
774 struct ieee80211_local *local = sdata->local; 796 struct ieee80211_local *local = sdata->local;
@@ -798,7 +820,7 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
798} 820}
799 821
800static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 822static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
801 u8 *mac, struct station_info *sinfo) 823 const u8 *mac, struct station_info *sinfo)
802{ 824{
803 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 825 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = sdata->local; 826 struct ieee80211_local *local = sdata->local;
@@ -972,13 +994,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
972 sdata->needed_rx_chains = sdata->local->rx_chains; 994 sdata->needed_rx_chains = sdata->local->rx_chains;
973 995
974 mutex_lock(&local->mtx); 996 mutex_lock(&local->mtx);
975 sdata->radar_required = params->radar_required;
976 err = ieee80211_vif_use_channel(sdata, &params->chandef, 997 err = ieee80211_vif_use_channel(sdata, &params->chandef,
977 IEEE80211_CHANCTX_SHARED); 998 IEEE80211_CHANCTX_SHARED);
999 if (!err)
1000 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
978 mutex_unlock(&local->mtx); 1001 mutex_unlock(&local->mtx);
979 if (err) 1002 if (err)
980 return err; 1003 return err;
981 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
982 1004
983 /* 1005 /*
984 * Apply control port protocol, this allows us to 1006 * Apply control port protocol, this allows us to
@@ -1075,6 +1097,31 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
1075 return 0; 1097 return 0;
1076} 1098}
1077 1099
1100bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
1101{
1102 struct ieee80211_sub_if_data *sdata;
1103
1104 lockdep_assert_held(&local->mtx);
1105
1106 rcu_read_lock();
1107 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1108 if (!ieee80211_sdata_running(sdata))
1109 continue;
1110
1111 if (!sdata->vif.csa_active)
1112 continue;
1113
1114 if (!sdata->csa_block_tx)
1115 continue;
1116
1117 rcu_read_unlock();
1118 return true;
1119 }
1120 rcu_read_unlock();
1121
1122 return false;
1123}
1124
1078static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) 1125static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1079{ 1126{
1080 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1127 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1092,7 +1139,14 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1092 old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata); 1139 old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
1093 1140
1094 /* abort any running channel switch */ 1141 /* abort any running channel switch */
1142 mutex_lock(&local->mtx);
1095 sdata->vif.csa_active = false; 1143 sdata->vif.csa_active = false;
1144 if (!ieee80211_csa_needs_block_tx(local))
1145 ieee80211_wake_queues_by_reason(&local->hw,
1146 IEEE80211_MAX_QUEUE_MAP,
1147 IEEE80211_QUEUE_STOP_REASON_CSA);
1148 mutex_unlock(&local->mtx);
1149
1096 kfree(sdata->u.ap.next_beacon); 1150 kfree(sdata->u.ap.next_beacon);
1097 sdata->u.ap.next_beacon = NULL; 1151 sdata->u.ap.next_beacon = NULL;
1098 1152
@@ -1131,8 +1185,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1131 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); 1185 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
1132 skb_queue_purge(&sdata->u.ap.ps.bc_buf); 1186 skb_queue_purge(&sdata->u.ap.ps.bc_buf);
1133 1187
1134 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
1135 mutex_lock(&local->mtx); 1188 mutex_lock(&local->mtx);
1189 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
1136 ieee80211_vif_release_channel(sdata); 1190 ieee80211_vif_release_channel(sdata);
1137 mutex_unlock(&local->mtx); 1191 mutex_unlock(&local->mtx);
1138 1192
@@ -1416,7 +1470,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1416} 1470}
1417 1471
1418static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, 1472static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1419 u8 *mac, struct station_parameters *params) 1473 const u8 *mac,
1474 struct station_parameters *params)
1420{ 1475{
1421 struct ieee80211_local *local = wiphy_priv(wiphy); 1476 struct ieee80211_local *local = wiphy_priv(wiphy);
1422 struct sta_info *sta; 1477 struct sta_info *sta;
@@ -1450,6 +1505,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1450 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) { 1505 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
1451 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 1506 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
1452 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 1507 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
1508 } else {
1509 sta->sta.tdls = true;
1453 } 1510 }
1454 1511
1455 err = sta_apply_parameters(local, sta, params); 1512 err = sta_apply_parameters(local, sta, params);
@@ -1483,7 +1540,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1483} 1540}
1484 1541
1485static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, 1542static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1486 u8 *mac) 1543 const u8 *mac)
1487{ 1544{
1488 struct ieee80211_sub_if_data *sdata; 1545 struct ieee80211_sub_if_data *sdata;
1489 1546
@@ -1497,7 +1554,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1497} 1554}
1498 1555
1499static int ieee80211_change_station(struct wiphy *wiphy, 1556static int ieee80211_change_station(struct wiphy *wiphy,
1500 struct net_device *dev, u8 *mac, 1557 struct net_device *dev, const u8 *mac,
1501 struct station_parameters *params) 1558 struct station_parameters *params)
1502{ 1559{
1503 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1560 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1566,7 +1623,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1566 1623
1567 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1624 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1568 sta->sdata->u.vlan.sta) { 1625 sta->sdata->u.vlan.sta) {
1569 rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL); 1626 RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
1570 prev_4addr = true; 1627 prev_4addr = true;
1571 } 1628 }
1572 1629
@@ -1622,7 +1679,7 @@ out_err:
1622 1679
1623#ifdef CONFIG_MAC80211_MESH 1680#ifdef CONFIG_MAC80211_MESH
1624static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 1681static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
1625 u8 *dst, u8 *next_hop) 1682 const u8 *dst, const u8 *next_hop)
1626{ 1683{
1627 struct ieee80211_sub_if_data *sdata; 1684 struct ieee80211_sub_if_data *sdata;
1628 struct mesh_path *mpath; 1685 struct mesh_path *mpath;
@@ -1650,7 +1707,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
1650} 1707}
1651 1708
1652static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, 1709static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
1653 u8 *dst) 1710 const u8 *dst)
1654{ 1711{
1655 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1712 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1656 1713
@@ -1661,9 +1718,8 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
1661 return 0; 1718 return 0;
1662} 1719}
1663 1720
1664static int ieee80211_change_mpath(struct wiphy *wiphy, 1721static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev,
1665 struct net_device *dev, 1722 const u8 *dst, const u8 *next_hop)
1666 u8 *dst, u8 *next_hop)
1667{ 1723{
1668 struct ieee80211_sub_if_data *sdata; 1724 struct ieee80211_sub_if_data *sdata;
1669 struct mesh_path *mpath; 1725 struct mesh_path *mpath;
@@ -1755,8 +1811,8 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
1755} 1811}
1756 1812
1757static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, 1813static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1758 int idx, u8 *dst, u8 *next_hop, 1814 int idx, u8 *dst, u8 *next_hop,
1759 struct mpath_info *pinfo) 1815 struct mpath_info *pinfo)
1760{ 1816{
1761 struct ieee80211_sub_if_data *sdata; 1817 struct ieee80211_sub_if_data *sdata;
1762 struct mesh_path *mpath; 1818 struct mesh_path *mpath;
@@ -2930,7 +2986,6 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
2930 /* whatever, but channel contexts should not complain about that one */ 2986 /* whatever, but channel contexts should not complain about that one */
2931 sdata->smps_mode = IEEE80211_SMPS_OFF; 2987 sdata->smps_mode = IEEE80211_SMPS_OFF;
2932 sdata->needed_rx_chains = local->rx_chains; 2988 sdata->needed_rx_chains = local->rx_chains;
2933 sdata->radar_required = true;
2934 2989
2935 err = ieee80211_vif_use_channel(sdata, chandef, 2990 err = ieee80211_vif_use_channel(sdata, chandef,
2936 IEEE80211_CHANCTX_SHARED); 2991 IEEE80211_CHANCTX_SHARED);
@@ -3011,26 +3066,11 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
3011} 3066}
3012EXPORT_SYMBOL(ieee80211_csa_finish); 3067EXPORT_SYMBOL(ieee80211_csa_finish);
3013 3068
3014static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) 3069static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
3070 u32 *changed)
3015{ 3071{
3016 struct ieee80211_local *local = sdata->local; 3072 int err;
3017 int err, changed = 0;
3018
3019 sdata_assert_lock(sdata);
3020
3021 mutex_lock(&local->mtx);
3022 sdata->radar_required = sdata->csa_radar_required;
3023 err = ieee80211_vif_change_channel(sdata, &changed);
3024 mutex_unlock(&local->mtx);
3025 if (WARN_ON(err < 0))
3026 return;
3027
3028 if (!local->use_chanctx) {
3029 local->_oper_chandef = sdata->csa_chandef;
3030 ieee80211_hw_config(local, 0);
3031 }
3032 3073
3033 sdata->vif.csa_active = false;
3034 switch (sdata->vif.type) { 3074 switch (sdata->vif.type) {
3035 case NL80211_IFTYPE_AP: 3075 case NL80211_IFTYPE_AP:
3036 err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon); 3076 err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
@@ -3038,35 +3078,74 @@ static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3038 sdata->u.ap.next_beacon = NULL; 3078 sdata->u.ap.next_beacon = NULL;
3039 3079
3040 if (err < 0) 3080 if (err < 0)
3041 return; 3081 return err;
3042 changed |= err; 3082 *changed |= err;
3043 break; 3083 break;
3044 case NL80211_IFTYPE_ADHOC: 3084 case NL80211_IFTYPE_ADHOC:
3045 err = ieee80211_ibss_finish_csa(sdata); 3085 err = ieee80211_ibss_finish_csa(sdata);
3046 if (err < 0) 3086 if (err < 0)
3047 return; 3087 return err;
3048 changed |= err; 3088 *changed |= err;
3049 break; 3089 break;
3050#ifdef CONFIG_MAC80211_MESH 3090#ifdef CONFIG_MAC80211_MESH
3051 case NL80211_IFTYPE_MESH_POINT: 3091 case NL80211_IFTYPE_MESH_POINT:
3052 err = ieee80211_mesh_finish_csa(sdata); 3092 err = ieee80211_mesh_finish_csa(sdata);
3053 if (err < 0) 3093 if (err < 0)
3054 return; 3094 return err;
3055 changed |= err; 3095 *changed |= err;
3056 break; 3096 break;
3057#endif 3097#endif
3058 default: 3098 default:
3059 WARN_ON(1); 3099 WARN_ON(1);
3060 return; 3100 return -EINVAL;
3061 } 3101 }
3062 3102
3103 return 0;
3104}
3105
3106static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3107{
3108 struct ieee80211_local *local = sdata->local;
3109 u32 changed = 0;
3110 int err;
3111
3112 sdata_assert_lock(sdata);
3113 lockdep_assert_held(&local->mtx);
3114
3115 sdata->radar_required = sdata->csa_radar_required;
3116 err = ieee80211_vif_change_channel(sdata, &changed);
3117 if (err < 0)
3118 return err;
3119
3120 if (!local->use_chanctx) {
3121 local->_oper_chandef = sdata->csa_chandef;
3122 ieee80211_hw_config(local, 0);
3123 }
3124
3125 sdata->vif.csa_active = false;
3126
3127 err = ieee80211_set_after_csa_beacon(sdata, &changed);
3128 if (err)
3129 return err;
3130
3063 ieee80211_bss_info_change_notify(sdata, changed); 3131 ieee80211_bss_info_change_notify(sdata, changed);
3132 cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
3064 3133
3065 ieee80211_wake_queues_by_reason(&sdata->local->hw, 3134 if (!ieee80211_csa_needs_block_tx(local))
3135 ieee80211_wake_queues_by_reason(&local->hw,
3066 IEEE80211_MAX_QUEUE_MAP, 3136 IEEE80211_MAX_QUEUE_MAP,
3067 IEEE80211_QUEUE_STOP_REASON_CSA); 3137 IEEE80211_QUEUE_STOP_REASON_CSA);
3068 3138
3069 cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef); 3139 return 0;
3140}
3141
3142static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3143{
3144 if (__ieee80211_csa_finalize(sdata)) {
3145 sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
3146 cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
3147 GFP_KERNEL);
3148 }
3070} 3149}
3071 3150
3072void ieee80211_csa_finalize_work(struct work_struct *work) 3151void ieee80211_csa_finalize_work(struct work_struct *work)
@@ -3074,8 +3153,11 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
3074 struct ieee80211_sub_if_data *sdata = 3153 struct ieee80211_sub_if_data *sdata =
3075 container_of(work, struct ieee80211_sub_if_data, 3154 container_of(work, struct ieee80211_sub_if_data,
3076 csa_finalize_work); 3155 csa_finalize_work);
3156 struct ieee80211_local *local = sdata->local;
3077 3157
3078 sdata_lock(sdata); 3158 sdata_lock(sdata);
3159 mutex_lock(&local->mtx);
3160
3079 /* AP might have been stopped while waiting for the lock. */ 3161 /* AP might have been stopped while waiting for the lock. */
3080 if (!sdata->vif.csa_active) 3162 if (!sdata->vif.csa_active)
3081 goto unlock; 3163 goto unlock;
@@ -3086,6 +3168,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
3086 ieee80211_csa_finalize(sdata); 3168 ieee80211_csa_finalize(sdata);
3087 3169
3088unlock: 3170unlock:
3171 mutex_unlock(&local->mtx);
3089 sdata_unlock(sdata); 3172 sdata_unlock(sdata);
3090} 3173}
3091 3174
@@ -3121,9 +3204,25 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
3121 if (params->count <= 1) 3204 if (params->count <= 1)
3122 break; 3205 break;
3123 3206
3124 sdata->csa_counter_offset_beacon = 3207 if ((params->n_counter_offsets_beacon >
3125 params->counter_offset_beacon; 3208 IEEE80211_MAX_CSA_COUNTERS_NUM) ||
3126 sdata->csa_counter_offset_presp = params->counter_offset_presp; 3209 (params->n_counter_offsets_presp >
3210 IEEE80211_MAX_CSA_COUNTERS_NUM))
3211 return -EINVAL;
3212
3213 /* make sure we don't have garbage in other counters */
3214 memset(sdata->csa_counter_offset_beacon, 0,
3215 sizeof(sdata->csa_counter_offset_beacon));
3216 memset(sdata->csa_counter_offset_presp, 0,
3217 sizeof(sdata->csa_counter_offset_presp));
3218
3219 memcpy(sdata->csa_counter_offset_beacon,
3220 params->counter_offsets_beacon,
3221 params->n_counter_offsets_beacon * sizeof(u16));
3222 memcpy(sdata->csa_counter_offset_presp,
3223 params->counter_offsets_presp,
3224 params->n_counter_offsets_presp * sizeof(u16));
3225
3127 err = ieee80211_assign_beacon(sdata, &params->beacon_csa); 3226 err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
3128 if (err < 0) { 3227 if (err < 0) {
3129 kfree(sdata->u.ap.next_beacon); 3228 kfree(sdata->u.ap.next_beacon);
@@ -3212,16 +3311,18 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
3212 return 0; 3311 return 0;
3213} 3312}
3214 3313
3215int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, 3314static int
3216 struct cfg80211_csa_settings *params) 3315__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3316 struct cfg80211_csa_settings *params)
3217{ 3317{
3218 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3318 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3219 struct ieee80211_local *local = sdata->local; 3319 struct ieee80211_local *local = sdata->local;
3220 struct ieee80211_chanctx_conf *chanctx_conf; 3320 struct ieee80211_chanctx_conf *conf;
3221 struct ieee80211_chanctx *chanctx; 3321 struct ieee80211_chanctx *chanctx;
3222 int err, num_chanctx, changed = 0; 3322 int err, num_chanctx, changed = 0;
3223 3323
3224 sdata_assert_lock(sdata); 3324 sdata_assert_lock(sdata);
3325 lockdep_assert_held(&local->mtx);
3225 3326
3226 if (!list_empty(&local->roc_list) || local->scanning) 3327 if (!list_empty(&local->roc_list) || local->scanning)
3227 return -EBUSY; 3328 return -EBUSY;
@@ -3233,23 +3334,24 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3233 &sdata->vif.bss_conf.chandef)) 3334 &sdata->vif.bss_conf.chandef))
3234 return -EINVAL; 3335 return -EINVAL;
3235 3336
3236 rcu_read_lock(); 3337 mutex_lock(&local->chanctx_mtx);
3237 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3338 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
3238 if (!chanctx_conf) { 3339 lockdep_is_held(&local->chanctx_mtx));
3239 rcu_read_unlock(); 3340 if (!conf) {
3341 mutex_unlock(&local->chanctx_mtx);
3240 return -EBUSY; 3342 return -EBUSY;
3241 } 3343 }
3242 3344
3243 /* don't handle multi-VIF cases */ 3345 /* don't handle multi-VIF cases */
3244 chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); 3346 chanctx = container_of(conf, struct ieee80211_chanctx, conf);
3245 if (chanctx->refcount > 1) { 3347 if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
3246 rcu_read_unlock(); 3348 mutex_unlock(&local->chanctx_mtx);
3247 return -EBUSY; 3349 return -EBUSY;
3248 } 3350 }
3249 num_chanctx = 0; 3351 num_chanctx = 0;
3250 list_for_each_entry_rcu(chanctx, &local->chanctx_list, list) 3352 list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
3251 num_chanctx++; 3353 num_chanctx++;
3252 rcu_read_unlock(); 3354 mutex_unlock(&local->chanctx_mtx);
3253 3355
3254 if (num_chanctx > 1) 3356 if (num_chanctx > 1)
3255 return -EBUSY; 3357 return -EBUSY;
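
The hunk above trades the RCU read-side critical section for local->chanctx_mtx: the code now inspects how many users a channel context has, and that count must stay stable between the check and the decision, which RCU alone cannot guarantee. A rough userspace analogy of reading under the writers' lock (names are illustrative, not mac80211 API):

/* Userspace analogy for the rcu_read_lock() -> mutex change above:
 * the check needs chanctx_users to stay stable until acted upon, so
 * it is read under the writers' lock.  Names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chanctx_mtx = PTHREAD_MUTEX_INITIALIZER;
static int chanctx_users;	/* only written with chanctx_mtx held */

static int channel_switch_allowed(void)
{
	int busy;

	pthread_mutex_lock(&chanctx_mtx);
	/* unlike an RCU reader, we know this cannot change under us */
	busy = chanctx_users > 1;
	pthread_mutex_unlock(&chanctx_mtx);

	return !busy;
}

int main(void)
{
	chanctx_users = 1;
	printf("switch allowed: %d\n", channel_switch_allowed());
	return 0;
}
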
@@ -3263,15 +3365,16 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3263 return err; 3365 return err;
3264 3366
3265 sdata->csa_radar_required = params->radar_required; 3367 sdata->csa_radar_required = params->radar_required;
3266
3267 if (params->block_tx)
3268 ieee80211_stop_queues_by_reason(&local->hw,
3269 IEEE80211_MAX_QUEUE_MAP,
3270 IEEE80211_QUEUE_STOP_REASON_CSA);
3271
3272 sdata->csa_chandef = params->chandef; 3368 sdata->csa_chandef = params->chandef;
3369 sdata->csa_block_tx = params->block_tx;
3370 sdata->csa_current_counter = params->count;
3273 sdata->vif.csa_active = true; 3371 sdata->vif.csa_active = true;
3274 3372
3373 if (sdata->csa_block_tx)
3374 ieee80211_stop_queues_by_reason(&local->hw,
3375 IEEE80211_MAX_QUEUE_MAP,
3376 IEEE80211_QUEUE_STOP_REASON_CSA);
3377
3275 if (changed) { 3378 if (changed) {
3276 ieee80211_bss_info_change_notify(sdata, changed); 3379 ieee80211_bss_info_change_notify(sdata, changed);
3277 drv_channel_switch_beacon(sdata, &params->chandef); 3380 drv_channel_switch_beacon(sdata, &params->chandef);
@@ -3283,6 +3386,20 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3283 return 0; 3386 return 0;
3284} 3387}
3285 3388
3389int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3390 struct cfg80211_csa_settings *params)
3391{
3392 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3393 struct ieee80211_local *local = sdata->local;
3394 int err;
3395
3396 mutex_lock(&local->mtx);
3397 err = __ieee80211_channel_switch(wiphy, dev, params);
3398 mutex_unlock(&local->mtx);
3399
3400 return err;
3401}
3402
3286static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 3403static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3287 struct cfg80211_mgmt_tx_params *params, 3404 struct cfg80211_mgmt_tx_params *params,
3288 u64 *cookie) 3405 u64 *cookie)
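
The new ieee80211_channel_switch() wrapper above follows the usual kernel lock-wrapper idiom: the exported entry point takes local->mtx, and the double-underscore worker only asserts that it is held, so internal callers that already own the lock can invoke the worker directly. A minimal userspace sketch of the pattern, assuming pthreads in place of the kernel mutex (names illustrative):

/* Minimal userspace sketch of the lock-wrapper idiom above; pthreads
 * stand in for the kernel mutex and names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_mtx = PTHREAD_MUTEX_INITIALIZER;

/* worker: caller must hold big_mtx (the kernel asserts this with
 * lockdep_assert_held(&local->mtx))
 */
static int __channel_switch(int chan)
{
	printf("switching to channel %d\n", chan);
	return 0;
}

/* exported entry point: takes the lock, then calls the worker */
static int channel_switch(int chan)
{
	int err;

	pthread_mutex_lock(&big_mtx);
	err = __channel_switch(chan);
	pthread_mutex_unlock(&big_mtx);

	return err;
}

int main(void)
{
	return channel_switch(11);
}
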
@@ -3295,6 +3412,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3295 bool need_offchan = false; 3412 bool need_offchan = false;
3296 u32 flags; 3413 u32 flags;
3297 int ret; 3414 int ret;
3415 u8 *data;
3298 3416
3299 if (params->dont_wait_for_ack) 3417 if (params->dont_wait_for_ack)
3300 flags = IEEE80211_TX_CTL_NO_ACK; 3418 flags = IEEE80211_TX_CTL_NO_ACK;
@@ -3388,7 +3506,20 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3388 } 3506 }
3389 skb_reserve(skb, local->hw.extra_tx_headroom); 3507 skb_reserve(skb, local->hw.extra_tx_headroom);
3390 3508
3391 memcpy(skb_put(skb, params->len), params->buf, params->len); 3509 data = skb_put(skb, params->len);
3510 memcpy(data, params->buf, params->len);
3511
3512 /* Update CSA counters */
3513 if (sdata->vif.csa_active &&
3514 (sdata->vif.type == NL80211_IFTYPE_AP ||
3515 sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
3516 params->n_csa_offsets) {
3517 int i;
3518 u8 c = sdata->csa_current_counter;
3519
3520 for (i = 0; i < params->n_csa_offsets; i++)
3521 data[params->csa_offsets[i]] = c;
3522 }
3392 3523
3393 IEEE80211_SKB_CB(skb)->flags = flags; 3524 IEEE80211_SKB_CB(skb)->flags = flags;
3394 3525
@@ -3497,320 +3628,6 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
3497 return 0; 3628 return 0;
3498} 3629}
3499 3630
3500static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
3501{
3502 u8 *pos = (void *)skb_put(skb, 7);
3503
3504 *pos++ = WLAN_EID_EXT_CAPABILITY;
3505 *pos++ = 5; /* len */
3506 *pos++ = 0x0;
3507 *pos++ = 0x0;
3508 *pos++ = 0x0;
3509 *pos++ = 0x0;
3510 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
3511}
3512
3513static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
3514{
3515 struct ieee80211_local *local = sdata->local;
3516 u16 capab;
3517
3518 capab = 0;
3519 if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
3520 return capab;
3521
3522 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
3523 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
3524 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
3525 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
3526
3527 return capab;
3528}
3529
3530static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
3531 u8 *peer, u8 *bssid)
3532{
3533 struct ieee80211_tdls_lnkie *lnkid;
3534
3535 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
3536
3537 lnkid->ie_type = WLAN_EID_LINK_ID;
3538 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
3539
3540 memcpy(lnkid->bssid, bssid, ETH_ALEN);
3541 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
3542 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
3543}
3544
3545static int
3546ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
3547 u8 *peer, u8 action_code, u8 dialog_token,
3548 u16 status_code, struct sk_buff *skb)
3549{
3550 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3551 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
3552 struct ieee80211_tdls_data *tf;
3553
3554 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
3555
3556 memcpy(tf->da, peer, ETH_ALEN);
3557 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
3558 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
3559 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
3560
3561 switch (action_code) {
3562 case WLAN_TDLS_SETUP_REQUEST:
3563 tf->category = WLAN_CATEGORY_TDLS;
3564 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
3565
3566 skb_put(skb, sizeof(tf->u.setup_req));
3567 tf->u.setup_req.dialog_token = dialog_token;
3568 tf->u.setup_req.capability =
3569 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3570
3571 ieee80211_add_srates_ie(sdata, skb, false, band);
3572 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3573 ieee80211_tdls_add_ext_capab(skb);
3574 break;
3575 case WLAN_TDLS_SETUP_RESPONSE:
3576 tf->category = WLAN_CATEGORY_TDLS;
3577 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
3578
3579 skb_put(skb, sizeof(tf->u.setup_resp));
3580 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
3581 tf->u.setup_resp.dialog_token = dialog_token;
3582 tf->u.setup_resp.capability =
3583 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3584
3585 ieee80211_add_srates_ie(sdata, skb, false, band);
3586 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3587 ieee80211_tdls_add_ext_capab(skb);
3588 break;
3589 case WLAN_TDLS_SETUP_CONFIRM:
3590 tf->category = WLAN_CATEGORY_TDLS;
3591 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
3592
3593 skb_put(skb, sizeof(tf->u.setup_cfm));
3594 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
3595 tf->u.setup_cfm.dialog_token = dialog_token;
3596 break;
3597 case WLAN_TDLS_TEARDOWN:
3598 tf->category = WLAN_CATEGORY_TDLS;
3599 tf->action_code = WLAN_TDLS_TEARDOWN;
3600
3601 skb_put(skb, sizeof(tf->u.teardown));
3602 tf->u.teardown.reason_code = cpu_to_le16(status_code);
3603 break;
3604 case WLAN_TDLS_DISCOVERY_REQUEST:
3605 tf->category = WLAN_CATEGORY_TDLS;
3606 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
3607
3608 skb_put(skb, sizeof(tf->u.discover_req));
3609 tf->u.discover_req.dialog_token = dialog_token;
3610 break;
3611 default:
3612 return -EINVAL;
3613 }
3614
3615 return 0;
3616}
3617
3618static int
3619ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
3620 u8 *peer, u8 action_code, u8 dialog_token,
3621 u16 status_code, struct sk_buff *skb)
3622{
3623 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3624 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
3625 struct ieee80211_mgmt *mgmt;
3626
3627 mgmt = (void *)skb_put(skb, 24);
3628 memset(mgmt, 0, 24);
3629 memcpy(mgmt->da, peer, ETH_ALEN);
3630 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
3631 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3632
3633 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3634 IEEE80211_STYPE_ACTION);
3635
3636 switch (action_code) {
3637 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3638 skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
3639 mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
3640 mgmt->u.action.u.tdls_discover_resp.action_code =
3641 WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
3642 mgmt->u.action.u.tdls_discover_resp.dialog_token =
3643 dialog_token;
3644 mgmt->u.action.u.tdls_discover_resp.capability =
3645 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3646
3647 ieee80211_add_srates_ie(sdata, skb, false, band);
3648 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3649 ieee80211_tdls_add_ext_capab(skb);
3650 break;
3651 default:
3652 return -EINVAL;
3653 }
3654
3655 return 0;
3656}
3657
3658static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
3659 u8 *peer, u8 action_code, u8 dialog_token,
3660 u16 status_code, u32 peer_capability,
3661 const u8 *extra_ies, size_t extra_ies_len)
3662{
3663 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3664 struct ieee80211_local *local = sdata->local;
3665 struct sk_buff *skb = NULL;
3666 bool send_direct;
3667 int ret;
3668
3669 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
3670 return -ENOTSUPP;
3671
3672 /* make sure we are in managed mode, and associated */
3673 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
3674 !sdata->u.mgd.associated)
3675 return -EINVAL;
3676
3677 tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
3678 action_code, peer);
3679
3680 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
3681 max(sizeof(struct ieee80211_mgmt),
3682 sizeof(struct ieee80211_tdls_data)) +
3683 50 + /* supported rates */
3684 7 + /* ext capab */
3685 extra_ies_len +
3686 sizeof(struct ieee80211_tdls_lnkie));
3687 if (!skb)
3688 return -ENOMEM;
3689
3690 skb_reserve(skb, local->hw.extra_tx_headroom);
3691
3692 switch (action_code) {
3693 case WLAN_TDLS_SETUP_REQUEST:
3694 case WLAN_TDLS_SETUP_RESPONSE:
3695 case WLAN_TDLS_SETUP_CONFIRM:
3696 case WLAN_TDLS_TEARDOWN:
3697 case WLAN_TDLS_DISCOVERY_REQUEST:
3698 ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
3699 action_code, dialog_token,
3700 status_code, skb);
3701 send_direct = false;
3702 break;
3703 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3704 ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
3705 dialog_token, status_code,
3706 skb);
3707 send_direct = true;
3708 break;
3709 default:
3710 ret = -ENOTSUPP;
3711 break;
3712 }
3713
3714 if (ret < 0)
3715 goto fail;
3716
3717 if (extra_ies_len)
3718 memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
3719
3720 /* the TDLS link IE is always added last */
3721 switch (action_code) {
3722 case WLAN_TDLS_SETUP_REQUEST:
3723 case WLAN_TDLS_SETUP_CONFIRM:
3724 case WLAN_TDLS_TEARDOWN:
3725 case WLAN_TDLS_DISCOVERY_REQUEST:
3726 /* we are the initiator */
3727 ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
3728 sdata->u.mgd.bssid);
3729 break;
3730 case WLAN_TDLS_SETUP_RESPONSE:
3731 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3732 /* we are the responder */
3733 ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
3734 sdata->u.mgd.bssid);
3735 break;
3736 default:
3737 ret = -ENOTSUPP;
3738 goto fail;
3739 }
3740
3741 if (send_direct) {
3742 ieee80211_tx_skb(sdata, skb);
3743 return 0;
3744 }
3745
3746 /*
3747 * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
3748 * we should default to AC_VI.
3749 */
3750 switch (action_code) {
3751 case WLAN_TDLS_SETUP_REQUEST:
3752 case WLAN_TDLS_SETUP_RESPONSE:
3753 skb_set_queue_mapping(skb, IEEE80211_AC_BK);
3754 skb->priority = 2;
3755 break;
3756 default:
3757 skb_set_queue_mapping(skb, IEEE80211_AC_VI);
3758 skb->priority = 5;
3759 break;
3760 }
3761
3762 /* disable bottom halves when entering the Tx path */
3763 local_bh_disable();
3764 ret = ieee80211_subif_start_xmit(skb, dev);
3765 local_bh_enable();
3766
3767 return ret;
3768
3769fail:
3770 dev_kfree_skb(skb);
3771 return ret;
3772}
3773
3774static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
3775 u8 *peer, enum nl80211_tdls_operation oper)
3776{
3777 struct sta_info *sta;
3778 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3779
3780 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
3781 return -ENOTSUPP;
3782
3783 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3784 return -EINVAL;
3785
3786 tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
3787
3788 switch (oper) {
3789 case NL80211_TDLS_ENABLE_LINK:
3790 rcu_read_lock();
3791 sta = sta_info_get(sdata, peer);
3792 if (!sta) {
3793 rcu_read_unlock();
3794 return -ENOLINK;
3795 }
3796
3797 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
3798 rcu_read_unlock();
3799 break;
3800 case NL80211_TDLS_DISABLE_LINK:
3801 return sta_info_destroy_addr(sdata, peer);
3802 case NL80211_TDLS_TEARDOWN:
3803 case NL80211_TDLS_SETUP:
3804 case NL80211_TDLS_DISCOVERY_REQ:
3805 /* We don't support in-driver setup/teardown/discovery */
3806 return -ENOTSUPP;
3807 default:
3808 return -ENOTSUPP;
3809 }
3810
3811 return 0;
3812}
3813
3814static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, 3631static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3815 const u8 *peer, u64 *cookie) 3632 const u8 *peer, u64 *cookie)
3816{ 3633{
@@ -3949,6 +3766,21 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
3949 return 0; 3766 return 0;
3950} 3767}
3951 3768
3769static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
3770 struct net_device *dev,
3771 struct cfg80211_chan_def *chandef)
3772{
3773 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3774 int ret;
3775 u32 changed = 0;
3776
3777 ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed);
3778 if (ret == 0)
3779 ieee80211_bss_info_change_notify(sdata, changed);
3780
3781 return ret;
3782}
3783
3952const struct cfg80211_ops mac80211_config_ops = { 3784const struct cfg80211_ops mac80211_config_ops = {
3953 .add_virtual_intf = ieee80211_add_iface, 3785 .add_virtual_intf = ieee80211_add_iface,
3954 .del_virtual_intf = ieee80211_del_iface, 3786 .del_virtual_intf = ieee80211_del_iface,
@@ -4029,4 +3861,5 @@ const struct cfg80211_ops mac80211_config_ops = {
4029 .start_radar_detection = ieee80211_start_radar_detection, 3861 .start_radar_detection = ieee80211_start_radar_detection,
4030 .channel_switch = ieee80211_channel_switch, 3862 .channel_switch = ieee80211_channel_switch,
4031 .set_qos_map = ieee80211_set_qos_map, 3863 .set_qos_map = ieee80211_set_qos_map,
3864 .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
4032}; 3865};
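
The cfg.c changes above replace the single CSA counter offset with arrays of up to IEEE80211_MAX_CSA_COUNTERS_NUM u16 offsets, validated and copied in ieee80211_set_csa_beacon() and stamped into outgoing management frames in ieee80211_mgmt_tx(). A self-contained sketch of that bookkeeping; the constant value and the offsets below are assumptions for illustration only:

/* Self-contained sketch of the multi-offset CSA counter bookkeeping
 * introduced above.  MAX_CSA_COUNTERS_NUM and the offsets in main()
 * are assumptions for illustration, not values from mac80211.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CSA_COUNTERS_NUM 2	/* stands in for IEEE80211_MAX_CSA_COUNTERS_NUM */

struct csa_state {
	uint16_t offsets[MAX_CSA_COUNTERS_NUM];	/* like csa_counter_offset_beacon */
	size_t n_offsets;
	uint8_t counter;			/* like csa_current_counter */
};

/* stamp the current countdown value at every recorded offset */
static int csa_stamp_counters(const struct csa_state *csa,
			      uint8_t *frame, size_t len)
{
	size_t i;

	if (csa->n_offsets > MAX_CSA_COUNTERS_NUM)
		return -1;

	for (i = 0; i < csa->n_offsets; i++) {
		if (csa->offsets[i] >= len)	/* offset must lie inside the frame */
			return -1;
		frame[csa->offsets[i]] = csa->counter;
	}

	return 0;
}

int main(void)
{
	uint8_t beacon[64] = { 0 };
	struct csa_state csa = {
		.offsets = { 12, 40 },	/* hypothetical CSA/eCSA count byte positions */
		.n_offsets = 2,
		.counter = 5,
	};

	if (csa_stamp_counters(&csa, beacon, sizeof(beacon)) == 0)
		printf("counter %u written at %zu offsets\n",
		       csa.counter, csa.n_offsets);

	return 0;
}
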
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 75b5dd2c9267..a310e33972de 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -9,6 +9,170 @@
9#include "ieee80211_i.h" 9#include "ieee80211_i.h"
10#include "driver-ops.h" 10#include "driver-ops.h"
11 11
12static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
13 struct ieee80211_chanctx *ctx)
14{
15 struct ieee80211_sub_if_data *sdata;
16 int num = 0;
17
18 lockdep_assert_held(&local->chanctx_mtx);
19
20 list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
21 num++;
22
23 return num;
24}
25
26static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
27 struct ieee80211_chanctx *ctx)
28{
29 struct ieee80211_sub_if_data *sdata;
30 int num = 0;
31
32 lockdep_assert_held(&local->chanctx_mtx);
33
34 list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
35 num++;
36
37 return num;
38}
39
40int ieee80211_chanctx_refcount(struct ieee80211_local *local,
41 struct ieee80211_chanctx *ctx)
42{
43 return ieee80211_chanctx_num_assigned(local, ctx) +
44 ieee80211_chanctx_num_reserved(local, ctx);
45}
46
47static int ieee80211_num_chanctx(struct ieee80211_local *local)
48{
49 struct ieee80211_chanctx *ctx;
50 int num = 0;
51
52 lockdep_assert_held(&local->chanctx_mtx);
53
54 list_for_each_entry(ctx, &local->chanctx_list, list)
55 num++;
56
57 return num;
58}
59
60static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
61{
62 lockdep_assert_held(&local->chanctx_mtx);
63 return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
64}
65
66static const struct cfg80211_chan_def *
67ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
68 struct ieee80211_chanctx *ctx,
69 const struct cfg80211_chan_def *compat)
70{
71 struct ieee80211_sub_if_data *sdata;
72
73 lockdep_assert_held(&local->chanctx_mtx);
74
75 list_for_each_entry(sdata, &ctx->reserved_vifs,
76 reserved_chanctx_list) {
77 if (!compat)
78 compat = &sdata->reserved_chandef;
79
80 compat = cfg80211_chandef_compatible(&sdata->reserved_chandef,
81 compat);
82 if (!compat)
83 break;
84 }
85
86 return compat;
87}
88
89static const struct cfg80211_chan_def *
90ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
91 struct ieee80211_chanctx *ctx,
92 const struct cfg80211_chan_def *compat)
93{
94 struct ieee80211_sub_if_data *sdata;
95
96 lockdep_assert_held(&local->chanctx_mtx);
97
98 list_for_each_entry(sdata, &ctx->assigned_vifs,
99 assigned_chanctx_list) {
100 if (sdata->reserved_chanctx != NULL)
101 continue;
102
103 if (!compat)
104 compat = &sdata->vif.bss_conf.chandef;
105
106 compat = cfg80211_chandef_compatible(
107 &sdata->vif.bss_conf.chandef, compat);
108 if (!compat)
109 break;
110 }
111
112 return compat;
113}
114
115static const struct cfg80211_chan_def *
116ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
117 struct ieee80211_chanctx *ctx,
118 const struct cfg80211_chan_def *compat)
119{
120 lockdep_assert_held(&local->chanctx_mtx);
121
122 compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
123 if (!compat)
124 return NULL;
125
126 compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
127 if (!compat)
128 return NULL;
129
130 return compat;
131}
132
133static bool
134ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
135 struct ieee80211_chanctx *ctx,
136 const struct cfg80211_chan_def *def)
137{
138 lockdep_assert_held(&local->chanctx_mtx);
139
140 if (ieee80211_chanctx_combined_chandef(local, ctx, def))
141 return true;
142
143 if (!list_empty(&ctx->reserved_vifs) &&
144 ieee80211_chanctx_reserved_chandef(local, ctx, def))
145 return true;
146
147 return false;
148}
149
150static struct ieee80211_chanctx *
151ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
152 const struct cfg80211_chan_def *chandef,
153 enum ieee80211_chanctx_mode mode)
154{
155 struct ieee80211_chanctx *ctx;
156
157 lockdep_assert_held(&local->chanctx_mtx);
158
159 if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
160 return NULL;
161
162 list_for_each_entry(ctx, &local->chanctx_list, list) {
163 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
164 continue;
165
166 if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
167 chandef))
168 continue;
169
170 return ctx;
171 }
172
173 return NULL;
174}
175
12static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta) 176static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
13{ 177{
14 switch (sta->bandwidth) { 178 switch (sta->bandwidth) {
@@ -190,6 +354,11 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
190 if (!compat) 354 if (!compat)
191 continue; 355 continue;
192 356
357 compat = ieee80211_chanctx_reserved_chandef(local, ctx,
358 compat);
359 if (!compat)
360 continue;
361
193 ieee80211_change_chanctx(local, ctx, compat); 362 ieee80211_change_chanctx(local, ctx, compat);
194 363
195 return ctx; 364 return ctx;
@@ -217,62 +386,91 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
217} 386}
218 387
219static struct ieee80211_chanctx * 388static struct ieee80211_chanctx *
220ieee80211_new_chanctx(struct ieee80211_local *local, 389ieee80211_alloc_chanctx(struct ieee80211_local *local,
221 const struct cfg80211_chan_def *chandef, 390 const struct cfg80211_chan_def *chandef,
222 enum ieee80211_chanctx_mode mode) 391 enum ieee80211_chanctx_mode mode)
223{ 392{
224 struct ieee80211_chanctx *ctx; 393 struct ieee80211_chanctx *ctx;
225 u32 changed;
226 int err;
227 394
228 lockdep_assert_held(&local->chanctx_mtx); 395 lockdep_assert_held(&local->chanctx_mtx);
229 396
230 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); 397 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
231 if (!ctx) 398 if (!ctx)
232 return ERR_PTR(-ENOMEM); 399 return NULL;
233 400
401 INIT_LIST_HEAD(&ctx->assigned_vifs);
402 INIT_LIST_HEAD(&ctx->reserved_vifs);
234 ctx->conf.def = *chandef; 403 ctx->conf.def = *chandef;
235 ctx->conf.rx_chains_static = 1; 404 ctx->conf.rx_chains_static = 1;
236 ctx->conf.rx_chains_dynamic = 1; 405 ctx->conf.rx_chains_dynamic = 1;
237 ctx->mode = mode; 406 ctx->mode = mode;
238 ctx->conf.radar_enabled = ieee80211_is_radar_required(local); 407 ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
239 ieee80211_recalc_chanctx_min_def(local, ctx); 408 ieee80211_recalc_chanctx_min_def(local, ctx);
409
410 return ctx;
411}
412
413static int ieee80211_add_chanctx(struct ieee80211_local *local,
414 struct ieee80211_chanctx *ctx)
415{
416 u32 changed;
417 int err;
418
419 lockdep_assert_held(&local->mtx);
420 lockdep_assert_held(&local->chanctx_mtx);
421
240 if (!local->use_chanctx) 422 if (!local->use_chanctx)
241 local->hw.conf.radar_enabled = ctx->conf.radar_enabled; 423 local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
242 424
243 /* we hold the mutex to prevent idle from changing */
244 lockdep_assert_held(&local->mtx);
245 /* turn idle off *before* setting channel -- some drivers need that */ 425 /* turn idle off *before* setting channel -- some drivers need that */
246 changed = ieee80211_idle_off(local); 426 changed = ieee80211_idle_off(local);
247 if (changed) 427 if (changed)
248 ieee80211_hw_config(local, changed); 428 ieee80211_hw_config(local, changed);
249 429
250 if (!local->use_chanctx) { 430 if (!local->use_chanctx) {
251 local->_oper_chandef = *chandef; 431 local->_oper_chandef = ctx->conf.def;
252 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 432 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
253 } else { 433 } else {
254 err = drv_add_chanctx(local, ctx); 434 err = drv_add_chanctx(local, ctx);
255 if (err) { 435 if (err) {
256 kfree(ctx);
257 ieee80211_recalc_idle(local); 436 ieee80211_recalc_idle(local);
258 return ERR_PTR(err); 437 return err;
259 } 438 }
260 } 439 }
261 440
262 /* and keep the mutex held until the new chanctx is on the list */ 441 return 0;
263 list_add_rcu(&ctx->list, &local->chanctx_list); 442}
264 443
444static struct ieee80211_chanctx *
445ieee80211_new_chanctx(struct ieee80211_local *local,
446 const struct cfg80211_chan_def *chandef,
447 enum ieee80211_chanctx_mode mode)
448{
449 struct ieee80211_chanctx *ctx;
450 int err;
451
452 lockdep_assert_held(&local->mtx);
453 lockdep_assert_held(&local->chanctx_mtx);
454
455 ctx = ieee80211_alloc_chanctx(local, chandef, mode);
456 if (!ctx)
457 return ERR_PTR(-ENOMEM);
458
459 err = ieee80211_add_chanctx(local, ctx);
460 if (err) {
461 kfree(ctx);
462 return ERR_PTR(err);
463 }
464
465 list_add_rcu(&ctx->list, &local->chanctx_list);
265 return ctx; 466 return ctx;
266} 467}
267 468
268static void ieee80211_free_chanctx(struct ieee80211_local *local, 469static void ieee80211_del_chanctx(struct ieee80211_local *local,
269 struct ieee80211_chanctx *ctx) 470 struct ieee80211_chanctx *ctx)
270{ 471{
271 bool check_single_channel = false;
272 lockdep_assert_held(&local->chanctx_mtx); 472 lockdep_assert_held(&local->chanctx_mtx);
273 473
274 WARN_ON_ONCE(ctx->refcount != 0);
275
276 if (!local->use_chanctx) { 474 if (!local->use_chanctx) {
277 struct cfg80211_chan_def *chandef = &local->_oper_chandef; 475 struct cfg80211_chan_def *chandef = &local->_oper_chandef;
278 chandef->width = NL80211_CHAN_WIDTH_20_NOHT; 476 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -282,8 +480,9 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
282 /* NOTE: Disabling radar is only valid here for 480 /* NOTE: Disabling radar is only valid here for
283 * a single channel context. To be sure, check it ... 481 * a single channel context. To be sure, check it ...
284 */ 482 */
285 if (local->hw.conf.radar_enabled) 483 WARN_ON(local->hw.conf.radar_enabled &&
286 check_single_channel = true; 484 !list_empty(&local->chanctx_list));
485
287 local->hw.conf.radar_enabled = false; 486 local->hw.conf.radar_enabled = false;
288 487
289 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 488 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
@@ -291,39 +490,19 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
291 drv_remove_chanctx(local, ctx); 490 drv_remove_chanctx(local, ctx);
292 } 491 }
293 492
294 list_del_rcu(&ctx->list);
295 kfree_rcu(ctx, rcu_head);
296
297 /* throw a warning if this wasn't the only channel context. */
298 WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
299
300 ieee80211_recalc_idle(local); 493 ieee80211_recalc_idle(local);
301} 494}
302 495
303static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 496static void ieee80211_free_chanctx(struct ieee80211_local *local,
304 struct ieee80211_chanctx *ctx) 497 struct ieee80211_chanctx *ctx)
305{ 498{
306 struct ieee80211_local *local = sdata->local;
307 int ret;
308
309 lockdep_assert_held(&local->chanctx_mtx); 499 lockdep_assert_held(&local->chanctx_mtx);
310 500
311 ret = drv_assign_vif_chanctx(local, sdata, ctx); 501 WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
312 if (ret)
313 return ret;
314 502
315 rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf); 503 list_del_rcu(&ctx->list);
316 ctx->refcount++; 504 ieee80211_del_chanctx(local, ctx);
317 505 kfree_rcu(ctx, rcu_head);
318 ieee80211_recalc_txpower(sdata);
319 ieee80211_recalc_chanctx_min_def(local, ctx);
320 sdata->vif.bss_conf.idle = false;
321
322 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
323 sdata->vif.type != NL80211_IFTYPE_MONITOR)
324 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
325
326 return 0;
327} 506}
328 507
329static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, 508static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
@@ -384,30 +563,58 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
384 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR); 563 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
385} 564}
386 565
387static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 566static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
388 struct ieee80211_chanctx *ctx) 567 struct ieee80211_chanctx *new_ctx)
389{ 568{
390 struct ieee80211_local *local = sdata->local; 569 struct ieee80211_local *local = sdata->local;
570 struct ieee80211_chanctx_conf *conf;
571 struct ieee80211_chanctx *curr_ctx = NULL;
572 int ret = 0;
391 573
392 lockdep_assert_held(&local->chanctx_mtx); 574 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
575 lockdep_is_held(&local->chanctx_mtx));
393 576
394 ctx->refcount--; 577 if (conf) {
395 rcu_assign_pointer(sdata->vif.chanctx_conf, NULL); 578 curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
396 579
397 sdata->vif.bss_conf.idle = true; 580 drv_unassign_vif_chanctx(local, sdata, curr_ctx);
581 conf = NULL;
582 list_del(&sdata->assigned_chanctx_list);
583 }
398 584
399 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && 585 if (new_ctx) {
400 sdata->vif.type != NL80211_IFTYPE_MONITOR) 586 ret = drv_assign_vif_chanctx(local, sdata, new_ctx);
401 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); 587 if (ret)
588 goto out;
402 589
403 drv_unassign_vif_chanctx(local, sdata, ctx); 590 conf = &new_ctx->conf;
591 list_add(&sdata->assigned_chanctx_list,
592 &new_ctx->assigned_vifs);
593 }
594
595out:
596 rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
597
598 sdata->vif.bss_conf.idle = !conf;
599
600 if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
601 ieee80211_recalc_chanctx_chantype(local, curr_ctx);
602 ieee80211_recalc_smps_chanctx(local, curr_ctx);
603 ieee80211_recalc_radar_chanctx(local, curr_ctx);
604 ieee80211_recalc_chanctx_min_def(local, curr_ctx);
605 }
404 606
405 if (ctx->refcount > 0) { 607 if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
406 ieee80211_recalc_chanctx_chantype(sdata->local, ctx); 608 ieee80211_recalc_txpower(sdata);
407 ieee80211_recalc_smps_chanctx(local, ctx); 609 ieee80211_recalc_chanctx_min_def(local, new_ctx);
408 ieee80211_recalc_radar_chanctx(local, ctx);
409 ieee80211_recalc_chanctx_min_def(local, ctx);
410 } 610 }
611
612 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
613 sdata->vif.type != NL80211_IFTYPE_MONITOR)
614 ieee80211_bss_info_change_notify(sdata,
615 BSS_CHANGED_IDLE);
616
617 return ret;
411} 618}
412 619
413static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) 620static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
@@ -425,8 +632,11 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
425 632
426 ctx = container_of(conf, struct ieee80211_chanctx, conf); 633 ctx = container_of(conf, struct ieee80211_chanctx, conf);
427 634
428 ieee80211_unassign_vif_chanctx(sdata, ctx); 635 if (sdata->reserved_chanctx)
429 if (ctx->refcount == 0) 636 ieee80211_vif_unreserve_chanctx(sdata);
637
638 ieee80211_assign_vif_chanctx(sdata, NULL);
639 if (ieee80211_chanctx_refcount(local, ctx) == 0)
430 ieee80211_free_chanctx(local, ctx); 640 ieee80211_free_chanctx(local, ctx);
431} 641}
432 642
@@ -526,6 +736,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
526{ 736{
527 struct ieee80211_local *local = sdata->local; 737 struct ieee80211_local *local = sdata->local;
528 struct ieee80211_chanctx *ctx; 738 struct ieee80211_chanctx *ctx;
739 u8 radar_detect_width = 0;
529 int ret; 740 int ret;
530 741
531 lockdep_assert_held(&local->mtx); 742 lockdep_assert_held(&local->mtx);
@@ -533,6 +744,22 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
533 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); 744 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
534 745
535 mutex_lock(&local->chanctx_mtx); 746 mutex_lock(&local->chanctx_mtx);
747
748 ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
749 chandef,
750 sdata->wdev.iftype);
751 if (ret < 0)
752 goto out;
753 if (ret > 0)
754 radar_detect_width = BIT(chandef->width);
755
756 sdata->radar_required = ret;
757
758 ret = ieee80211_check_combinations(sdata, chandef, mode,
759 radar_detect_width);
760 if (ret < 0)
761 goto out;
762
536 __ieee80211_vif_release_channel(sdata); 763 __ieee80211_vif_release_channel(sdata);
537 764
538 ctx = ieee80211_find_chanctx(local, chandef, mode); 765 ctx = ieee80211_find_chanctx(local, chandef, mode);
@@ -548,7 +775,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
548 ret = ieee80211_assign_vif_chanctx(sdata, ctx); 775 ret = ieee80211_assign_vif_chanctx(sdata, ctx);
549 if (ret) { 776 if (ret) {
550 /* if assign fails, refcount stays the same */ 777 /* if assign fails, refcount stays the same */
551 if (ctx->refcount == 0) 778 if (ieee80211_chanctx_refcount(local, ctx) == 0)
552 ieee80211_free_chanctx(local, ctx); 779 ieee80211_free_chanctx(local, ctx);
553 goto out; 780 goto out;
554 } 781 }
@@ -560,15 +787,47 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
560 return ret; 787 return ret;
561} 788}
562 789
790static int __ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
791 struct ieee80211_chanctx *ctx,
792 u32 *changed)
793{
794 struct ieee80211_local *local = sdata->local;
795 const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
796 u32 chanctx_changed = 0;
797
798 if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
799 IEEE80211_CHAN_DISABLED))
800 return -EINVAL;
801
802 if (ieee80211_chanctx_refcount(local, ctx) != 1)
803 return -EINVAL;
804
805 if (sdata->vif.bss_conf.chandef.width != chandef->width) {
806 chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
807 *changed |= BSS_CHANGED_BANDWIDTH;
808 }
809
810 sdata->vif.bss_conf.chandef = *chandef;
811 ctx->conf.def = *chandef;
812
813 chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
814 drv_change_chanctx(local, ctx, chanctx_changed);
815
816 ieee80211_recalc_chanctx_chantype(local, ctx);
817 ieee80211_recalc_smps_chanctx(local, ctx);
818 ieee80211_recalc_radar_chanctx(local, ctx);
819 ieee80211_recalc_chanctx_min_def(local, ctx);
820
821 return 0;
822}
823
563int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata, 824int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
564 u32 *changed) 825 u32 *changed)
565{ 826{
566 struct ieee80211_local *local = sdata->local; 827 struct ieee80211_local *local = sdata->local;
567 struct ieee80211_chanctx_conf *conf; 828 struct ieee80211_chanctx_conf *conf;
568 struct ieee80211_chanctx *ctx; 829 struct ieee80211_chanctx *ctx;
569 const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
570 int ret; 830 int ret;
571 u32 chanctx_changed = 0;
572 831
573 lockdep_assert_held(&local->mtx); 832 lockdep_assert_held(&local->mtx);
574 833
@@ -576,11 +835,94 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
576 if (WARN_ON(!sdata->vif.csa_active)) 835 if (WARN_ON(!sdata->vif.csa_active))
577 return -EINVAL; 836 return -EINVAL;
578 837
579 if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 838 mutex_lock(&local->chanctx_mtx);
580 IEEE80211_CHAN_DISABLED)) 839 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
840 lockdep_is_held(&local->chanctx_mtx));
841 if (!conf) {
842 ret = -EINVAL;
843 goto out;
844 }
845
846 ctx = container_of(conf, struct ieee80211_chanctx, conf);
847
848 ret = __ieee80211_vif_change_channel(sdata, ctx, changed);
849 out:
850 mutex_unlock(&local->chanctx_mtx);
851 return ret;
852}
853
854static void
855__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
856 bool clear)
857{
858 struct ieee80211_local *local __maybe_unused = sdata->local;
859 struct ieee80211_sub_if_data *vlan;
860 struct ieee80211_chanctx_conf *conf;
861
862 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
863 return;
864
865 lockdep_assert_held(&local->mtx);
866
867 /* Check that conf exists; even when clearing, this function
868 * must be called with the AP's channel context still there,
869 * as it would otherwise cause VLANs to have an invalid
870 * channel context pointer for a while, possibly pointing
871 * to a channel context that has already been freed.
872 */
873 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
874 lockdep_is_held(&local->chanctx_mtx));
875 WARN_ON(!conf);
876
877 if (clear)
878 conf = NULL;
879
880 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
881 rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
882}
883
884void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
885 bool clear)
886{
887 struct ieee80211_local *local = sdata->local;
888
889 mutex_lock(&local->chanctx_mtx);
890
891 __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear);
892
893 mutex_unlock(&local->chanctx_mtx);
894}
895
896int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
897{
898 struct ieee80211_chanctx *ctx = sdata->reserved_chanctx;
899
900 lockdep_assert_held(&sdata->local->chanctx_mtx);
901
902 if (WARN_ON(!ctx))
581 return -EINVAL; 903 return -EINVAL;
582 904
905 list_del(&sdata->reserved_chanctx_list);
906 sdata->reserved_chanctx = NULL;
907
908 if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
909 ieee80211_free_chanctx(sdata->local, ctx);
910
911 return 0;
912}
913
914int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
915 const struct cfg80211_chan_def *chandef,
916 enum ieee80211_chanctx_mode mode,
917 bool radar_required)
918{
919 struct ieee80211_local *local = sdata->local;
920 struct ieee80211_chanctx_conf *conf;
921 struct ieee80211_chanctx *new_ctx, *curr_ctx;
922 int ret = 0;
923
583 mutex_lock(&local->chanctx_mtx); 924 mutex_lock(&local->chanctx_mtx);
925
584 conf = rcu_dereference_protected(sdata->vif.chanctx_conf, 926 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
585 lockdep_is_held(&local->chanctx_mtx)); 927 lockdep_is_held(&local->chanctx_mtx));
586 if (!conf) { 928 if (!conf) {
@@ -588,30 +930,108 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
588 goto out; 930 goto out;
589 } 931 }
590 932
591 ctx = container_of(conf, struct ieee80211_chanctx, conf); 933 curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
592 if (ctx->refcount != 1) { 934
935 new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
936 if (!new_ctx) {
937 if (ieee80211_chanctx_refcount(local, curr_ctx) == 1 &&
938 (local->hw.flags & IEEE80211_HW_CHANGE_RUNNING_CHANCTX)) {
939 /* if we're the only users of the chanctx and
940 * the driver supports changing a running
941 * context, reserve our current context
942 */
943 new_ctx = curr_ctx;
944 } else if (ieee80211_can_create_new_chanctx(local)) {
945 /* create a new context and reserve it */
946 new_ctx = ieee80211_new_chanctx(local, chandef, mode);
947 if (IS_ERR(new_ctx)) {
948 ret = PTR_ERR(new_ctx);
949 goto out;
950 }
951 } else {
952 ret = -EBUSY;
953 goto out;
954 }
955 }
956
957 list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);
958 sdata->reserved_chanctx = new_ctx;
959 sdata->reserved_chandef = *chandef;
960 sdata->reserved_radar_required = radar_required;
961out:
962 mutex_unlock(&local->chanctx_mtx);
963 return ret;
964}
965
966int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
967 u32 *changed)
968{
969 struct ieee80211_local *local = sdata->local;
970 struct ieee80211_chanctx *ctx;
971 struct ieee80211_chanctx *old_ctx;
972 struct ieee80211_chanctx_conf *conf;
973 int ret;
974 u32 tmp_changed = *changed;
975
976 /* TODO: need to recheck if the chandef is usable etc.? */
977
978 lockdep_assert_held(&local->mtx);
979
980 mutex_lock(&local->chanctx_mtx);
981
982 ctx = sdata->reserved_chanctx;
983 if (WARN_ON(!ctx)) {
593 ret = -EINVAL; 984 ret = -EINVAL;
594 goto out; 985 goto out;
595 } 986 }
596 987
597 if (sdata->vif.bss_conf.chandef.width != chandef->width) { 988 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
598 chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH; 989 lockdep_is_held(&local->chanctx_mtx));
599 *changed |= BSS_CHANGED_BANDWIDTH; 990 if (!conf) {
991 ret = -EINVAL;
992 goto out;
600 } 993 }
601 994
602 sdata->vif.bss_conf.chandef = *chandef; 995 old_ctx = container_of(conf, struct ieee80211_chanctx, conf);
603 ctx->conf.def = *chandef;
604 996
605 chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL; 997 if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
606 drv_change_chanctx(local, ctx, chanctx_changed); 998 tmp_changed |= BSS_CHANGED_BANDWIDTH;
999
1000 sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
1001
1002 /* unref our reservation */
1003 sdata->reserved_chanctx = NULL;
1004 sdata->radar_required = sdata->reserved_radar_required;
1005 list_del(&sdata->reserved_chanctx_list);
1006
1007 if (old_ctx == ctx) {
1008 /* This is our own context, just change it */
1009 ret = __ieee80211_vif_change_channel(sdata, old_ctx,
1010 &tmp_changed);
1011 if (ret)
1012 goto out;
1013 } else {
1014 ret = ieee80211_assign_vif_chanctx(sdata, ctx);
1015 if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
1016 ieee80211_free_chanctx(local, old_ctx);
1017 if (ret) {
1018 /* if assign fails, refcount stays the same */
1019 if (ieee80211_chanctx_refcount(local, ctx) == 0)
1020 ieee80211_free_chanctx(local, ctx);
1021 goto out;
1022 }
1023
1024 if (sdata->vif.type == NL80211_IFTYPE_AP)
1025 __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
1026 }
1027
1028 *changed = tmp_changed;
607 1029
608 ieee80211_recalc_chanctx_chantype(local, ctx); 1030 ieee80211_recalc_chanctx_chantype(local, ctx);
609 ieee80211_recalc_smps_chanctx(local, ctx); 1031 ieee80211_recalc_smps_chanctx(local, ctx);
610 ieee80211_recalc_radar_chanctx(local, ctx); 1032 ieee80211_recalc_radar_chanctx(local, ctx);
611 ieee80211_recalc_chanctx_min_def(local, ctx); 1033 ieee80211_recalc_chanctx_min_def(local, ctx);
612 1034out:
613 ret = 0;
614 out:
615 mutex_unlock(&local->chanctx_mtx); 1035 mutex_unlock(&local->chanctx_mtx);
616 return ret; 1036 return ret;
617} 1037}
@@ -695,40 +1115,6 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
695 mutex_unlock(&local->chanctx_mtx); 1115 mutex_unlock(&local->chanctx_mtx);
696} 1116}
697 1117
698void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
699 bool clear)
700{
701 struct ieee80211_local *local = sdata->local;
702 struct ieee80211_sub_if_data *vlan;
703 struct ieee80211_chanctx_conf *conf;
704
705 ASSERT_RTNL();
706
707 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
708 return;
709
710 mutex_lock(&local->chanctx_mtx);
711
712 /*
713 * Check that conf exists; even when clearing, this function
714 * must be called with the AP's channel context still there,
715 * as it would otherwise cause VLANs to have an invalid
716 * channel context pointer for a while, possibly pointing
717 * to a channel context that has already been freed.
718 */
719 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
720 lockdep_is_held(&local->chanctx_mtx));
721 WARN_ON(!conf);
722
723 if (clear)
724 conf = NULL;
725
726 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
727 rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
728
729 mutex_unlock(&local->chanctx_mtx);
730}
731
732void ieee80211_iter_chan_contexts_atomic( 1118void ieee80211_iter_chan_contexts_atomic(
733 struct ieee80211_hw *hw, 1119 struct ieee80211_hw *hw,
734 void (*iter)(struct ieee80211_hw *hw, 1120 void (*iter)(struct ieee80211_hw *hw,
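
The chan.c rework above drops the explicit ctx->refcount in favour of deriving a context's use count from its two membership lists (assigned_vifs and reserved_vifs), which is what the new ieee80211_chanctx_refcount() computes. A standalone sketch of the idea, using toy structures rather than the mac80211 ones:

/* Standalone sketch of deriving the reference count from list
 * membership, as ieee80211_chanctx_refcount() does above.  These are
 * toy structures, not the mac80211 ones.
 */
#include <stdio.h>

struct vif {
	struct vif *next;
};

struct chanctx {
	struct vif *assigned;	/* plays the role of ctx->assigned_vifs */
	struct vif *reserved;	/* plays the role of ctx->reserved_vifs */
};

static int list_len(const struct vif *v)
{
	int n = 0;

	for (; v; v = v->next)
		n++;

	return n;
}

/* a context is in use while anyone is assigned to it or has reserved it */
static int chanctx_refcount(const struct chanctx *ctx)
{
	return list_len(ctx->assigned) + list_len(ctx->reserved);
}

int main(void)
{
	struct vif ap = { NULL }, sta = { NULL };
	struct chanctx ctx = { .assigned = &ap, .reserved = &sta };

	printf("refcount = %d\n", chanctx_refcount(&ctx));	/* prints 2 */
	return 0;
}
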
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index fa16e54980a1..0e963bc1ceac 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -128,7 +128,7 @@ static ssize_t sta_tx_latency_stat_write(struct file *file,
128 if (!strcmp(buf, TX_LATENCY_DISABLED)) { 128 if (!strcmp(buf, TX_LATENCY_DISABLED)) {
129 if (!tx_latency) 129 if (!tx_latency)
130 goto unlock; 130 goto unlock;
131 rcu_assign_pointer(local->tx_latency, NULL); 131 RCU_INIT_POINTER(local->tx_latency, NULL);
132 synchronize_rcu(); 132 synchronize_rcu();
133 kfree(tx_latency); 133 kfree(tx_latency);
134 goto unlock; 134 goto unlock;
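
The debugfs.c hunk above (like the similar one in cfg.c) switches rcu_assign_pointer(..., NULL) to RCU_INIT_POINTER(..., NULL): the release ordering in rcu_assign_pointer() only matters when publishing an initialized object, so storing NULL can be a plain store. A loose C11-atomics analogy; this illustrates the ordering argument and is not the kernel's RCU implementation:

/* C11-atomics analogy for the change above; illustrative only. */
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(int *) shared;

static void publish(int *obj)
{
	/* like rcu_assign_pointer(): release ordering makes the object's
	 * initialization visible before the pointer itself
	 */
	atomic_store_explicit(&shared, obj, memory_order_release);
}

static void retract(void)
{
	/* like RCU_INIT_POINTER(..., NULL): storing NULL publishes no
	 * object, so no ordering (and no barrier) is needed
	 */
	atomic_store_explicit(&shared, NULL, memory_order_relaxed);
}

int main(void)
{
	static int value = 42;

	publish(&value);
	retract();
	return 0;
}
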
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 214ed4ecd739..60c35afee29d 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -1,6 +1,8 @@
1#ifndef __MAC80211_DEBUGFS_H 1#ifndef __MAC80211_DEBUGFS_H
2#define __MAC80211_DEBUGFS_H 2#define __MAC80211_DEBUGFS_H
3 3
4#include "ieee80211_i.h"
5
4#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
5void debugfs_hw_add(struct ieee80211_local *local); 7void debugfs_hw_add(struct ieee80211_local *local);
6int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count, 8int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 40a648938985..e205ebabfa50 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
 	ssize_t ret = -EINVAL;
 
 	read_lock(&dev_base_lock);
-	if (sdata->dev->reg_state == NETREG_REGISTERED)
-		ret = (*format)(sdata, buf, sizeof(buf));
+	ret = (*format)(sdata, buf, sizeof(buf));
 	read_unlock(&dev_base_lock);
 
 	if (ret >= 0)
@@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
 
 	ret = -ENODEV;
 	rtnl_lock();
-	if (sdata->dev->reg_state == NETREG_REGISTERED)
-		ret = (*write)(sdata, buf, count);
+	ret = (*write)(sdata, buf, count);
 	rtnl_unlock();
 
 	return ret;
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 79025e79f4d6..9f5501a9a795 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -3,6 +3,8 @@
 #ifndef __IEEE80211_DEBUGFS_NETDEV_H
 #define __IEEE80211_DEBUGFS_NETDEV_H
 
+#include "ieee80211_i.h"
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
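Both debugfs headers gain an explicit #include of "ieee80211_i.h" so they parse in any inclusion order, instead of silently depending on every includer pulling in the mac80211 private types first. A header whose prototypes name struct ieee80211_local or struct ieee80211_sub_if_data should either include the defining header or, since only pointers are involved, forward-declare the structs. A minimal self-contained example (names hypothetical):

	#ifndef __EXAMPLE_DEBUGFS_H
	#define __EXAMPLE_DEBUGFS_H

	struct ieee80211_local;	/* forward declaration is enough for a pointer */

	void example_debugfs_hw_add(struct ieee80211_local *local);

	#endif /* __EXAMPLE_DEBUGFS_H */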
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index fc689f5d971e..bd782dcffcc7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,11 +5,11 @@
 #include "ieee80211_i.h"
 #include "trace.h"
 
-static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
-	WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
-	     "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
-	     sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
+	return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
+		     "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
+		     sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -168,7 +168,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_change_interface(local, sdata, type, p2p);
 	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
@@ -181,7 +182,8 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_remove_interface(local, sdata);
 	local->ops->remove_interface(&local->hw, &sdata->vif);
@@ -219,7 +221,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 		    sdata->vif.type == NL80211_IFTYPE_MONITOR))
 		return;
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_bss_info_changed(local, sdata, info, changed);
 	if (local->ops->bss_info_changed)
@@ -278,7 +281,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_set_key(local, cmd, sdata, sta, key);
 	ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
@@ -298,7 +302,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
 		ista = &sta->sta;
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
 	if (local->ops->update_tkip_key)
@@ -315,7 +320,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_hw_scan(local, sdata);
 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
@@ -328,7 +334,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_cancel_hw_scan(local, sdata);
 	local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
@@ -345,7 +352,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_sched_scan_start(local, sdata);
 	ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
@@ -361,7 +369,8 @@ static inline int drv_sched_scan_stop(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_sched_scan_stop(local, sdata);
 	ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif);
@@ -462,7 +471,8 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
 				  struct ieee80211_sta *sta)
 {
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_sta_notify(local, sdata, cmd, sta);
 	if (local->ops->sta_notify)
@@ -479,7 +489,8 @@ static inline int drv_sta_add(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_sta_add(local, sdata, sta);
 	if (local->ops->sta_add)
@@ -497,7 +508,8 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_sta_remove(local, sdata, sta);
 	if (local->ops->sta_remove)
@@ -515,7 +527,8 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	if (local->ops->sta_add_debugfs)
 		local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
@@ -545,7 +558,8 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta);
 	if (local->ops->sta_pre_rcu_remove)
@@ -566,7 +580,8 @@ int drv_sta_state(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
 	if (local->ops->sta_state) {
@@ -590,7 +605,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
 				     struct ieee80211_sta *sta, u32 changed)
 {
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
 		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
@@ -612,7 +628,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_conf_tx(local, sdata, ac, params);
 	if (local->ops->conf_tx)
@@ -629,7 +646,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return ret;
 
 	trace_drv_get_tsf(local, sdata);
 	if (local->ops->get_tsf)
@@ -644,7 +662,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_set_tsf(local, sdata, tsf);
 	if (local->ops->set_tsf)
@@ -657,7 +676,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_reset_tsf(local, sdata);
 	if (local->ops->reset_tsf)
@@ -689,7 +709,8 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
 	might_sleep();
 
 	sdata = get_bss_sdata(sdata);
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
@@ -726,13 +747,19 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local)
 }
 
 static inline void drv_flush(struct ieee80211_local *local,
+			     struct ieee80211_sub_if_data *sdata,
 			     u32 queues, bool drop)
 {
+	struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
+
 	might_sleep();
 
+	if (sdata && !check_sdata_in_driver(sdata))
+		return;
+
 	trace_drv_flush(local, queues, drop);
 	if (local->ops->flush)
-		local->ops->flush(&local->hw, queues, drop);
+		local->ops->flush(&local->hw, vif, queues, drop);
 	trace_drv_return_void(local);
 }
 
@@ -848,7 +875,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
 
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_set_bitrate_mask(local, sdata, mask);
 	if (local->ops->set_bitrate_mask)
@@ -863,7 +891,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
 				      struct ieee80211_sub_if_data *sdata,
 				      struct cfg80211_gtk_rekey_data *data)
 {
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_set_rekey_data(local, sdata, data);
 	if (local->ops->set_rekey_data)
@@ -931,7 +960,8 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
 	trace_drv_mgd_prepare_tx(local, sdata);
@@ -958,6 +988,9 @@ static inline int drv_add_chanctx(struct ieee80211_local *local,
 static inline void drv_remove_chanctx(struct ieee80211_local *local,
 				      struct ieee80211_chanctx *ctx)
 {
+	if (WARN_ON(!ctx->driver_present))
+		return;
+
 	trace_drv_remove_chanctx(local, ctx);
 	if (local->ops->remove_chanctx)
 		local->ops->remove_chanctx(&local->hw, &ctx->conf);
@@ -983,7 +1016,8 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
 {
 	int ret = 0;
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_assign_vif_chanctx(local, sdata, ctx);
 	if (local->ops->assign_vif_chanctx) {
@@ -1001,7 +1035,8 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
 					    struct ieee80211_sub_if_data *sdata,
 					    struct ieee80211_chanctx *ctx)
 {
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_unassign_vif_chanctx(local, sdata, ctx);
 	if (local->ops->unassign_vif_chanctx) {
@@ -1013,12 +1048,66 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
 	trace_drv_return_void(local);
 }
 
+static inline int
+drv_switch_vif_chanctx(struct ieee80211_local *local,
+		       struct ieee80211_vif_chanctx_switch *vifs,
+		       int n_vifs,
+		       enum ieee80211_chanctx_switch_mode mode)
+{
+	int ret = 0;
+	int i;
+
+	if (!local->ops->switch_vif_chanctx)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < n_vifs; i++) {
+		struct ieee80211_chanctx *new_ctx =
+			container_of(vifs[i].new_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+		struct ieee80211_chanctx *old_ctx =
+			container_of(vifs[i].old_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+
+		WARN_ON_ONCE(!old_ctx->driver_present);
+		WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
+			      new_ctx->driver_present) ||
+			     (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
+			      !new_ctx->driver_present));
+	}
+
+	trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
+	ret = local->ops->switch_vif_chanctx(&local->hw,
+					     vifs, n_vifs, mode);
+	trace_drv_return_int(local, ret);
+
+	if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+		for (i = 0; i < n_vifs; i++) {
+			struct ieee80211_chanctx *new_ctx =
+				container_of(vifs[i].new_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+			struct ieee80211_chanctx *old_ctx =
+				container_of(vifs[i].old_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+
+			new_ctx->driver_present = true;
+			old_ctx->driver_present = false;
+		}
+	}
+
+	return ret;
+}
+
 static inline int drv_start_ap(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata)
 {
 	int ret = 0;
 
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
 	if (local->ops->start_ap)
@@ -1030,7 +1119,8 @@ static inline int drv_start_ap(struct ieee80211_local *local,
 static inline void drv_stop_ap(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata)
 {
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_stop_ap(local, sdata);
 	if (local->ops->stop_ap)
@@ -1053,7 +1143,8 @@ drv_set_default_unicast_key(struct ieee80211_local *local,
 			    struct ieee80211_sub_if_data *sdata,
 			    int key_idx)
 {
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
 
@@ -1095,7 +1186,8 @@ static inline int drv_join_ibss(struct ieee80211_local *local,
 	int ret = 0;
 
 	might_sleep();
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
 
 	trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
 	if (local->ops->join_ibss)
@@ -1108,7 +1200,8 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
 				  struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
-	check_sdata_in_driver(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
 
 	trace_drv_leave_ibss(local, sdata);
 	if (local->ops->leave_ibss)
@@ -1116,4 +1209,17 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
 	trace_drv_return_void(local);
 }
 
+static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
+					      struct ieee80211_sta *sta)
+{
+	u32 ret = 0;
+
+	trace_drv_get_expected_throughput(sta);
+	if (local->ops->get_expected_throughput)
+		ret = local->ops->get_expected_throughput(sta);
+	trace_drv_return_u32(local, ret);
+
+	return ret;
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
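The common theme of the driver-ops.h hunks: check_sdata_in_driver() used to be a pure WARN, so every drv_*() wrapper called into the driver even for an interface that had never been added to it. Returning the (negated) WARN result as a bool lets each wrapper bail out early, with int-returning ops reporting -EIO and void ops simply skipping the driver call. The skeleton of the pattern, with the wrapper details elided and example_op standing in for any real driver callback:

	static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
	{
		/* WARN() returns true when it fires, so negate it:
		 * true means "safe to call into the driver" */
		return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
			     "%s: not in driver\n", sdata->name);
	}

	static inline int drv_example_op(struct ieee80211_local *local,
					 struct ieee80211_sub_if_data *sdata)
	{
		if (!check_sdata_in_driver(sdata))
			return -EIO;	/* void wrappers just "return;" here */

		return local->ops->example_op(&local->hw, &sdata->vif);
	}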
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index c150b68436d7..15702ff64a4c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -31,6 +31,18 @@ static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
 	}
 }
 
+static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa,
+				 struct ieee80211_ht_cap *ht_capa_mask,
+				 struct ieee80211_sta_ht_cap *ht_cap,
+				 u16 flag)
+{
+	__le16 le_flag = cpu_to_le16(flag);
+
+	if ((ht_capa_mask->cap_info & le_flag) &&
+	    (ht_capa->cap_info & le_flag))
+		ht_cap->cap |= flag;
+}
+
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_sta_ht_cap *ht_cap)
 {
@@ -59,7 +71,7 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 	smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
 
 	/* NOTE: If you add more over-rides here, update register_hw
-	 * ht_capa_mod_msk logic in main.c as well.
+	 * ht_capa_mod_mask logic in main.c as well.
 	 * And, if this method can ever change ht_cap.ht_supported, fix
 	 * the check in ieee80211_add_ht_ie.
 	 */
@@ -86,6 +98,14 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 	__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
 			      IEEE80211_HT_CAP_MAX_AMSDU);
 
+	/* Allow user to disable LDPC */
+	__check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+			      IEEE80211_HT_CAP_LDPC_CODING);
+
+	/* Allow user to enable 40 MHz intolerant bit. */
+	__check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
+			     IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+
 	/* Allow user to decrease AMPDU factor */
 	if (ht_capa_mask->ampdu_params_info &
 	    IEEE80211_HT_AMPDU_PARM_FACTOR) {
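ht.c previously only had a helper for masking capability bits off; __check_htcap_enable() is its mirror image for forcing a bit on, used here so userspace can assert 40 MHz intolerance (and the disable path additionally gains LDPC). The test is two-sided: the bit must be set in the override mask (the user touched it) and in the override value (the user wants it on). The same logic in plain integer form, using host-endian uint16_t for brevity where the kernel code compares __le16 fields:

	#include <stdint.h>

	/* Apply a user HT-capability override: 'mask' says which bits the
	 * user asked to control, 'value' says what they should become. */
	static uint16_t apply_override(uint16_t cap, uint16_t value,
				       uint16_t mask, uint16_t flag)
	{
		if (!(mask & flag))
			return cap;		/* user did not touch this bit */
		if (value & flag)
			return cap | flag;	/* __check_htcap_enable() case */
		return cap & ~flag;		/* __check_htcap_disable() case */
	}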
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 06d28787945b..18ee0a256b1e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -143,7 +143,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
 		*pos++ = csa_settings->block_tx ? 1 : 0;
 		*pos++ = ieee80211_frequency_to_channel(
 				csa_settings->chandef.chan->center_freq);
-		sdata->csa_counter_offset_beacon = (pos - presp->head);
+		sdata->csa_counter_offset_beacon[0] = (pos - presp->head);
 		*pos++ = csa_settings->count;
 	}
 
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	struct beacon_data *presp;
 	enum nl80211_bss_scan_width scan_width;
 	bool have_higher_than_11mbit;
-	bool radar_required = false;
+	bool radar_required;
 	int err;
 
 	sdata_assert_lock(sdata);
@@ -253,7 +253,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
 	presp = rcu_dereference_protected(ifibss->presp,
 					  lockdep_is_held(&sdata->wdev.mtx));
-	rcu_assign_pointer(ifibss->presp, NULL);
+	RCU_INIT_POINTER(ifibss->presp, NULL);
 	if (presp)
 		kfree_rcu(presp, rcu_head);
 
@@ -262,7 +262,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	/* make a copy of the chandef, it could be modified below. */
 	chandef = *req_chandef;
 	chan = chandef.chan;
-	if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+	if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+				     NL80211_IFTYPE_ADHOC)) {
 		if (chandef.width == NL80211_CHAN_WIDTH_5 ||
 		    chandef.width == NL80211_CHAN_WIDTH_10 ||
 		    chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
@@ -274,7 +275,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 		chandef.width = NL80211_CHAN_WIDTH_20;
 		chandef.center_freq1 = chan->center_freq;
 		/* check again for downgraded chandef */
-		if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+		if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+					     NL80211_IFTYPE_ADHOC)) {
 			sdata_info(sdata,
 				   "Failed to join IBSS, beacons forbidden\n");
 			return;
@@ -282,21 +284,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	}
 
 	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-					    &chandef);
+					    &chandef, NL80211_IFTYPE_ADHOC);
 	if (err < 0) {
 		sdata_info(sdata,
 			   "Failed to join IBSS, invalid chandef\n");
 		return;
 	}
-	if (err > 0) {
-		if (!ifibss->userspace_handles_dfs) {
-			sdata_info(sdata,
-				   "Failed to join IBSS, DFS channel without control program\n");
-			return;
-		}
-		radar_required = true;
+	if (err > 0 && !ifibss->userspace_handles_dfs) {
+		sdata_info(sdata,
+			   "Failed to join IBSS, DFS channel without control program\n");
+		return;
 	}
 
+	radar_required = err;
+
 	mutex_lock(&local->mtx);
 	if (ieee80211_vif_use_channel(sdata, &chandef,
 				      ifibss->fixed_channel ?
@@ -775,7 +776,8 @@ static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
 	 * unavailable.
 	 */
 	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-					    &ifibss->chandef);
+					    &ifibss->chandef,
+					    NL80211_IFTYPE_ADHOC);
 	if (err > 0)
 		cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef,
 				     GFP_ATOMIC);
@@ -861,7 +863,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 		goto disconnect;
 	}
 
-	if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef)) {
+	if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
+				     NL80211_IFTYPE_ADHOC)) {
 		sdata_info(sdata,
 			   "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
 			   ifibss->bssid,
@@ -873,17 +876,17 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 	}
 
 	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-					    &params.chandef);
+					    &params.chandef,
+					    NL80211_IFTYPE_ADHOC);
 	if (err < 0)
 		goto disconnect;
-	if (err) {
+	if (err > 0 && !ifibss->userspace_handles_dfs) {
 		/* IBSS-DFS only allowed with a control program */
-		if (!ifibss->userspace_handles_dfs)
-			goto disconnect;
-
-		params.radar_required = true;
+		goto disconnect;
 	}
 
+	params.radar_required = err;
+
 	if (cfg80211_chandef_identical(&params.chandef,
 				       &sdata->vif.bss_conf.chandef)) {
 		ibss_dbg(sdata,
@@ -1636,7 +1639,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 	u32 changed = 0;
 	u32 rate_flags;
 	struct ieee80211_supported_band *sband;
+	enum ieee80211_chanctx_mode chanmode;
+	struct ieee80211_local *local = sdata->local;
+	int radar_detect_width = 0;
 	int i;
+	int ret;
+
+	ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+					    &params->chandef,
+					    sdata->wdev.iftype);
+	if (ret < 0)
+		return ret;
+
+	if (ret > 0) {
+		if (!params->userspace_handles_dfs)
+			return -EINVAL;
+		radar_detect_width = BIT(params->chandef.width);
+	}
+
+	chanmode = (params->channel_fixed && !ret) ?
+		IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
+
+	mutex_lock(&local->chanctx_mtx);
+	ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
+					   radar_detect_width);
+	mutex_unlock(&local->chanctx_mtx);
+	if (ret < 0)
+		return ret;
 
 	if (params->bssid) {
 		memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1648,10 +1677,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 	sdata->u.ibss.control_port = params->control_port;
 	sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs;
 	sdata->u.ibss.basic_rates = params->basic_rates;
+	sdata->u.ibss.last_scan_completed = jiffies;
 
 	/* fix basic_rates if channel does not support these rates */
 	rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
-	sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+	sband = local->hw.wiphy->bands[params->chandef.chan->band];
 	for (i = 0; i < sband->n_bitrates; i++) {
 		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
 			sdata->u.ibss.basic_rates &= ~BIT(i);
@@ -1700,9 +1730,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 	ieee80211_bss_info_change_notify(sdata, changed);
 
 	sdata->smps_mode = IEEE80211_SMPS_OFF;
-	sdata->needed_rx_chains = sdata->local->rx_chains;
+	sdata->needed_rx_chains = local->rx_chains;
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 
 	return 0;
 }
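A recurring edit in ibss.c (and in mesh.c below): cfg80211_chandef_dfs_required() now takes the interface type, and its tri-state return is used directly — negative for an invalid chandef, 0 for no DFS, positive when radar detection is required. That is why radar_required loses its initializer and is later assigned radar_required = err: once the negative case has returned, err is zero or positive and collapses cleanly into a bool. The shape of the pattern as a standalone sketch (join_channel and its parameters are illustrative):

	#include <stdbool.h>

	/* tri-state contract: <0 error, 0 not needed, >0 needed */
	static int join_channel(int dfs_required, bool userspace_handles_dfs,
				bool *radar_required)
	{
		if (dfs_required < 0)
			return dfs_required;	/* invalid chandef */
		if (dfs_required > 0 && !userspace_handles_dfs)
			return -1;		/* DFS without a control program */

		/* any remaining positive value means "radar detection needed";
		 * assigning an int to a bool normalizes it to true/false */
		*radar_required = dfs_required;
		return 0;
	}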
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f169b6ee94ee..ac9836e0aab3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -260,7 +260,7 @@ struct ieee80211_if_ap {
 
 	/* to be used after channel switch. */
 	struct cfg80211_beacon_data *next_beacon;
-	struct list_head vlans;
+	struct list_head vlans; /* write-protected with RTNL and local->mtx */
 
 	struct ps_data ps;
 	atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@ -276,7 +276,7 @@ struct ieee80211_if_wds {
 };
 
 struct ieee80211_if_vlan {
-	struct list_head list;
+	struct list_head list; /* write-protected with RTNL and local->mtx */
 
 	/* used for all tx if the VLAN is configured to 4-addr mode */
 	struct sta_info __rcu *sta;
@@ -692,8 +692,10 @@ struct ieee80211_chanctx {
 	struct list_head list;
 	struct rcu_head rcu_head;
 
+	struct list_head assigned_vifs;
+	struct list_head reserved_vifs;
+
 	enum ieee80211_chanctx_mode mode;
-	int refcount;
 	bool driver_present;
 
 	struct ieee80211_chanctx_conf conf;
@@ -752,11 +754,21 @@ struct ieee80211_sub_if_data {
 	struct mac80211_qos_map __rcu *qos_map;
 
 	struct work_struct csa_finalize_work;
-	int csa_counter_offset_beacon;
-	int csa_counter_offset_presp;
+	u16 csa_counter_offset_beacon[IEEE80211_MAX_CSA_COUNTERS_NUM];
+	u16 csa_counter_offset_presp[IEEE80211_MAX_CSA_COUNTERS_NUM];
 	bool csa_radar_required;
+	bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
 	struct cfg80211_chan_def csa_chandef;
 
+	struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
+	struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+
+	/* context reservation -- protected with chanctx_mtx */
+	struct ieee80211_chanctx *reserved_chanctx;
+	struct cfg80211_chan_def reserved_chandef;
+	bool reserved_radar_required;
+	u8 csa_current_counter;
+
 	/* used to reconfigure hardware SM PS */
 	struct work_struct recalc_smps;
 
@@ -1449,6 +1461,7 @@ __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 				       struct cfg80211_sched_scan_request *req);
 int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sched_scan_end(struct ieee80211_local *local);
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
@@ -1463,6 +1476,7 @@ void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
 
 /* channel switch handling */
+bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local);
 void ieee80211_csa_finalize_work(struct work_struct *work);
 int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 			     struct cfg80211_csa_settings *params);
@@ -1772,6 +1786,16 @@ ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 			  const struct cfg80211_chan_def *chandef,
 			  enum ieee80211_chanctx_mode mode);
 int __must_check
+ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+			      const struct cfg80211_chan_def *chandef,
+			      enum ieee80211_chanctx_mode mode,
+			      bool radar_required);
+int __must_check
+ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+				   u32 *changed);
+int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+
+int __must_check
 ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
 			       const struct cfg80211_chan_def *chandef,
 			       u32 *changed);
@@ -1783,6 +1807,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
 					 bool clear);
+int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+			       struct ieee80211_chanctx *ctx);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
 				   struct ieee80211_chanctx *chanctx);
@@ -1806,6 +1832,20 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
 			  enum nl80211_iftype iftype);
 void ieee80211_recalc_dtim(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
+int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+				 const struct cfg80211_chan_def *chandef,
+				 enum ieee80211_chanctx_mode chanmode,
+				 u8 radar_detect);
+int ieee80211_max_num_channels(struct ieee80211_local *local);
+
+/* TDLS */
+int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+			const u8 *peer, u8 action_code, u8 dialog_token,
+			u16 status_code, u32 peer_capability,
+			const u8 *extra_ies, size_t extra_ies_len);
+int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+			const u8 *peer, enum nl80211_tdls_operation oper);
+
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
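ieee80211_i.h drops the int refcount field from struct ieee80211_chanctx in favor of the two membership lists (assigned_vifs, reserved_vifs), and declares ieee80211_chanctx_refcount() to compute the count on demand; callers such as mlme.c switch from chanctx->refcount > 1 to the helper. The helper's body is not part of this section; one plausible shape, purely as a sketch, is to count the list entries under chanctx_mtx:

	/* sketch only -- the real definition lives in chan.c, not shown here */
	static int chanctx_num_assigned(struct ieee80211_chanctx *ctx)
	{
		struct ieee80211_sub_if_data *sdata;
		int n = 0;

		list_for_each_entry(sdata, &ctx->assigned_vifs,
				    assigned_chanctx_list)
			n++;
		return n;
	}

Deriving the count from the lists removes a second piece of state that could drift out of sync with list membership.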
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b8d331e7d883..388b863e821c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -250,6 +250,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *nsdata;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -300,7 +301,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 		}
 	}
 
-	return 0;
+	mutex_lock(&local->chanctx_mtx);
+	ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+	mutex_unlock(&local->chanctx_mtx);
+	return ret;
 }
 
 static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -395,6 +399,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 	sdata->vif.type = NL80211_IFTYPE_MONITOR;
 	snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
 		 wiphy_name(local->hw.wiphy));
+	sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
 
 	sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
 
@@ -423,7 +428,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 	mutex_unlock(&local->mtx);
 	if (ret) {
 		mutex_lock(&local->iflist_mtx);
-		rcu_assign_pointer(local->monitor_sdata, NULL);
+		RCU_INIT_POINTER(local->monitor_sdata, NULL);
 		mutex_unlock(&local->iflist_mtx);
 		synchronize_net();
 		drv_remove_interface(local, sdata);
@@ -452,7 +457,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
 		return;
 	}
 
-	rcu_assign_pointer(local->monitor_sdata, NULL);
+	RCU_INIT_POINTER(local->monitor_sdata, NULL);
 	mutex_unlock(&local->iflist_mtx);
 
 	synchronize_net();
@@ -492,7 +497,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		if (!sdata->bss)
 			return -ENOLINK;
 
+		mutex_lock(&local->mtx);
 		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+		mutex_unlock(&local->mtx);
 
 		master = container_of(sdata->bss,
 				      struct ieee80211_sub_if_data, u.ap);
@@ -722,8 +729,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 	drv_stop(local);
  err_del_bss:
 	sdata->bss = NULL;
-	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		mutex_lock(&local->mtx);
 		list_del(&sdata->u.vlan.list);
+		mutex_unlock(&local->mtx);
+	}
 	/* might already be clear but that doesn't matter */
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 	return res;
@@ -829,8 +839,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 	cancel_work_sync(&sdata->recalc_smps);
 	sdata_lock(sdata);
+	mutex_lock(&local->mtx);
 	sdata->vif.csa_active = false;
+	if (!ieee80211_csa_needs_block_tx(local))
+		ieee80211_wake_queues_by_reason(&local->hw,
+					IEEE80211_MAX_QUEUE_MAP,
+					IEEE80211_QUEUE_STOP_REASON_CSA);
+	mutex_unlock(&local->mtx);
 	sdata_unlock(sdata);
+
 	cancel_work_sync(&sdata->csa_finalize_work);
 
 	cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -875,8 +892,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
+		mutex_lock(&local->mtx);
 		list_del(&sdata->u.vlan.list);
-		rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+		mutex_unlock(&local->mtx);
+		RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
 		/* no need to tell driver */
 		break;
 	case NL80211_IFTYPE_MONITOR:
@@ -895,7 +914,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		/* relies on synchronize_rcu() below */
-		rcu_assign_pointer(local->p2p_sdata, NULL);
+		RCU_INIT_POINTER(local->p2p_sdata, NULL);
 		/* fall through */
 	default:
 		cancel_work_sync(&sdata->work);
@@ -1267,6 +1286,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
 	sdata->control_port_no_encrypt = false;
 	sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
+	sdata->vif.bss_conf.idle = true;
 
 	sdata->noack_map = 0;
 
@@ -1280,6 +1300,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	INIT_WORK(&sdata->work, ieee80211_iface_work);
 	INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
 	INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
+	INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
+	INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
 
 	switch (type) {
 	case NL80211_IFTYPE_P2P_GO:
@@ -1758,7 +1780,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	}
 	mutex_unlock(&local->iflist_mtx);
 	unregister_netdevice_many(&unreg_list);
-	list_del(&unreg_list);
 
 	list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
 		list_del(&sdata->list);
@@ -1774,20 +1795,19 @@ static int netdev_notify(struct notifier_block *nb,
 	struct ieee80211_sub_if_data *sdata;
 
 	if (state != NETDEV_CHANGENAME)
-		return 0;
+		return NOTIFY_DONE;
 
 	if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
-		return 0;
+		return NOTIFY_DONE;
 
 	if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
-		return 0;
+		return NOTIFY_DONE;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
 	memcpy(sdata->name, dev->name, IFNAMSIZ);
-
 	ieee80211_debugfs_rename_netdev(sdata);
-	return 0;
+
+	return NOTIFY_OK;
 }
 
 static struct notifier_block mac80211_netdev_notifier = {
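netdev_notify() now answers with notifier-chain constants instead of raw 0: NOTIFY_DONE for "not interested" and NOTIFY_OK once the rename has actually been handled (main.c's ifa notifiers below get the same treatment). Numerically NOTIFY_DONE is 0, so behaviour is unchanged; the gain is that the return value states intent. The convention in minimal form:

	static int example_notify(struct notifier_block *nb,
				  unsigned long state, void *ptr)
	{
		if (state != NETDEV_CHANGENAME)
			return NOTIFY_DONE;	/* event is not for us */

		/* ... react to the event ... */

		return NOTIFY_OK;		/* event seen and handled */
	}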
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6ff65a1ebaa9..16d97f044a20 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -325,7 +325,8 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
 	struct ieee80211_key *key;
 	int i, j, err;
 
-	BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
+	if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS))
+		return ERR_PTR(-EINVAL);
 
 	key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
 	if (!key)
 		return ERR_PTR(-ENOMEM);
@@ -481,8 +482,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
 	int idx, ret;
 	bool pairwise;
 
-	BUG_ON(!sdata);
-	BUG_ON(!key);
+	if (WARN_ON(!sdata || !key))
+		return -EINVAL;
 
 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
 	idx = key->conf.keyidx;
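key.c trades BUG_ON() for WARN_ON() plus an error return, the same swap mesh_pathtbl.c and mesh_sync.c get below: an impossible-but-survivable condition should leave a loud trace in the log and fail the call, not halt the machine. The pattern only works because a caller-visible failure mode exists, hence ERR_PTR(-EINVAL) in the allocator and -EINVAL in ieee80211_key_link(). Side by side:

	/* before: a bad index took the whole kernel down */
	BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);

	/* after: loud in dmesg, survivable for the system */
	if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS))
		return ERR_PTR(-EINVAL);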
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4c1bf61bc778..d17c26d6e369 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -340,7 +340,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 
 	sdata_unlock(sdata);
 
-	return NOTIFY_DONE;
+	return NOTIFY_OK;
 }
 #endif
 
@@ -371,7 +371,7 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
 
 	drv_ipv6_addr_change(local, sdata, idev);
 
-	return NOTIFY_DONE;
+	return NOTIFY_OK;
 }
 #endif
 
@@ -446,7 +446,9 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
 	.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
 				IEEE80211_HT_CAP_MAX_AMSDU |
 				IEEE80211_HT_CAP_SGI_20 |
-				IEEE80211_HT_CAP_SGI_40),
+				IEEE80211_HT_CAP_SGI_40 |
+				IEEE80211_HT_CAP_LDPC_CODING |
+				IEEE80211_HT_CAP_40MHZ_INTOLERANT),
 	.mcs = {
 		.rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
 			     0xff, 0xff, 0xff, 0xff, 0xff, },
@@ -954,6 +956,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
 		local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
 
+	local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
+
 	result = wiphy_register(local->hw.wiphy);
 	if (result < 0)
 		goto fail_wiphy_register;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index f70e9cd10552..6495a3f0428d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -366,20 +366,15 @@ int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 		return 0;
 
 	/* find RSN IE */
-	data = ifmsh->ie;
-	while (data < ifmsh->ie + ifmsh->ie_len) {
-		if (*data == WLAN_EID_RSN) {
-			len = data[1] + 2;
-			break;
-		}
-		data++;
-	}
+	data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len);
+	if (!data)
+		return 0;
 
-	if (len) {
-		if (skb_tailroom(skb) < len)
-			return -ENOMEM;
-		memcpy(skb_put(skb, len), data, len);
-	}
+	len = data[1] + 2;
+
+	if (skb_tailroom(skb) < len)
+		return -ENOMEM;
+	memcpy(skb_put(skb, len), data, len);
 
 	return 0;
 }
@@ -684,7 +679,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
 		*pos++ = 0x0;
 		*pos++ = ieee80211_frequency_to_channel(
 				csa->settings.chandef.chan->center_freq);
-		sdata->csa_counter_offset_beacon = hdr_len + 6;
+		sdata->csa_counter_offset_beacon[0] = hdr_len + 6;
 		*pos++ = csa->settings.count;
 		*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
 		*pos++ = 6;
@@ -829,7 +824,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 	bcn = rcu_dereference_protected(ifmsh->beacon,
 					lockdep_is_held(&sdata->wdev.mtx));
-	rcu_assign_pointer(ifmsh->beacon, NULL);
+	RCU_INIT_POINTER(ifmsh->beacon, NULL);
 	kfree_rcu(bcn, rcu_head);
 
 	/* flush STAs and mpaths on this iface */
@@ -903,14 +898,15 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 	}
 
 	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-					    &params.chandef);
+					    &params.chandef,
+					    NL80211_IFTYPE_MESH_POINT);
 	if (err < 0)
 		return false;
-	if (err) {
-		params.radar_required = true;
+	if (err > 0)
 		/* TODO: DFS not (yet) supported */
 		return false;
-	}
+
+	params.radar_required = err;
 
 	if (cfg80211_chandef_identical(&params.chandef,
 				       &sdata->vif.bss_conf.chandef)) {
@@ -1068,7 +1064,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
 
 	/* Remove the CSA and MCSP elements from the beacon */
 	tmp_csa_settings = rcu_dereference(ifmsh->csa);
-	rcu_assign_pointer(ifmsh->csa, NULL);
+	RCU_INIT_POINTER(ifmsh->csa, NULL);
 	if (tmp_csa_settings)
 		kfree_rcu(tmp_csa_settings, rcu_head);
 	ret = ieee80211_mesh_rebuild_beacon(sdata);
@@ -1102,7 +1098,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
 	ret = ieee80211_mesh_rebuild_beacon(sdata);
 	if (ret) {
 		tmp_csa_settings = rcu_dereference(ifmsh->csa);
-		rcu_assign_pointer(ifmsh->csa, NULL);
+		RCU_INIT_POINTER(ifmsh->csa, NULL);
 		kfree_rcu(tmp_csa_settings, rcu_head);
 		return ret;
 	}
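The mesh_add_rsn_ie() rewrite is more than cleanup: the open-coded scan advanced one byte at a time (data++), so a 0x30 byte sitting inside some other element's payload could be misread as the RSN element ID. cfg80211_find_ie() walks whole (id, len, data[len]) records and also bounds-checks the declared lengths. Its essential logic, as a standalone sketch:

	#include <stddef.h>
	#include <stdint.h>

	/* Walk information elements as (id, len, data[len]) records;
	 * a byte-wise scan could "find" an ID byte inside another
	 * element's payload, this cannot. */
	static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
	{
		while (len >= 2 && len >= (size_t)ies[1] + 2) {
			if (ies[0] == eid)
				return ies;
			len -= (size_t)ies[1] + 2;
			ies += (size_t)ies[1] + 2;
		}
		return NULL;
	}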
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index f9514685d45a..94758b9c9ed4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -37,7 +37,7 @@ static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
 	return get_unaligned_le32(preq_elem + offset);
 }
 
-static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
+static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 {
 	if (ae)
 		offset += 6;
@@ -544,9 +544,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 		if (time_after(jiffies, ifmsh->last_sn_update +
 					net_traversal_jiffies(sdata)) ||
 		    time_before(jiffies, ifmsh->last_sn_update)) {
-			target_sn = ++ifmsh->sn;
+			++ifmsh->sn;
 			ifmsh->last_sn_update = jiffies;
 		}
+		target_sn = ifmsh->sn;
 	} else if (is_broadcast_ether_addr(target_addr) &&
 		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
 		rcu_read_lock();
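The hwmp_preq_frame_process() change fixes a subtle staleness: target_sn was only refreshed inside the rate-limited if block, so when the sequence number had been bumped recently the reply appears to have been built from the target SN parsed out of the incoming PREQ rather than the mesh's own current value. Hoisting the assignment out of the conditional makes the reply always carry the current ifmsh->sn. (The companion change gives u16_field_get() a return type matching get_unaligned_le16().) In miniature, with hypothetical variable names:

	/* before: sn only copied when it was just incremented */
	if (time_to_bump_sn) {
		target_sn = ++own_sn;
		last_update = now;
	}

	/* after: bump conditionally, read unconditionally */
	if (time_to_bump_sn) {
		++own_sn;
		last_update = now;
	}
	target_sn = own_sn;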
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7d050ed6fe5a..cf032a8db9d7 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -287,8 +287,10 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
287 struct sk_buff_head failq; 287 struct sk_buff_head failq;
288 unsigned long flags; 288 unsigned long flags;
289 289
290 BUG_ON(gate_mpath == from_mpath); 290 if (WARN_ON(gate_mpath == from_mpath))
291 BUG_ON(!gate_mpath->next_hop); 291 return;
292 if (WARN_ON(!gate_mpath->next_hop))
293 return;
292 294
293 __skb_queue_head_init(&failq); 295 __skb_queue_head_init(&failq);
294 296
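This BUG_ON → WARN_ON conversion (and the one in mesh_sync.c below) leans on WARN_ON() returning the condition it tested, so a violated invariant produces one backtrace and an early return instead of a panic:

	/* WARN_ON() evaluates to its condition, so it can gate a bail-out:
	 * one splat in the log, machine keeps running. */
	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;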
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 2bc5dc25d5ad..09625d6205c3 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -171,7 +171,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
171 u8 cap; 171 u8 cap;
172 172
173 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); 173 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
174 BUG_ON(!rcu_read_lock_held()); 174 WARN_ON(!rcu_read_lock_held());
175 cap = beacon->meshconf->meshconf_cap; 175 cap = beacon->meshconf->meshconf_cap;
176 176
177 spin_lock_bh(&ifmsh->sync_offset_lock); 177 spin_lock_bh(&ifmsh->sync_offset_lock);
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h
index 3b848dad9587..0e4886f881f1 100644
--- a/net/mac80211/michael.h
+++ b/net/mac80211/michael.h
@@ -11,6 +11,7 @@
11#define MICHAEL_H 11#define MICHAEL_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/ieee80211.h>
14 15
15#define MICHAEL_MIC_LEN 8 16#define MICHAEL_MIC_LEN 8
16 17
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 27600a9808ba..3345401be1b3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -975,16 +975,23 @@ static void ieee80211_chswitch_work(struct work_struct *work)
975 /* XXX: shouldn't really modify cfg80211-owned data! */ 975 /* XXX: shouldn't really modify cfg80211-owned data! */
976 ifmgd->associated->channel = sdata->csa_chandef.chan; 976 ifmgd->associated->channel = sdata->csa_chandef.chan;
977 977
978 ieee80211_bss_info_change_notify(sdata, changed);
979
980 mutex_lock(&local->mtx);
981 sdata->vif.csa_active = false;
978 /* XXX: wait for a beacon first? */ 982 /* XXX: wait for a beacon first? */
979 ieee80211_wake_queues_by_reason(&local->hw, 983 if (!ieee80211_csa_needs_block_tx(local))
984 ieee80211_wake_queues_by_reason(&local->hw,
980 IEEE80211_MAX_QUEUE_MAP, 985 IEEE80211_MAX_QUEUE_MAP,
981 IEEE80211_QUEUE_STOP_REASON_CSA); 986 IEEE80211_QUEUE_STOP_REASON_CSA);
987 mutex_unlock(&local->mtx);
982 988
983 ieee80211_bss_info_change_notify(sdata, changed);
984
985 out:
986 sdata->vif.csa_active = false;
987 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 989 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
990
991 ieee80211_sta_reset_beacon_monitor(sdata);
992 ieee80211_sta_reset_conn_monitor(sdata);
993
994out:
988 sdata_unlock(sdata); 995 sdata_unlock(sdata);
989} 996}
990 997
@@ -1089,7 +1096,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1089 } 1096 }
1090 chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf), 1097 chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
1091 struct ieee80211_chanctx, conf); 1098 struct ieee80211_chanctx, conf);
1092 if (chanctx->refcount > 1) { 1099 if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
1093 sdata_info(sdata, 1100 sdata_info(sdata,
1094 "channel switch with multiple interfaces on the same channel, disconnecting\n"); 1101 "channel switch with multiple interfaces on the same channel, disconnecting\n");
1095 ieee80211_queue_work(&local->hw, 1102 ieee80211_queue_work(&local->hw,
@@ -1100,12 +1107,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1100 mutex_unlock(&local->chanctx_mtx); 1107 mutex_unlock(&local->chanctx_mtx);
1101 1108
1102 sdata->csa_chandef = csa_ie.chandef; 1109 sdata->csa_chandef = csa_ie.chandef;
1110
1111 mutex_lock(&local->mtx);
1103 sdata->vif.csa_active = true; 1112 sdata->vif.csa_active = true;
1113 sdata->csa_block_tx = csa_ie.mode;
1104 1114
1105 if (csa_ie.mode) 1115 if (sdata->csa_block_tx)
1106 ieee80211_stop_queues_by_reason(&local->hw, 1116 ieee80211_stop_queues_by_reason(&local->hw,
1107 IEEE80211_MAX_QUEUE_MAP, 1117 IEEE80211_MAX_QUEUE_MAP,
1108 IEEE80211_QUEUE_STOP_REASON_CSA); 1118 IEEE80211_QUEUE_STOP_REASON_CSA);
1119 mutex_unlock(&local->mtx);
1109 1120
1110 if (local->ops->channel_switch) { 1121 if (local->ops->channel_switch) {
1111 /* use driver's channel switch callback */ 1122 /* use driver's channel switch callback */
@@ -1817,6 +1828,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1817 ifmgd->flags = 0; 1828 ifmgd->flags = 0;
1818 mutex_lock(&local->mtx); 1829 mutex_lock(&local->mtx);
1819 ieee80211_vif_release_channel(sdata); 1830 ieee80211_vif_release_channel(sdata);
1831
1832 sdata->vif.csa_active = false;
1833 if (!ieee80211_csa_needs_block_tx(local))
1834 ieee80211_wake_queues_by_reason(&local->hw,
1835 IEEE80211_MAX_QUEUE_MAP,
1836 IEEE80211_QUEUE_STOP_REASON_CSA);
1820 mutex_unlock(&local->mtx); 1837 mutex_unlock(&local->mtx);
1821 1838
1822 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; 1839 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
@@ -2045,6 +2062,7 @@ EXPORT_SYMBOL(ieee80211_ap_probereq_get);
2045 2062
2046static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) 2063static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2047{ 2064{
2065 struct ieee80211_local *local = sdata->local;
2048 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2066 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2049 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 2067 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2050 2068
@@ -2058,10 +2076,14 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2058 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 2076 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
2059 true, frame_buf); 2077 true, frame_buf);
2060 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 2078 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
2079
2080 mutex_lock(&local->mtx);
2061 sdata->vif.csa_active = false; 2081 sdata->vif.csa_active = false;
2062 ieee80211_wake_queues_by_reason(&sdata->local->hw, 2082 if (!ieee80211_csa_needs_block_tx(local))
2083 ieee80211_wake_queues_by_reason(&local->hw,
2063 IEEE80211_MAX_QUEUE_MAP, 2084 IEEE80211_MAX_QUEUE_MAP,
2064 IEEE80211_QUEUE_STOP_REASON_CSA); 2085 IEEE80211_QUEUE_STOP_REASON_CSA);
2086 mutex_unlock(&local->mtx);
2065 2087
2066 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, 2088 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
2067 IEEE80211_DEAUTH_FRAME_LEN); 2089 IEEE80211_DEAUTH_FRAME_LEN);
@@ -3546,6 +3568,9 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
3546 if (local->quiescing) 3568 if (local->quiescing)
3547 return; 3569 return;
3548 3570
3571 if (sdata->vif.csa_active)
3572 return;
3573
3549 sdata->u.mgd.connection_loss = false; 3574 sdata->u.mgd.connection_loss = false;
3550 ieee80211_queue_work(&sdata->local->hw, 3575 ieee80211_queue_work(&sdata->local->hw,
3551 &sdata->u.mgd.beacon_connection_loss_work); 3576 &sdata->u.mgd.beacon_connection_loss_work);
@@ -3561,6 +3586,9 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
3561 if (local->quiescing) 3586 if (local->quiescing)
3562 return; 3587 return;
3563 3588
3589 if (sdata->vif.csa_active)
3590 return;
3591
3564 ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); 3592 ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
3565} 3593}
3566 3594
@@ -3707,7 +3735,7 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3707 ieee80211_recalc_ps(local, latency_usec); 3735 ieee80211_recalc_ps(local, latency_usec);
3708 mutex_unlock(&local->iflist_mtx); 3736 mutex_unlock(&local->iflist_mtx);
3709 3737
3710 return 0; 3738 return NOTIFY_OK;
3711} 3739}
3712 3740
3713static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, 3741static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
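The mlme.c changes above wake the CSA-stopped queues only when no interface still needs TX blocked, via ieee80211_csa_needs_block_tx(). The helper is not part of this hunk; a plausible sketch, assuming it just scans the interface list under local->mtx:

	static bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
	{
		struct ieee80211_sub_if_data *sdata;

		lockdep_assert_held(&local->mtx);

		/* keep queues stopped while any vif is mid-switch with a
		 * CSA mode that requires blocking transmissions */
		list_for_each_entry(sdata, &local->interfaces, list)
			if (sdata->vif.csa_active && sdata->csa_block_tx)
				return true;

		return false;
	}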
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 26fd94fa0aed..1c1469c36dca 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -657,6 +657,17 @@ minstrel_free(void *priv)
657 kfree(priv); 657 kfree(priv);
658} 658}
659 659
660static u32 minstrel_get_expected_throughput(void *priv_sta)
661{
662 struct minstrel_sta_info *mi = priv_sta;
663 int idx = mi->max_tp_rate[0];
664
 665 /* convert pkts per second to kbps (1200 is the average pkt size
 666 * used for computing cur_tp)
 667 */
668 return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024;
669}
670
660const struct rate_control_ops mac80211_minstrel = { 671const struct rate_control_ops mac80211_minstrel = {
661 .name = "minstrel", 672 .name = "minstrel",
662 .tx_status = minstrel_tx_status, 673 .tx_status = minstrel_tx_status,
@@ -670,6 +681,7 @@ const struct rate_control_ops mac80211_minstrel = {
670 .add_sta_debugfs = minstrel_add_sta_debugfs, 681 .add_sta_debugfs = minstrel_add_sta_debugfs,
671 .remove_sta_debugfs = minstrel_remove_sta_debugfs, 682 .remove_sta_debugfs = minstrel_remove_sta_debugfs,
672#endif 683#endif
684 .get_expected_throughput = minstrel_get_expected_throughput,
673}; 685};
674 686
675int __init 687int __init
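minstrel keeps cur_tp as a MINSTREL_FRAC-scaled packets-per-second figure computed for a fixed 1200-byte average packet, so the new callback only needs unit conversion. The arithmetic, unrolled:

	/* scaled pkts/s -> plain pkts/s -> bytes/s -> bits/s -> kbit/s */
	u32 pkts_per_sec = MINSTREL_TRUNC(mi->r[idx].cur_tp);
	u32 kbps = pkts_per_sec * 1200 * 8 / 1024;

e.g. 500 pkt/s gives 500 * 1200 * 8 / 1024 ≈ 4687 kbps.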
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index bccaf854a309..85c1e74b7714 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -22,7 +22,7 @@
22#define MCS_NBITS (AVG_PKT_SIZE << 3) 22#define MCS_NBITS (AVG_PKT_SIZE << 3)
23 23
24/* Number of symbols for a packet with (bps) bits per symbol */ 24/* Number of symbols for a packet with (bps) bits per symbol */
25#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) 25#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
26 26
27/* Transmission time (nanoseconds) for a packet containing (syms) symbols */ 27/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
28#define MCS_SYMBOL_TIME(sgi, syms) \ 28#define MCS_SYMBOL_TIME(sgi, syms) \
@@ -226,8 +226,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
227 227
228 nsecs += minstrel_mcs_groups[group].duration[rate]; 228 nsecs += minstrel_mcs_groups[group].duration[rate];
229 tp = 1000000 * ((prob * 1000) / nsecs);
230 229
230 /* prob is scaled - see MINSTREL_FRAC above */
231 tp = 1000000 * ((prob * 1000) / nsecs);
231 mr->cur_tp = MINSTREL_TRUNC(tp); 232 mr->cur_tp = MINSTREL_TRUNC(tp);
232} 233}
233 234
@@ -1031,6 +1032,22 @@ minstrel_ht_free(void *priv)
1031 mac80211_minstrel.free(priv); 1032 mac80211_minstrel.free(priv);
1032} 1033}
1033 1034
1035static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
1036{
1037 struct minstrel_ht_sta_priv *msp = priv_sta;
1038 struct minstrel_ht_sta *mi = &msp->ht;
1039 int i, j;
1040
1041 if (!msp->is_ht)
1042 return mac80211_minstrel.get_expected_throughput(priv_sta);
1043
1044 i = mi->max_tp_rate / MCS_GROUP_RATES;
1045 j = mi->max_tp_rate % MCS_GROUP_RATES;
1046
 1047 /* convert cur_tp from pkts per second to kbps */
1048 return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
1049}
1050
1034static const struct rate_control_ops mac80211_minstrel_ht = { 1051static const struct rate_control_ops mac80211_minstrel_ht = {
1035 .name = "minstrel_ht", 1052 .name = "minstrel_ht",
1036 .tx_status = minstrel_ht_tx_status, 1053 .tx_status = minstrel_ht_tx_status,
@@ -1045,6 +1062,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
1045 .add_sta_debugfs = minstrel_ht_add_sta_debugfs, 1062 .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
1046 .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, 1063 .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
1047#endif 1064#endif
1065 .get_expected_throughput = minstrel_ht_get_expected_throughput,
1048}; 1066};
1049 1067
1050 1068
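For HT the best-throughput rate is a flat index across MCS groups, so the callback splits it with a divmod before indexing; the MCS_NSYMS change is purely cosmetic, since DIV_ROUND_UP expands to the same ((n) + (d) - 1) / (d) ceiling division. Sketch of the index split:

	int group = mi->max_tp_rate / MCS_GROUP_RATES;	/* which MCS group */
	int rate  = mi->max_tp_rate % MCS_GROUP_RATES;	/* rate inside it  */

	/* cur_tp is pkts/s here; AVG_PKT_SIZE bytes assumed per packet */
	u32 kbps = mi->groups[group].rates[rate].cur_tp * AVG_PKT_SIZE * 8 / 1024;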
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2b608b2b70ec..394e201cde6d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -54,24 +54,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
54 return skb; 54 return skb;
55} 55}
56 56
57static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len) 57static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
58{ 58{
59 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 59 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
60 struct ieee80211_hdr *hdr; 60 struct ieee80211_hdr *hdr = (void *)skb->data;
61
62 hdr = (void *)(skb->data);
63 61
64 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 62 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
65 RX_FLAG_FAILED_PLCP_CRC | 63 RX_FLAG_FAILED_PLCP_CRC |
66 RX_FLAG_AMPDU_IS_ZEROLEN)) 64 RX_FLAG_AMPDU_IS_ZEROLEN))
67 return 1; 65 return true;
66
68 if (unlikely(skb->len < 16 + present_fcs_len)) 67 if (unlikely(skb->len < 16 + present_fcs_len))
69 return 1; 68 return true;
69
70 if (ieee80211_is_ctl(hdr->frame_control) && 70 if (ieee80211_is_ctl(hdr->frame_control) &&
71 !ieee80211_is_pspoll(hdr->frame_control) && 71 !ieee80211_is_pspoll(hdr->frame_control) &&
72 !ieee80211_is_back_req(hdr->frame_control)) 72 !ieee80211_is_back_req(hdr->frame_control))
73 return 1; 73 return true;
74 return 0; 74
75 return false;
75} 76}
76 77
77static int 78static int
@@ -3191,7 +3192,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
3191} 3192}
3192 3193
3193/* 3194/*
3194 * This is the actual Rx frames handler. as it blongs to Rx path it must 3195 * This is the actual Rx frames handler. as it belongs to Rx path it must
3195 * be called with rcu_read_lock protection. 3196 * be called with rcu_read_lock protection.
3196 */ 3197 */
3197static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3198static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3ce7f2c8539a..f40661eb75b5 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -309,7 +309,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
309 if (local->scan_req != local->int_scan_req) 309 if (local->scan_req != local->int_scan_req)
310 cfg80211_scan_done(local->scan_req, aborted); 310 cfg80211_scan_done(local->scan_req, aborted);
311 local->scan_req = NULL; 311 local->scan_req = NULL;
312 rcu_assign_pointer(local->scan_sdata, NULL); 312 RCU_INIT_POINTER(local->scan_sdata, NULL);
313 313
314 local->scanning = 0; 314 local->scanning = 0;
315 local->scan_chandef.chan = NULL; 315 local->scan_chandef.chan = NULL;
@@ -559,7 +559,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
559 ieee80211_recalc_idle(local); 559 ieee80211_recalc_idle(local);
560 560
561 local->scan_req = NULL; 561 local->scan_req = NULL;
562 rcu_assign_pointer(local->scan_sdata, NULL); 562 RCU_INIT_POINTER(local->scan_sdata, NULL);
563 } 563 }
564 564
565 return rc; 565 return rc;
@@ -773,7 +773,7 @@ void ieee80211_scan_work(struct work_struct *work)
773 int rc; 773 int rc;
774 774
775 local->scan_req = NULL; 775 local->scan_req = NULL;
776 rcu_assign_pointer(local->scan_sdata, NULL); 776 RCU_INIT_POINTER(local->scan_sdata, NULL);
777 777
778 rc = __ieee80211_start_scan(sdata, req); 778 rc = __ieee80211_start_scan(sdata, req);
779 if (rc) { 779 if (rc) {
@@ -1014,7 +1014,7 @@ out_free:
1014 1014
1015 if (ret) { 1015 if (ret) {
1016 /* Clean in case of failure after HW restart or upon resume. */ 1016 /* Clean in case of failure after HW restart or upon resume. */
1017 rcu_assign_pointer(local->sched_scan_sdata, NULL); 1017 RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
1018 local->sched_scan_req = NULL; 1018 local->sched_scan_req = NULL;
1019 } 1019 }
1020 1020
@@ -1076,12 +1076,8 @@ void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
1076} 1076}
1077EXPORT_SYMBOL(ieee80211_sched_scan_results); 1077EXPORT_SYMBOL(ieee80211_sched_scan_results);
1078 1078
1079void ieee80211_sched_scan_stopped_work(struct work_struct *work) 1079void ieee80211_sched_scan_end(struct ieee80211_local *local)
1080{ 1080{
1081 struct ieee80211_local *local =
1082 container_of(work, struct ieee80211_local,
1083 sched_scan_stopped_work);
1084
1085 mutex_lock(&local->mtx); 1081 mutex_lock(&local->mtx);
1086 1082
1087 if (!rcu_access_pointer(local->sched_scan_sdata)) { 1083 if (!rcu_access_pointer(local->sched_scan_sdata)) {
@@ -1089,7 +1085,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1089 return; 1085 return;
1090 } 1086 }
1091 1087
1092 rcu_assign_pointer(local->sched_scan_sdata, NULL); 1088 RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
1093 1089
1094 /* If sched scan was aborted by the driver. */ 1090 /* If sched scan was aborted by the driver. */
1095 local->sched_scan_req = NULL; 1091 local->sched_scan_req = NULL;
@@ -1099,6 +1095,15 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1099 cfg80211_sched_scan_stopped(local->hw.wiphy); 1095 cfg80211_sched_scan_stopped(local->hw.wiphy);
1100} 1096}
1101 1097
1098void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1099{
1100 struct ieee80211_local *local =
1101 container_of(work, struct ieee80211_local,
1102 sched_scan_stopped_work);
1103
1104 ieee80211_sched_scan_end(local);
1105}
1106
1102void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) 1107void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
1103{ 1108{
1104 struct ieee80211_local *local = hw_to_local(hw); 1109 struct ieee80211_local *local = hw_to_local(hw);
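Every scan.c change above is the same substitution, also applied in mesh.c and util.c in this patch: storing NULL publishes nothing that readers could see half-initialized, so the write barrier in rcu_assign_pointer() buys nothing. The rule, side by side:

	/* publishing a real object: readers must not see it before its
	 * fields are written, so keep the barrier */
	rcu_assign_pointer(local->scan_sdata, sdata);

	/* clearing: nothing to order against NULL, so the cheaper
	 * RCU_INIT_POINTER() is sufficient */
	RCU_INIT_POINTER(local->scan_sdata, NULL);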
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 847d92f6bef6..a9b46d8ea22f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -240,6 +240,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
240 240
241 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 241 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
242 242
243 kfree(rcu_dereference_raw(sta->sta.rates));
243 kfree(sta); 244 kfree(sta);
244} 245}
245 246
@@ -552,7 +553,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
552int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 553int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
553{ 554{
554 struct ieee80211_local *local = sta->local; 555 struct ieee80211_local *local = sta->local;
555 int err = 0; 556 int err;
556 557
557 might_sleep(); 558 might_sleep();
558 559
@@ -570,7 +571,6 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
570 571
571 return 0; 572 return 0;
572 out_free: 573 out_free:
573 BUG_ON(!err);
574 sta_info_free(local, sta); 574 sta_info_free(local, sta);
575 return err; 575 return err;
576} 576}
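sta_info_free() now also frees the RCU-managed rate table. By this point the station is unreachable and no RCU readers can still hold the pointer, which is what makes the unchecked raw dereference acceptable:

	/* no readers can see this sta any more: a raw dereference is safe,
	 * and freeing here plugs the rates-table leak */
	kfree(rcu_dereference_raw(sta->sta.rates));
	kfree(sta);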
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 60cb7a665976..ba29ebc86141 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -541,6 +541,23 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
541 */ 541 */
542#define STA_LOST_PKT_THRESHOLD 50 542#define STA_LOST_PKT_THRESHOLD 50
543 543
544static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
545{
546 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
547
548 /* This packet was aggregated but doesn't carry status info */
549 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
550 !(info->flags & IEEE80211_TX_STAT_AMPDU))
551 return;
552
553 if (++sta->lost_packets < STA_LOST_PKT_THRESHOLD)
554 return;
555
556 cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
557 sta->lost_packets, GFP_ATOMIC);
558 sta->lost_packets = 0;
559}
560
544void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 561void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
545{ 562{
546 struct sk_buff *skb2; 563 struct sk_buff *skb2;
@@ -680,12 +697,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
680 if (info->flags & IEEE80211_TX_STAT_ACK) { 697 if (info->flags & IEEE80211_TX_STAT_ACK) {
681 if (sta->lost_packets) 698 if (sta->lost_packets)
682 sta->lost_packets = 0; 699 sta->lost_packets = 0;
683 } else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) { 700 } else {
684 cfg80211_cqm_pktloss_notify(sta->sdata->dev, 701 ieee80211_lost_packet(sta, skb);
685 sta->sta.addr,
686 sta->lost_packets,
687 GFP_ATOMIC);
688 sta->lost_packets = 0;
689 } 702 }
690 } 703 }
691 704
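The extracted helper adds one filter: frames sent inside an A-MPDU that carry no per-frame status (IEEE80211_TX_CTL_AMPDU set, IEEE80211_TX_STAT_AMPDU clear) no longer count as losses. The resulting flow, condensed back into the caller:

	if (info->flags & IEEE80211_TX_STAT_ACK) {
		sta->lost_packets = 0;		/* any ACK resets the count */
	} else if (!((info->flags & IEEE80211_TX_CTL_AMPDU) &&
		     !(info->flags & IEEE80211_TX_STAT_AMPDU)) &&
		   ++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) {
		/* 50 consecutive losses -> CQM packet-loss event */
		cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
					    sta->lost_packets, GFP_ATOMIC);
		sta->lost_packets = 0;
	}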
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
new file mode 100644
index 000000000000..652813b2d3df
--- /dev/null
+++ b/net/mac80211/tdls.c
@@ -0,0 +1,325 @@
1/*
2 * mac80211 TDLS handling code
3 *
4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2014, Intel Corporation
6 *
7 * This file is GPLv2 as found in COPYING.
8 */
9
10#include <linux/ieee80211.h>
11#include "ieee80211_i.h"
12
13static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
14{
15 u8 *pos = (void *)skb_put(skb, 7);
16
17 *pos++ = WLAN_EID_EXT_CAPABILITY;
18 *pos++ = 5; /* len */
19 *pos++ = 0x0;
20 *pos++ = 0x0;
21 *pos++ = 0x0;
22 *pos++ = 0x0;
23 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
24}
25
26static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
27{
28 struct ieee80211_local *local = sdata->local;
29 u16 capab;
30
31 capab = 0;
32 if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
33 return capab;
34
35 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
36 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
37 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
38 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
39
40 return capab;
41}
42
43static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
44 const u8 *peer, const u8 *bssid)
45{
46 struct ieee80211_tdls_lnkie *lnkid;
47
48 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
49
50 lnkid->ie_type = WLAN_EID_LINK_ID;
51 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
52
53 memcpy(lnkid->bssid, bssid, ETH_ALEN);
54 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
55 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
56}
57
58static int
59ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
60 const u8 *peer, u8 action_code, u8 dialog_token,
61 u16 status_code, struct sk_buff *skb)
62{
63 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
64 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
65 struct ieee80211_tdls_data *tf;
66
67 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
68
69 memcpy(tf->da, peer, ETH_ALEN);
70 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
71 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
72 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
73
74 switch (action_code) {
75 case WLAN_TDLS_SETUP_REQUEST:
76 tf->category = WLAN_CATEGORY_TDLS;
77 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
78
79 skb_put(skb, sizeof(tf->u.setup_req));
80 tf->u.setup_req.dialog_token = dialog_token;
81 tf->u.setup_req.capability =
82 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
83
84 ieee80211_add_srates_ie(sdata, skb, false, band);
85 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
86 ieee80211_tdls_add_ext_capab(skb);
87 break;
88 case WLAN_TDLS_SETUP_RESPONSE:
89 tf->category = WLAN_CATEGORY_TDLS;
90 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
91
92 skb_put(skb, sizeof(tf->u.setup_resp));
93 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
94 tf->u.setup_resp.dialog_token = dialog_token;
95 tf->u.setup_resp.capability =
96 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
97
98 ieee80211_add_srates_ie(sdata, skb, false, band);
99 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
100 ieee80211_tdls_add_ext_capab(skb);
101 break;
102 case WLAN_TDLS_SETUP_CONFIRM:
103 tf->category = WLAN_CATEGORY_TDLS;
104 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
105
106 skb_put(skb, sizeof(tf->u.setup_cfm));
107 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
108 tf->u.setup_cfm.dialog_token = dialog_token;
109 break;
110 case WLAN_TDLS_TEARDOWN:
111 tf->category = WLAN_CATEGORY_TDLS;
112 tf->action_code = WLAN_TDLS_TEARDOWN;
113
114 skb_put(skb, sizeof(tf->u.teardown));
115 tf->u.teardown.reason_code = cpu_to_le16(status_code);
116 break;
117 case WLAN_TDLS_DISCOVERY_REQUEST:
118 tf->category = WLAN_CATEGORY_TDLS;
119 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
120
121 skb_put(skb, sizeof(tf->u.discover_req));
122 tf->u.discover_req.dialog_token = dialog_token;
123 break;
124 default:
125 return -EINVAL;
126 }
127
128 return 0;
129}
130
131static int
132ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
133 const u8 *peer, u8 action_code, u8 dialog_token,
134 u16 status_code, struct sk_buff *skb)
135{
136 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
137 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
138 struct ieee80211_mgmt *mgmt;
139
140 mgmt = (void *)skb_put(skb, 24);
141 memset(mgmt, 0, 24);
142 memcpy(mgmt->da, peer, ETH_ALEN);
143 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
144 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
145
146 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
147 IEEE80211_STYPE_ACTION);
148
149 switch (action_code) {
150 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
151 skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
152 mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
153 mgmt->u.action.u.tdls_discover_resp.action_code =
154 WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
155 mgmt->u.action.u.tdls_discover_resp.dialog_token =
156 dialog_token;
157 mgmt->u.action.u.tdls_discover_resp.capability =
158 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
159
160 ieee80211_add_srates_ie(sdata, skb, false, band);
161 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
162 ieee80211_tdls_add_ext_capab(skb);
163 break;
164 default:
165 return -EINVAL;
166 }
167
168 return 0;
169}
170
171int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
172 const u8 *peer, u8 action_code, u8 dialog_token,
173 u16 status_code, u32 peer_capability,
174 const u8 *extra_ies, size_t extra_ies_len)
175{
176 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
177 struct ieee80211_local *local = sdata->local;
178 struct sk_buff *skb = NULL;
179 bool send_direct;
180 int ret;
181
182 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
183 return -ENOTSUPP;
184
185 /* make sure we are in managed mode, and associated */
186 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
187 !sdata->u.mgd.associated)
188 return -EINVAL;
189
190 tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
191 action_code, peer);
192
193 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
194 max(sizeof(struct ieee80211_mgmt),
195 sizeof(struct ieee80211_tdls_data)) +
196 50 + /* supported rates */
197 7 + /* ext capab */
198 extra_ies_len +
199 sizeof(struct ieee80211_tdls_lnkie));
200 if (!skb)
201 return -ENOMEM;
202
203 skb_reserve(skb, local->hw.extra_tx_headroom);
204
205 switch (action_code) {
206 case WLAN_TDLS_SETUP_REQUEST:
207 case WLAN_TDLS_SETUP_RESPONSE:
208 case WLAN_TDLS_SETUP_CONFIRM:
209 case WLAN_TDLS_TEARDOWN:
210 case WLAN_TDLS_DISCOVERY_REQUEST:
211 ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
212 action_code, dialog_token,
213 status_code, skb);
214 send_direct = false;
215 break;
216 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
217 ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
218 dialog_token, status_code,
219 skb);
220 send_direct = true;
221 break;
222 default:
223 ret = -ENOTSUPP;
224 break;
225 }
226
227 if (ret < 0)
228 goto fail;
229
230 if (extra_ies_len)
231 memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
232
233 /* the TDLS link IE is always added last */
234 switch (action_code) {
235 case WLAN_TDLS_SETUP_REQUEST:
236 case WLAN_TDLS_SETUP_CONFIRM:
237 case WLAN_TDLS_TEARDOWN:
238 case WLAN_TDLS_DISCOVERY_REQUEST:
239 /* we are the initiator */
240 ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
241 sdata->u.mgd.bssid);
242 break;
243 case WLAN_TDLS_SETUP_RESPONSE:
244 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
245 /* we are the responder */
246 ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
247 sdata->u.mgd.bssid);
248 break;
249 default:
250 ret = -ENOTSUPP;
251 goto fail;
252 }
253
254 if (send_direct) {
255 ieee80211_tx_skb(sdata, skb);
256 return 0;
257 }
258
259 /*
 260 * According to 802.11z, setup req/resp frames are sent in AC_BK;
 261 * all other TDLS frames default to AC_VI.
262 */
263 switch (action_code) {
264 case WLAN_TDLS_SETUP_REQUEST:
265 case WLAN_TDLS_SETUP_RESPONSE:
266 skb_set_queue_mapping(skb, IEEE80211_AC_BK);
267 skb->priority = 2;
268 break;
269 default:
270 skb_set_queue_mapping(skb, IEEE80211_AC_VI);
271 skb->priority = 5;
272 break;
273 }
274
275 /* disable bottom halves when entering the Tx path */
276 local_bh_disable();
277 ret = ieee80211_subif_start_xmit(skb, dev);
278 local_bh_enable();
279
280 return ret;
281
282fail:
283 dev_kfree_skb(skb);
284 return ret;
285}
286
287int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
288 const u8 *peer, enum nl80211_tdls_operation oper)
289{
290 struct sta_info *sta;
291 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
292
293 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
294 return -ENOTSUPP;
295
296 if (sdata->vif.type != NL80211_IFTYPE_STATION)
297 return -EINVAL;
298
299 tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
300
301 switch (oper) {
302 case NL80211_TDLS_ENABLE_LINK:
303 rcu_read_lock();
304 sta = sta_info_get(sdata, peer);
305 if (!sta) {
306 rcu_read_unlock();
307 return -ENOLINK;
308 }
309
310 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
311 rcu_read_unlock();
312 break;
313 case NL80211_TDLS_DISABLE_LINK:
314 return sta_info_destroy_addr(sdata, peer);
315 case NL80211_TDLS_TEARDOWN:
316 case NL80211_TDLS_SETUP:
317 case NL80211_TDLS_DISCOVERY_REQ:
318 /* We don't support in-driver setup/teardown/discovery */
319 return -ENOTSUPP;
320 default:
321 return -ENOTSUPP;
322 }
323
324 return 0;
325}
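ieee80211_tdls_add_ext_capab() at the top of the new file emits a fixed 7-byte Extended Capabilities element. Spelled out as data (TDLS support is bit 37 of the capability field, i.e. BIT(5) of the fifth capability octet):

	static const u8 tdls_ext_capab[] = {
		WLAN_EID_EXT_CAPABILITY, 5,	/* EID 127, 5 capability octets */
		0x00, 0x00, 0x00, 0x00,
		WLAN_EXT_CAPA5_TDLS_ENABLED,	/* BIT(5) of octet 5 */
	};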
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index cec5b60487a4..cfe1a0688b5c 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -184,6 +184,20 @@ TRACE_EVENT(drv_return_bool,
184 "true" : "false") 184 "true" : "false")
185); 185);
186 186
187TRACE_EVENT(drv_return_u32,
188 TP_PROTO(struct ieee80211_local *local, u32 ret),
189 TP_ARGS(local, ret),
190 TP_STRUCT__entry(
191 LOCAL_ENTRY
192 __field(u32, ret)
193 ),
194 TP_fast_assign(
195 LOCAL_ASSIGN;
196 __entry->ret = ret;
197 ),
198 TP_printk(LOCAL_PR_FMT " - %u", LOCAL_PR_ARG, __entry->ret)
199);
200
187TRACE_EVENT(drv_return_u64, 201TRACE_EVENT(drv_return_u64,
188 TP_PROTO(struct ieee80211_local *local, u64 ret), 202 TP_PROTO(struct ieee80211_local *local, u64 ret),
189 TP_ARGS(local, ret), 203 TP_ARGS(local, ret),
@@ -1375,6 +1389,91 @@ TRACE_EVENT(drv_change_chanctx,
1375 ) 1389 )
1376); 1390);
1377 1391
1392#if !defined(__TRACE_VIF_ENTRY)
1393#define __TRACE_VIF_ENTRY
1394struct trace_vif_entry {
1395 enum nl80211_iftype vif_type;
1396 bool p2p;
1397 char vif_name[IFNAMSIZ];
1398} __packed;
1399
1400struct trace_chandef_entry {
1401 u32 control_freq;
1402 u32 chan_width;
1403 u32 center_freq1;
1404 u32 center_freq2;
1405} __packed;
1406
1407struct trace_switch_entry {
1408 struct trace_vif_entry vif;
1409 struct trace_chandef_entry old_chandef;
1410 struct trace_chandef_entry new_chandef;
1411} __packed;
1412
1413#define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from
1414#endif
1415
1416TRACE_EVENT(drv_switch_vif_chanctx,
1417 TP_PROTO(struct ieee80211_local *local,
1418 struct ieee80211_vif_chanctx_switch *vifs,
1419 int n_vifs, enum ieee80211_chanctx_switch_mode mode),
1420 TP_ARGS(local, vifs, n_vifs, mode),
1421
1422 TP_STRUCT__entry(
1423 LOCAL_ENTRY
1424 __field(int, n_vifs)
1425 __field(u32, mode)
1426 __dynamic_array(u8, vifs,
1427 sizeof(struct trace_switch_entry) * n_vifs)
1428 ),
1429
1430 TP_fast_assign(
1431 LOCAL_ASSIGN;
1432 __entry->n_vifs = n_vifs;
1433 __entry->mode = mode;
1434 {
1435 struct trace_switch_entry *local_vifs =
1436 __get_dynamic_array(vifs);
1437 int i;
1438
1439 for (i = 0; i < n_vifs; i++) {
1440 struct ieee80211_sub_if_data *sdata;
1441
1442 sdata = container_of(vifs[i].vif,
1443 struct ieee80211_sub_if_data,
1444 vif);
1445
1446 SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type);
1447 SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p);
1448 strncpy(local_vifs[i].vif.vif_name,
1449 sdata->name,
1450 sizeof(local_vifs[i].vif.vif_name));
1451 SWITCH_ENTRY_ASSIGN(old_chandef.control_freq,
1452 old_ctx->def.chan->center_freq);
1453 SWITCH_ENTRY_ASSIGN(old_chandef.chan_width,
1454 old_ctx->def.width);
1455 SWITCH_ENTRY_ASSIGN(old_chandef.center_freq1,
1456 old_ctx->def.center_freq1);
1457 SWITCH_ENTRY_ASSIGN(old_chandef.center_freq2,
1458 old_ctx->def.center_freq2);
1459 SWITCH_ENTRY_ASSIGN(new_chandef.control_freq,
1460 new_ctx->def.chan->center_freq);
1461 SWITCH_ENTRY_ASSIGN(new_chandef.chan_width,
1462 new_ctx->def.width);
1463 SWITCH_ENTRY_ASSIGN(new_chandef.center_freq1,
1464 new_ctx->def.center_freq1);
1465 SWITCH_ENTRY_ASSIGN(new_chandef.center_freq2,
1466 new_ctx->def.center_freq2);
1467 }
1468 }
1469 ),
1470
1471 TP_printk(
1472 LOCAL_PR_FMT " n_vifs:%d mode:%d",
1473 LOCAL_PR_ARG, __entry->n_vifs, __entry->mode
1474 )
1475);
1476
1378DECLARE_EVENT_CLASS(local_sdata_chanctx, 1477DECLARE_EVENT_CLASS(local_sdata_chanctx,
1379 TP_PROTO(struct ieee80211_local *local, 1478 TP_PROTO(struct ieee80211_local *local,
1380 struct ieee80211_sub_if_data *sdata, 1479 struct ieee80211_sub_if_data *sdata,
@@ -1499,6 +1598,24 @@ DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
1499 TP_ARGS(local, sdata) 1598 TP_ARGS(local, sdata)
1500); 1599);
1501 1600
1601TRACE_EVENT(drv_get_expected_throughput,
1602 TP_PROTO(struct ieee80211_sta *sta),
1603
1604 TP_ARGS(sta),
1605
1606 TP_STRUCT__entry(
1607 STA_ENTRY
1608 ),
1609
1610 TP_fast_assign(
1611 STA_ASSIGN;
1612 ),
1613
1614 TP_printk(
1615 STA_PR_FMT, STA_PR_ARG
1616 )
1617);
1618
1502/* 1619/*
1503 * Tracing for API calls that drivers call. 1620 * Tracing for API calls that drivers call.
1504 */ 1621 */
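drv_return_u32 fills the gap between the existing bool and u64 return-value events and pairs with the new drv_get_expected_throughput tracepoint. A sketch of the matching drv_*() wrapper as it would look in driver-ops.h (the wrapper body is an assumption; only the tracepoints are from this hunk):

	static inline u32
	drv_get_expected_throughput(struct ieee80211_local *local,
				    struct ieee80211_sta *sta)
	{
		u32 ret = 0;

		trace_drv_get_expected_throughput(sta);
		if (local->ops->get_expected_throughput)
			ret = local->ops->get_expected_throughput(sta);
		trace_drv_return_u32(local, ret);

		return ret;
	}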
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 19d36d4117e0..5214686d9fd1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2328,7 +2328,8 @@ void ieee80211_tx_pending(unsigned long data)
2328/* functions for drivers to get certain frames */ 2328/* functions for drivers to get certain frames */
2329 2329
2330static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2330static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2331 struct ps_data *ps, struct sk_buff *skb) 2331 struct ps_data *ps, struct sk_buff *skb,
2332 bool is_template)
2332{ 2333{
2333 u8 *pos, *tim; 2334 u8 *pos, *tim;
2334 int aid0 = 0; 2335 int aid0 = 0;
@@ -2341,11 +2342,12 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2341 * checking byte-for-byte */ 2342 * checking byte-for-byte */
2342 have_bits = !bitmap_empty((unsigned long *)ps->tim, 2343 have_bits = !bitmap_empty((unsigned long *)ps->tim,
2343 IEEE80211_MAX_AID+1); 2344 IEEE80211_MAX_AID+1);
2344 2345 if (!is_template) {
2345 if (ps->dtim_count == 0) 2346 if (ps->dtim_count == 0)
2346 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1; 2347 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2347 else 2348 else
2348 ps->dtim_count--; 2349 ps->dtim_count--;
2350 }
2349 2351
2350 tim = pos = (u8 *) skb_put(skb, 6); 2352 tim = pos = (u8 *) skb_put(skb, 6);
2351 *pos++ = WLAN_EID_TIM; 2353 *pos++ = WLAN_EID_TIM;
@@ -2391,7 +2393,8 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2391} 2393}
2392 2394
2393static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2395static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2394 struct ps_data *ps, struct sk_buff *skb) 2396 struct ps_data *ps, struct sk_buff *skb,
2397 bool is_template)
2395{ 2398{
2396 struct ieee80211_local *local = sdata->local; 2399 struct ieee80211_local *local = sdata->local;
2397 2400
@@ -2403,24 +2406,24 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2403 * of the tim bitmap in mac80211 and the driver. 2406 * of the tim bitmap in mac80211 and the driver.
2404 */ 2407 */
2405 if (local->tim_in_locked_section) { 2408 if (local->tim_in_locked_section) {
2406 __ieee80211_beacon_add_tim(sdata, ps, skb); 2409 __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
2407 } else { 2410 } else {
2408 spin_lock_bh(&local->tim_lock); 2411 spin_lock_bh(&local->tim_lock);
2409 __ieee80211_beacon_add_tim(sdata, ps, skb); 2412 __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
2410 spin_unlock_bh(&local->tim_lock); 2413 spin_unlock_bh(&local->tim_lock);
2411 } 2414 }
2412 2415
2413 return 0; 2416 return 0;
2414} 2417}
2415 2418
2416static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata, 2419static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
2417 struct beacon_data *beacon) 2420 struct beacon_data *beacon)
2418{ 2421{
2419 struct probe_resp *resp; 2422 struct probe_resp *resp;
2420 int counter_offset_beacon = sdata->csa_counter_offset_beacon;
2421 int counter_offset_presp = sdata->csa_counter_offset_presp;
2422 u8 *beacon_data; 2423 u8 *beacon_data;
2423 size_t beacon_data_len; 2424 size_t beacon_data_len;
2425 int i;
2426 u8 count = sdata->csa_current_counter;
2424 2427
2425 switch (sdata->vif.type) { 2428 switch (sdata->vif.type) {
2426 case NL80211_IFTYPE_AP: 2429 case NL80211_IFTYPE_AP:
@@ -2438,40 +2441,57 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
2438 default: 2441 default:
2439 return; 2442 return;
2440 } 2443 }
2441 if (WARN_ON(counter_offset_beacon >= beacon_data_len))
2442 return;
2443 2444
2444 /* Warn if the driver did not check for/react to csa 2445 for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
2445 * completeness. A beacon with CSA counter set to 0 should 2446 u16 counter_offset_beacon =
2446 * never occur, because a counter of 1 means switch just 2447 sdata->csa_counter_offset_beacon[i];
2447 * before the next beacon. 2448 u16 counter_offset_presp = sdata->csa_counter_offset_presp[i];
2448 */
2449 if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
2450 return;
2451 2449
2452 beacon_data[counter_offset_beacon]--; 2450 if (counter_offset_beacon) {
2451 if (WARN_ON(counter_offset_beacon >= beacon_data_len))
2452 return;
2453 2453
2454 if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) { 2454 beacon_data[counter_offset_beacon] = count;
2455 rcu_read_lock(); 2455 }
2456 resp = rcu_dereference(sdata->u.ap.probe_resp); 2456
2457 if (sdata->vif.type == NL80211_IFTYPE_AP &&
2458 counter_offset_presp) {
2459 rcu_read_lock();
2460 resp = rcu_dereference(sdata->u.ap.probe_resp);
2457 2461
2458 /* if nl80211 accepted the offset, this should not happen. */ 2462 /* If nl80211 accepted the offset, this should
2459 if (WARN_ON(!resp)) { 2463 * not happen.
2464 */
2465 if (WARN_ON(!resp)) {
2466 rcu_read_unlock();
2467 return;
2468 }
2469 resp->data[counter_offset_presp] = count;
2460 rcu_read_unlock(); 2470 rcu_read_unlock();
2461 return;
2462 } 2471 }
2463 resp->data[counter_offset_presp]--;
2464 rcu_read_unlock();
2465 } 2472 }
2466} 2473}
2467 2474
2475u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
2476{
2477 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2478
2479 sdata->csa_current_counter--;
2480
2481 /* the counter should never reach 0 */
2482 WARN_ON(!sdata->csa_current_counter);
2483
2484 return sdata->csa_current_counter;
2485}
2486EXPORT_SYMBOL(ieee80211_csa_update_counter);
2487
2468bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) 2488bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
2469{ 2489{
2470 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 2490 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2471 struct beacon_data *beacon = NULL; 2491 struct beacon_data *beacon = NULL;
2472 u8 *beacon_data; 2492 u8 *beacon_data;
2473 size_t beacon_data_len; 2493 size_t beacon_data_len;
2474 int counter_beacon = sdata->csa_counter_offset_beacon; 2494 int counter_beacon = sdata->csa_counter_offset_beacon[0];
2475 int ret = false; 2495 int ret = false;
2476 2496
2477 if (!ieee80211_sdata_running(sdata)) 2497 if (!ieee80211_sdata_running(sdata))
@@ -2521,9 +2541,11 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
2521} 2541}
2522EXPORT_SYMBOL(ieee80211_csa_is_complete); 2542EXPORT_SYMBOL(ieee80211_csa_is_complete);
2523 2543
2524struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, 2544static struct sk_buff *
2525 struct ieee80211_vif *vif, 2545__ieee80211_beacon_get(struct ieee80211_hw *hw,
2526 u16 *tim_offset, u16 *tim_length) 2546 struct ieee80211_vif *vif,
2547 struct ieee80211_mutable_offsets *offs,
2548 bool is_template)
2527{ 2549{
2528 struct ieee80211_local *local = hw_to_local(hw); 2550 struct ieee80211_local *local = hw_to_local(hw);
2529 struct sk_buff *skb = NULL; 2551 struct sk_buff *skb = NULL;
@@ -2532,6 +2554,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2532 enum ieee80211_band band; 2554 enum ieee80211_band band;
2533 struct ieee80211_tx_rate_control txrc; 2555 struct ieee80211_tx_rate_control txrc;
2534 struct ieee80211_chanctx_conf *chanctx_conf; 2556 struct ieee80211_chanctx_conf *chanctx_conf;
2557 int csa_off_base = 0;
2535 2558
2536 rcu_read_lock(); 2559 rcu_read_lock();
2537 2560
@@ -2541,18 +2564,20 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2541 if (!ieee80211_sdata_running(sdata) || !chanctx_conf) 2564 if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
2542 goto out; 2565 goto out;
2543 2566
2544 if (tim_offset) 2567 if (offs)
2545 *tim_offset = 0; 2568 memset(offs, 0, sizeof(*offs));
2546 if (tim_length)
2547 *tim_length = 0;
2548 2569
2549 if (sdata->vif.type == NL80211_IFTYPE_AP) { 2570 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2550 struct ieee80211_if_ap *ap = &sdata->u.ap; 2571 struct ieee80211_if_ap *ap = &sdata->u.ap;
2551 struct beacon_data *beacon = rcu_dereference(ap->beacon); 2572 struct beacon_data *beacon = rcu_dereference(ap->beacon);
2552 2573
2553 if (beacon) { 2574 if (beacon) {
2554 if (sdata->vif.csa_active) 2575 if (sdata->vif.csa_active) {
2555 ieee80211_update_csa(sdata, beacon); 2576 if (!is_template)
2577 ieee80211_csa_update_counter(vif);
2578
2579 ieee80211_set_csa(sdata, beacon);
2580 }
2556 2581
2557 /* 2582 /*
2558 * headroom, head length, 2583 * headroom, head length,
@@ -2569,12 +2594,16 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2569 memcpy(skb_put(skb, beacon->head_len), beacon->head, 2594 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2570 beacon->head_len); 2595 beacon->head_len);
2571 2596
2572 ieee80211_beacon_add_tim(sdata, &ap->ps, skb); 2597 ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
2598 is_template);
2573 2599
2574 if (tim_offset) 2600 if (offs) {
2575 *tim_offset = beacon->head_len; 2601 offs->tim_offset = beacon->head_len;
2576 if (tim_length) 2602 offs->tim_length = skb->len - beacon->head_len;
2577 *tim_length = skb->len - beacon->head_len; 2603
2604 /* for AP the csa offsets are from tail */
2605 csa_off_base = skb->len;
2606 }
2578 2607
2579 if (beacon->tail) 2608 if (beacon->tail)
2580 memcpy(skb_put(skb, beacon->tail_len), 2609 memcpy(skb_put(skb, beacon->tail_len),
@@ -2589,9 +2618,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2589 if (!presp) 2618 if (!presp)
2590 goto out; 2619 goto out;
2591 2620
2592 if (sdata->vif.csa_active) 2621 if (sdata->vif.csa_active) {
2593 ieee80211_update_csa(sdata, presp); 2622 if (!is_template)
2623 ieee80211_csa_update_counter(vif);
2594 2624
2625 ieee80211_set_csa(sdata, presp);
2626 }
2595 2627
2596 skb = dev_alloc_skb(local->tx_headroom + presp->head_len + 2628 skb = dev_alloc_skb(local->tx_headroom + presp->head_len +
2597 local->hw.extra_beacon_tailroom); 2629 local->hw.extra_beacon_tailroom);
@@ -2611,8 +2643,17 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2611 if (!bcn) 2643 if (!bcn)
2612 goto out; 2644 goto out;
2613 2645
2614 if (sdata->vif.csa_active) 2646 if (sdata->vif.csa_active) {
2615 ieee80211_update_csa(sdata, bcn); 2647 if (!is_template)
2648 /* TODO: For mesh csa_counter is in TU, so
2649 * decrementing it by one isn't correct, but
2650 * for now we leave it consistent with overall
2651 * mac80211's behavior.
2652 */
2653 ieee80211_csa_update_counter(vif);
2654
2655 ieee80211_set_csa(sdata, bcn);
2656 }
2616 2657
2617 if (ifmsh->sync_ops) 2658 if (ifmsh->sync_ops)
2618 ifmsh->sync_ops->adjust_tbtt(sdata, bcn); 2659 ifmsh->sync_ops->adjust_tbtt(sdata, bcn);
@@ -2626,13 +2667,33 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2626 goto out; 2667 goto out;
2627 skb_reserve(skb, local->tx_headroom); 2668 skb_reserve(skb, local->tx_headroom);
2628 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len); 2669 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
2629 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb); 2670 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
2671
2672 if (offs) {
2673 offs->tim_offset = bcn->head_len;
2674 offs->tim_length = skb->len - bcn->head_len;
2675 }
2676
2630 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len); 2677 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
2631 } else { 2678 } else {
2632 WARN_ON(1); 2679 WARN_ON(1);
2633 goto out; 2680 goto out;
2634 } 2681 }
2635 2682
2683 /* CSA offsets */
2684 if (offs) {
2685 int i;
2686
2687 for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
2688 u16 csa_off = sdata->csa_counter_offset_beacon[i];
2689
2690 if (!csa_off)
2691 continue;
2692
2693 offs->csa_counter_offs[i] = csa_off_base + csa_off;
2694 }
2695 }
2696
2636 band = chanctx_conf->def.chan->band; 2697 band = chanctx_conf->def.chan->band;
2637 2698
2638 info = IEEE80211_SKB_CB(skb); 2699 info = IEEE80211_SKB_CB(skb);
@@ -2663,6 +2724,32 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2663 out: 2724 out:
2664 rcu_read_unlock(); 2725 rcu_read_unlock();
2665 return skb; 2726 return skb;
2727
2728}
2729
2730struct sk_buff *
2731ieee80211_beacon_get_template(struct ieee80211_hw *hw,
2732 struct ieee80211_vif *vif,
2733 struct ieee80211_mutable_offsets *offs)
2734{
2735 return __ieee80211_beacon_get(hw, vif, offs, true);
2736}
2737EXPORT_SYMBOL(ieee80211_beacon_get_template);
2738
2739struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2740 struct ieee80211_vif *vif,
2741 u16 *tim_offset, u16 *tim_length)
2742{
2743 struct ieee80211_mutable_offsets offs = {};
2744 struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
2745
2746 if (tim_offset)
2747 *tim_offset = offs.tim_offset;
2748
2749 if (tim_length)
2750 *tim_length = offs.tim_length;
2751
2752 return bcn;
2666} 2753}
2667EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2754EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2668 2755
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3c365837e910..6886601afe1c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -554,7 +554,7 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
554 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, 554 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
555 IEEE80211_QUEUE_STOP_REASON_FLUSH); 555 IEEE80211_QUEUE_STOP_REASON_FLUSH);
556 556
557 drv_flush(local, queues, false); 557 drv_flush(local, sdata, queues, false);
558 558
559 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, 559 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
560 IEEE80211_QUEUE_STOP_REASON_FLUSH); 560 IEEE80211_QUEUE_STOP_REASON_FLUSH);
@@ -1457,6 +1457,44 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1457 drv_stop(local); 1457 drv_stop(local);
1458} 1458}
1459 1459
1460static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
1461{
1462 struct ieee80211_sub_if_data *sdata;
1463 struct ieee80211_chanctx *ctx;
1464
1465 /*
 1466 * We get here if the device can't be restarted properly during
 1467 * resume. We might also get here during a HW reset, which is a
 1468 * slightly different situation; in the latter case we need to drop
 1469 * all connections.
 1470 *
 1471 * Ask cfg80211 to turn off all interfaces; this will result in more
 1472 * warnings, but at least we'll then get into a clean stopped state.
1473 */
1474
1475 local->resuming = false;
1476 local->suspended = false;
1477 local->started = false;
1478
1479 /* scheduled scan clearly can't be running any more, but tell
1480 * cfg80211 and clear local state
1481 */
1482 ieee80211_sched_scan_end(local);
1483
1484 list_for_each_entry(sdata, &local->interfaces, list)
1485 sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
1486
1487 /* Mark channel contexts as not being in the driver any more to avoid
1488 * removing them from the driver during the shutdown process...
1489 */
1490 mutex_lock(&local->chanctx_mtx);
1491 list_for_each_entry(ctx, &local->chanctx_list, list)
1492 ctx->driver_present = false;
1493 mutex_unlock(&local->chanctx_mtx);
1494
1495 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1496}
1497
1460static void ieee80211_assign_chanctx(struct ieee80211_local *local, 1498static void ieee80211_assign_chanctx(struct ieee80211_local *local,
1461 struct ieee80211_sub_if_data *sdata) 1499 struct ieee80211_sub_if_data *sdata)
1462{ 1500{
@@ -1520,9 +1558,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1520 */ 1558 */
1521 res = drv_start(local); 1559 res = drv_start(local);
1522 if (res) { 1560 if (res) {
1523 WARN(local->suspended, "Hardware became unavailable " 1561 if (local->suspended)
1524 "upon resume. This could be a software issue " 1562 WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
1525 "prior to suspend or a hardware issue.\n"); 1563 else
1564 WARN(1, "Hardware became unavailable during restart.\n");
1565 ieee80211_handle_reconfig_failure(local);
1526 return res; 1566 return res;
1527 } 1567 }
1528 1568
@@ -1546,7 +1586,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1546 WARN_ON(local->resuming); 1586 WARN_ON(local->resuming);
1547 res = drv_add_interface(local, sdata); 1587 res = drv_add_interface(local, sdata);
1548 if (WARN_ON(res)) { 1588 if (WARN_ON(res)) {
1549 rcu_assign_pointer(local->monitor_sdata, NULL); 1589 RCU_INIT_POINTER(local->monitor_sdata, NULL);
1550 synchronize_net(); 1590 synchronize_net();
1551 kfree(sdata); 1591 kfree(sdata);
1552 } 1592 }
@@ -1565,17 +1605,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1565 list_for_each_entry(ctx, &local->chanctx_list, list) 1605 list_for_each_entry(ctx, &local->chanctx_list, list)
1566 WARN_ON(drv_add_chanctx(local, ctx)); 1606 WARN_ON(drv_add_chanctx(local, ctx));
1567 mutex_unlock(&local->chanctx_mtx); 1607 mutex_unlock(&local->chanctx_mtx);
1568 }
1569 1608
1570 list_for_each_entry(sdata, &local->interfaces, list) { 1609 list_for_each_entry(sdata, &local->interfaces, list) {
1571 if (!ieee80211_sdata_running(sdata)) 1610 if (!ieee80211_sdata_running(sdata))
1572 continue; 1611 continue;
1573 ieee80211_assign_chanctx(local, sdata); 1612 ieee80211_assign_chanctx(local, sdata);
1574 } 1613 }
1575 1614
1576 sdata = rtnl_dereference(local->monitor_sdata); 1615 sdata = rtnl_dereference(local->monitor_sdata);
1577 if (sdata && ieee80211_sdata_running(sdata)) 1616 if (sdata && ieee80211_sdata_running(sdata))
1578 ieee80211_assign_chanctx(local, sdata); 1617 ieee80211_assign_chanctx(local, sdata);
1618 }
1579 1619
1580 /* add STAs back */ 1620 /* add STAs back */
1581 mutex_lock(&local->sta_mtx); 1621 mutex_lock(&local->sta_mtx);
@@ -1671,13 +1711,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1671 } 1711 }
1672 break; 1712 break;
1673 case NL80211_IFTYPE_WDS: 1713 case NL80211_IFTYPE_WDS:
1674 break;
1675 case NL80211_IFTYPE_AP_VLAN: 1714 case NL80211_IFTYPE_AP_VLAN:
1676 case NL80211_IFTYPE_MONITOR: 1715 case NL80211_IFTYPE_MONITOR:
1677 /* ignore virtual */
1678 break;
1679 case NL80211_IFTYPE_P2P_DEVICE: 1716 case NL80211_IFTYPE_P2P_DEVICE:
1680 changed = BSS_CHANGED_IDLE; 1717 /* nothing to do */
1681 break; 1718 break;
1682 case NL80211_IFTYPE_UNSPECIFIED: 1719 case NL80211_IFTYPE_UNSPECIFIED:
1683 case NUM_NL80211_IFTYPES: 1720 case NUM_NL80211_IFTYPES:
@@ -2797,3 +2834,121 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
2797 2834
2798 ps->dtim_count = dtim_count; 2835 ps->dtim_count = dtim_count;
2799} 2836}
2837
2838int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
2839 const struct cfg80211_chan_def *chandef,
2840 enum ieee80211_chanctx_mode chanmode,
2841 u8 radar_detect)
2842{
2843 struct ieee80211_local *local = sdata->local;
2844 struct ieee80211_sub_if_data *sdata_iter;
2845 enum nl80211_iftype iftype = sdata->wdev.iftype;
2846 int num[NUM_NL80211_IFTYPES];
2847 struct ieee80211_chanctx *ctx;
2848 int num_different_channels = 0;
2849 int total = 1;
2850
2851 lockdep_assert_held(&local->chanctx_mtx);
2852
2853 if (WARN_ON(hweight32(radar_detect) > 1))
2854 return -EINVAL;
2855
2856 if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
2857 !chandef->chan))
2858 return -EINVAL;
2859
2860 if (chandef)
2861 num_different_channels = 1;
2862
2863 if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
2864 return -EINVAL;
2865
2866 /* Always allow software iftypes */
2867 if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
2868 if (radar_detect)
2869 return -EINVAL;
2870 return 0;
2871 }
2872
2873 memset(num, 0, sizeof(num));
2874
2875 if (iftype != NL80211_IFTYPE_UNSPECIFIED)
2876 num[iftype] = 1;
2877
2878 list_for_each_entry(ctx, &local->chanctx_list, list) {
2879 if (ctx->conf.radar_enabled)
2880 radar_detect |= BIT(ctx->conf.def.width);
2881 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
2882 num_different_channels++;
2883 continue;
2884 }
2885 if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
2886 cfg80211_chandef_compatible(chandef,
2887 &ctx->conf.def))
2888 continue;
2889 num_different_channels++;
2890 }
2891
2892 list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
2893 struct wireless_dev *wdev_iter;
2894
2895 wdev_iter = &sdata_iter->wdev;
2896
2897 if (sdata_iter == sdata ||
2898 rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
2899 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
2900 continue;
2901
2902 num[wdev_iter->iftype]++;
2903 total++;
2904 }
2905
2906 if (total == 1 && !radar_detect)
2907 return 0;
2908
2909 return cfg80211_check_combinations(local->hw.wiphy,
2910 num_different_channels,
2911 radar_detect, num);
2912}
2913
2914static void
2915ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
2916 void *data)
2917{
2918 u32 *max_num_different_channels = data;
2919
2920 *max_num_different_channels = max(*max_num_different_channels,
2921 c->num_different_channels);
2922}
2923
2924int ieee80211_max_num_channels(struct ieee80211_local *local)
2925{
2926 struct ieee80211_sub_if_data *sdata;
2927 int num[NUM_NL80211_IFTYPES] = {};
2928 struct ieee80211_chanctx *ctx;
2929 int num_different_channels = 0;
2930 u8 radar_detect = 0;
2931 u32 max_num_different_channels = 1;
2932 int err;
2933
2934 lockdep_assert_held(&local->chanctx_mtx);
2935
2936 list_for_each_entry(ctx, &local->chanctx_list, list) {
2937 num_different_channels++;
2938
2939 if (ctx->conf.radar_enabled)
2940 radar_detect |= BIT(ctx->conf.def.width);
2941 }
2942
2943 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2944 num[sdata->wdev.iftype]++;
2945
2946 err = cfg80211_iter_combinations(local->hw.wiphy,
2947 num_different_channels, radar_detect,
2948 num, ieee80211_iter_max_chans,
2949 &max_num_different_channels);
2950 if (err < 0)
2951 return err;
2952
2953 return max_num_different_channels;
2954}
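ieee80211_check_combinations() only tallies interfaces and channels; the policy decision is cfg80211's. The shape of the final call, with hypothetical numbers:

	int num[NUM_NL80211_IFTYPES] = { 0 };

	num[NL80211_IFTYPE_AP] = 1;		/* one AP vif ...      */
	num[NL80211_IFTYPE_STATION] = 1;	/* ... plus one client */

	/* two channels in use, radar detection on a 20 MHz context */
	err = cfg80211_check_combinations(local->hw.wiphy, 2,
					  BIT(NL80211_CHAN_WIDTH_20), num);
	if (err)
		return err;	/* combination not advertised by the driver */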
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b8600e3c29c8..9b3dcc201145 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -406,7 +406,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
406 406
407 if (info->control.hw_key && 407 if (info->control.hw_key &&
408 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 408 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
409 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { 409 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
410 !((info->control.hw_key->flags &
411 IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
412 ieee80211_is_mgmt(hdr->frame_control))) {
410 /* 413 /*
411 * hwaccel has no need for preallocated room for CCMP 414 * hwaccel has no need for preallocated room for CCMP
412 * header or MIC fields 415 * header or MIC fields
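The grown condition in ccmp_encrypt_skb(): with IEEE80211_KEY_FLAG_GENERATE_IV_MGMT set, management frames keep getting IV space from mac80211 even when data-frame crypto is fully offloaded. The test restated as a predicate (equivalent logic, hypothetical helper name):

	/* true when mac80211 must leave room for the CCMP header/MIC */
	static bool ccmp_need_iv_room(struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr)
	{
		struct ieee80211_key_conf *hw_key = info->control.hw_key;

		if (!hw_key)	/* software crypto */
			return true;
		if (hw_key->flags & (IEEE80211_KEY_FLAG_GENERATE_IV |
				     IEEE80211_KEY_FLAG_PUT_IV_SPACE))
			return true;
		/* hardware handles data frames but wants the stack to
		 * generate the IV for protected management frames */
		return (hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
		       ieee80211_is_mgmt(hdr->frame_control);
	}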
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index b33dd76d4307..1818a99b3081 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -2,6 +2,10 @@ config MAC802154
2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" 2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
3 depends on IEEE802154 3 depends on IEEE802154
4 select CRC_CCITT 4 select CRC_CCITT
5 select CRYPTO_AUTHENC
6 select CRYPTO_CCM
7 select CRYPTO_CTR
8 select CRYPTO_AES
5 ---help--- 9 ---help---
6 This option enables the hardware independent IEEE 802.15.4 10 This option enables the hardware independent IEEE 802.15.4
7 networking stack for SoftMAC devices (the ones implementing 11 networking stack for SoftMAC devices (the ones implementing
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 15d62df52182..9723d6f3f3e5 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_MAC802154) += mac802154.o 1obj-$(CONFIG_MAC802154) += mac802154.o
2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o 2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \
3 monitor.o wpan.o llsec.o
3 4
4ccflags-y += -D__CHECK_ENDIAN__ 5ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
new file mode 100644
index 000000000000..1456f73b02b9
--- /dev/null
+++ b/net/mac802154/llsec.c
@@ -0,0 +1,1070 @@
1/*
2 * Copyright (C) 2014 Fraunhofer ITWM
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Written by:
14 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
15 */
16
17#include <linux/err.h>
18#include <linux/bug.h>
19#include <linux/completion.h>
20#include <net/ieee802154.h>
21#include <crypto/algapi.h>
22
23#include "mac802154.h"
24#include "llsec.h"
25
26static void llsec_key_put(struct mac802154_llsec_key *key);
27static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
28 const struct ieee802154_llsec_key_id *b);
29
30static void llsec_dev_free(struct mac802154_llsec_device *dev);
31
32void mac802154_llsec_init(struct mac802154_llsec *sec)
33{
34 memset(sec, 0, sizeof(*sec));
35
36 memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
37
38 INIT_LIST_HEAD(&sec->table.security_levels);
39 INIT_LIST_HEAD(&sec->table.devices);
40 INIT_LIST_HEAD(&sec->table.keys);
41 hash_init(sec->devices_short);
42 hash_init(sec->devices_hw);
43 rwlock_init(&sec->lock);
44}
45
46void mac802154_llsec_destroy(struct mac802154_llsec *sec)
47{
48 struct ieee802154_llsec_seclevel *sl, *sn;
49 struct ieee802154_llsec_device *dev, *dn;
50 struct ieee802154_llsec_key_entry *key, *kn;
51
52 list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
53 struct mac802154_llsec_seclevel *msl;
54
55 msl = container_of(sl, struct mac802154_llsec_seclevel, level);
56 list_del(&sl->list);
57 kfree(msl);
58 }
59
60 list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
61 struct mac802154_llsec_device *mdev;
62
63 mdev = container_of(dev, struct mac802154_llsec_device, dev);
64 list_del(&dev->list);
65 llsec_dev_free(mdev);
66 }
67
68 list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
69 struct mac802154_llsec_key *mkey;
70
71 mkey = container_of(key->key, struct mac802154_llsec_key, key);
72 list_del(&key->list);
73 llsec_key_put(mkey);
74 kfree(key);
75 }
76}
77
78
79
80int mac802154_llsec_get_params(struct mac802154_llsec *sec,
81 struct ieee802154_llsec_params *params)
82{
83 read_lock_bh(&sec->lock);
84 *params = sec->params;
85 read_unlock_bh(&sec->lock);
86
87 return 0;
88}
89
90int mac802154_llsec_set_params(struct mac802154_llsec *sec,
91 const struct ieee802154_llsec_params *params,
92 int changed)
93{
94 write_lock_bh(&sec->lock);
95
96 if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
97 sec->params.enabled = params->enabled;
98 if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
99 sec->params.frame_counter = params->frame_counter;
100 if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
101 sec->params.out_level = params->out_level;
102 if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
103 sec->params.out_key = params->out_key;
104 if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
105 sec->params.default_key_source = params->default_key_source;
106 if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
107 sec->params.pan_id = params->pan_id;
108 if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
109 sec->params.hwaddr = params->hwaddr;
110 if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
111 sec->params.coord_hwaddr = params->coord_hwaddr;
112 if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
113 sec->params.coord_shortaddr = params->coord_shortaddr;
114
115 write_unlock_bh(&sec->lock);
116
117 return 0;
118}
119
120
121
122static struct mac802154_llsec_key*
123llsec_key_alloc(const struct ieee802154_llsec_key *template)
124{
125 const int authsizes[3] = { 4, 8, 16 };
126 struct mac802154_llsec_key *key;
127 int i;
128
129 key = kzalloc(sizeof(*key), GFP_KERNEL);
130 if (!key)
131 return NULL;
132
133 kref_init(&key->ref);
134 key->key = *template;
135
136 BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
137
138 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
139 key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
140 CRYPTO_ALG_ASYNC);
141 if (!key->tfm[i])
142 goto err_tfm;
143 if (crypto_aead_setkey(key->tfm[i], template->key,
144 IEEE802154_LLSEC_KEY_SIZE))
145 goto err_tfm;
146 if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
147 goto err_tfm;
148 }
149
150 key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
151 if (!key->tfm0)
152 goto err_tfm;
153
154 if (crypto_blkcipher_setkey(key->tfm0, template->key,
155 IEEE802154_LLSEC_KEY_SIZE))
156 goto err_tfm0;
157
158 return key;
159
160err_tfm0:
161 crypto_free_blkcipher(key->tfm0);
162err_tfm:
163 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
164 if (key->tfm[i])
165 crypto_free_aead(key->tfm[i]);
166
167 kfree(key);
168 return NULL;
169}
170
171static void llsec_key_release(struct kref *ref)
172{
173 struct mac802154_llsec_key *key;
174 int i;
175
176 key = container_of(ref, struct mac802154_llsec_key, ref);
177
178 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
179 crypto_free_aead(key->tfm[i]);
180
181 crypto_free_blkcipher(key->tfm0);
182 kfree(key);
183}
184
185static struct mac802154_llsec_key*
186llsec_key_get(struct mac802154_llsec_key *key)
187{
188 kref_get(&key->ref);
189 return key;
190}
191
192static void llsec_key_put(struct mac802154_llsec_key *key)
193{
194 kref_put(&key->ref, llsec_key_release);
195}
196
197static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
198 const struct ieee802154_llsec_key_id *b)
199{
200 if (a->mode != b->mode)
201 return false;
202
203 if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
204 return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
205
206 if (a->id != b->id)
207 return false;
208
209 switch (a->mode) {
210 case IEEE802154_SCF_KEY_INDEX:
211 return true;
212 case IEEE802154_SCF_KEY_SHORT_INDEX:
213 return a->short_source == b->short_source;
214 case IEEE802154_SCF_KEY_HW_INDEX:
215 return a->extended_source == b->extended_source;
216 }
217
218 return false;
219}
220
221int mac802154_llsec_key_add(struct mac802154_llsec *sec,
222 const struct ieee802154_llsec_key_id *id,
223 const struct ieee802154_llsec_key *key)
224{
225 struct mac802154_llsec_key *mkey = NULL;
226 struct ieee802154_llsec_key_entry *pos, *new;
227
228 if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
229 key->cmd_frame_ids)
230 return -EINVAL;
231
232 list_for_each_entry(pos, &sec->table.keys, list) {
233 if (llsec_key_id_equal(&pos->id, id))
234 return -EEXIST;
235
236 if (memcmp(pos->key->key, key->key,
237 IEEE802154_LLSEC_KEY_SIZE))
238 continue;
239
240 mkey = container_of(pos->key, struct mac802154_llsec_key, key);
241
242 /* Don't allow multiple instances of the same AES key to have
243 * different allowed frame types/command frame ids, as this is
244 * not possible in the 802.15.4 PIB.
245 */
246 if (pos->key->frame_types != key->frame_types ||
247 pos->key->cmd_frame_ids != key->cmd_frame_ids)
248 return -EEXIST;
249
250 break;
251 }
252
253 new = kzalloc(sizeof(*new), GFP_KERNEL);
254 if (!new)
255 return -ENOMEM;
256
257 if (!mkey)
258 mkey = llsec_key_alloc(key);
259 else
260 mkey = llsec_key_get(mkey);
261
262 if (!mkey)
263 goto fail;
264
265 new->id = *id;
266 new->key = &mkey->key;
267
268 list_add_rcu(&new->list, &sec->table.keys);
269
270 return 0;
271
272fail:
273 kfree(new);
274 return -ENOMEM;
275}
276
277int mac802154_llsec_key_del(struct mac802154_llsec *sec,
278 const struct ieee802154_llsec_key_id *key)
279{
280 struct ieee802154_llsec_key_entry *pos;
281
282 list_for_each_entry(pos, &sec->table.keys, list) {
283 struct mac802154_llsec_key *mkey;
284
285 mkey = container_of(pos->key, struct mac802154_llsec_key, key);
286
287 if (llsec_key_id_equal(&pos->id, key)) {
288 list_del_rcu(&pos->list);
289 llsec_key_put(mkey);
290 return 0;
291 }
292 }
293
294 return -ENOENT;
295}
296
297
298
299static bool llsec_dev_use_shortaddr(__le16 short_addr)
300{
301 return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
302 short_addr != cpu_to_le16(0xffff);
303}
304
305static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
306{
307 return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
308}
309
310static u64 llsec_dev_hash_long(__le64 hwaddr)
311{
312 return (__force u64) hwaddr;
313}
314
315static struct mac802154_llsec_device*
316llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
317 __le16 pan_id)
318{
319 struct mac802154_llsec_device *dev;
320 u32 key = llsec_dev_hash_short(short_addr, pan_id);
321
322 hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
323 if (dev->dev.short_addr == short_addr &&
324 dev->dev.pan_id == pan_id)
325 return dev;
326 }
327
328 return NULL;
329}
330
331static struct mac802154_llsec_device*
332llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
333{
334 struct mac802154_llsec_device *dev;
335 u64 key = llsec_dev_hash_long(hwaddr);
336
337 hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
338 if (dev->dev.hwaddr == hwaddr)
339 return dev;
340 }
341
342 return NULL;
343}
344
345static void llsec_dev_free(struct mac802154_llsec_device *dev)
346{
347 struct ieee802154_llsec_device_key *pos, *pn;
348 struct mac802154_llsec_device_key *devkey;
349
350 list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
351 devkey = container_of(pos, struct mac802154_llsec_device_key,
352 devkey);
353
354 list_del(&pos->list);
355 kfree(devkey);
356 }
357
358 kfree(dev);
359}
360
361int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
362 const struct ieee802154_llsec_device *dev)
363{
364 struct mac802154_llsec_device *entry;
365 u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
366 u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
367
368 BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
369
370 if ((llsec_dev_use_shortaddr(dev->short_addr) &&
371 llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
372 llsec_dev_find_long(sec, dev->hwaddr))
373 return -EEXIST;
374
375 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
376 if (!entry)
377 return -ENOMEM;
378
379 entry->dev = *dev;
380 spin_lock_init(&entry->lock);
381 INIT_LIST_HEAD(&entry->dev.keys);
382
383 if (llsec_dev_use_shortaddr(dev->short_addr))
384 hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
385 else
386 INIT_HLIST_NODE(&entry->bucket_s);
387
388 hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
389 list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
390
391 return 0;
392}
393
394static void llsec_dev_free_rcu(struct rcu_head *rcu)
395{
396 llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
397}
398
399int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
400{
401 struct mac802154_llsec_device *pos;
402
403 pos = llsec_dev_find_long(sec, device_addr);
404 if (!pos)
405 return -ENOENT;
406
407 hash_del_rcu(&pos->bucket_s);
408 hash_del_rcu(&pos->bucket_hw);
409 call_rcu(&pos->rcu, llsec_dev_free_rcu);
410
411 return 0;
412}
413
414
415
416static struct mac802154_llsec_device_key*
417llsec_devkey_find(struct mac802154_llsec_device *dev,
418 const struct ieee802154_llsec_key_id *key)
419{
420 struct ieee802154_llsec_device_key *devkey;
421
422 list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
423 if (!llsec_key_id_equal(key, &devkey->key_id))
424 continue;
425
426 return container_of(devkey, struct mac802154_llsec_device_key,
427 devkey);
428 }
429
430 return NULL;
431}
432
433int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
434 __le64 dev_addr,
435 const struct ieee802154_llsec_device_key *key)
436{
437 struct mac802154_llsec_device *dev;
438 struct mac802154_llsec_device_key *devkey;
439
440 dev = llsec_dev_find_long(sec, dev_addr);
441
442 if (!dev)
443 return -ENOENT;
444
445 if (llsec_devkey_find(dev, &key->key_id))
446 return -EEXIST;
447
448 devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
449 if (!devkey)
450 return -ENOMEM;
451
452 devkey->devkey = *key;
453 list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
454 return 0;
455}
456
457int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
458 __le64 dev_addr,
459 const struct ieee802154_llsec_device_key *key)
460{
461 struct mac802154_llsec_device *dev;
462 struct mac802154_llsec_device_key *devkey;
463
464 dev = llsec_dev_find_long(sec, dev_addr);
465
466 if (!dev)
467 return -ENOENT;
468
469 devkey = llsec_devkey_find(dev, &key->key_id);
470 if (!devkey)
471 return -ENOENT;
472
473 list_del_rcu(&devkey->devkey.list);
474 kfree_rcu(devkey, rcu);
475 return 0;
476}
477
478
479
480static struct mac802154_llsec_seclevel*
481llsec_find_seclevel(const struct mac802154_llsec *sec,
482 const struct ieee802154_llsec_seclevel *sl)
483{
484 struct ieee802154_llsec_seclevel *pos;
485
486 list_for_each_entry(pos, &sec->table.security_levels, list) {
487 if (pos->frame_type != sl->frame_type ||
488 (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
489 pos->cmd_frame_id != sl->cmd_frame_id) ||
490 pos->device_override != sl->device_override ||
491 pos->sec_levels != sl->sec_levels)
492 continue;
493
494 return container_of(pos, struct mac802154_llsec_seclevel,
495 level);
496 }
497
498 return NULL;
499}
500
501int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
502 const struct ieee802154_llsec_seclevel *sl)
503{
504 struct mac802154_llsec_seclevel *entry;
505
506 if (llsec_find_seclevel(sec, sl))
507 return -EEXIST;
508
509 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
510 if (!entry)
511 return -ENOMEM;
512
513 entry->level = *sl;
514
515 list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
516
517 return 0;
518}
519
520int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
521 const struct ieee802154_llsec_seclevel *sl)
522{
523 struct mac802154_llsec_seclevel *pos;
524
525 pos = llsec_find_seclevel(sec, sl);
526 if (!pos)
527 return -ENOENT;
528
529 list_del_rcu(&pos->level.list);
530 kfree_rcu(pos, rcu);
531
532 return 0;
533}
534
535
536
537static int llsec_recover_addr(struct mac802154_llsec *sec,
538 struct ieee802154_addr *addr)
539{
540 __le16 caddr = sec->params.coord_shortaddr;
541 addr->pan_id = sec->params.pan_id;
542
543 if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
544 return -EINVAL;
545 } else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
546 addr->extended_addr = sec->params.coord_hwaddr;
547 addr->mode = IEEE802154_ADDR_LONG;
548 } else {
549 addr->short_addr = sec->params.coord_shortaddr;
550 addr->mode = IEEE802154_ADDR_SHORT;
551 }
552
553 return 0;
554}
555
556static struct mac802154_llsec_key*
557llsec_lookup_key(struct mac802154_llsec *sec,
558 const struct ieee802154_hdr *hdr,
559 const struct ieee802154_addr *addr,
560 struct ieee802154_llsec_key_id *key_id)
561{
562 struct ieee802154_addr devaddr = *addr;
563 u8 key_id_mode = hdr->sec.key_id_mode;
564 struct ieee802154_llsec_key_entry *key_entry;
565 struct mac802154_llsec_key *key;
566
567 if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
568 devaddr.mode == IEEE802154_ADDR_NONE) {
569 if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
570 devaddr.extended_addr = sec->params.coord_hwaddr;
571 devaddr.mode = IEEE802154_ADDR_LONG;
572 } else if (llsec_recover_addr(sec, &devaddr) < 0) {
573 return NULL;
574 }
575 }
576
577 list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
578 const struct ieee802154_llsec_key_id *id = &key_entry->id;
579
580 if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
581 continue;
582
583 if (id->mode != key_id_mode)
584 continue;
585
586 if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
587 if (ieee802154_addr_equal(&devaddr, &id->device_addr))
588 goto found;
589 } else {
590 if (id->id != hdr->sec.key_id)
591 continue;
592
593 if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
594 (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
595 id->short_source == hdr->sec.short_src) ||
596 (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
597 id->extended_source == hdr->sec.extended_src))
598 goto found;
599 }
600 }
601
602 return NULL;
603
604found:
605 key = container_of(key_entry->key, struct mac802154_llsec_key, key);
606 if (key_id)
607 *key_id = key_entry->id;
608 return llsec_key_get(key);
609}
610
611
612static void llsec_geniv(u8 iv[16], __le64 addr,
613 const struct ieee802154_sechdr *sec)
614{
615 __be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
616 __be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
617
618 iv[0] = 1; /* L' = L - 1 = 1 */
619 memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
620 memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
621 iv[13] = sec->level;
622 iv[14] = 0;
623 iv[15] = 1;
624}
625
626static int
627llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
628 const struct ieee802154_hdr *hdr,
629 struct mac802154_llsec_key *key)
630{
631 u8 iv[16];
632 struct scatterlist src;
633 struct blkcipher_desc req = {
634 .tfm = key->tfm0,
635 .info = iv,
636 .flags = 0,
637 };
638
639 llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
640 sg_init_one(&src, skb->data, skb->len);
641 return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
642}
643
644static struct crypto_aead*
645llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
646{
647 int i;
648
649 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
650 if (crypto_aead_authsize(key->tfm[i]) == authlen)
651 return key->tfm[i];
652
653 BUG();
654}
655
656static int
657llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
658 const struct ieee802154_hdr *hdr,
659 struct mac802154_llsec_key *key)
660{
661 u8 iv[16];
662 unsigned char *data;
663 int authlen, assoclen, datalen, rc;
664 struct scatterlist src, assoc[2], dst[2];
665 struct aead_request *req;
666
667 authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
668 llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
669
670 req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
671 if (!req)
672 return -ENOMEM;
673
674 sg_init_table(assoc, 2);
675 sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
676 assoclen = skb->mac_len;
677
678 data = skb_mac_header(skb) + skb->mac_len;
679 datalen = skb_tail_pointer(skb) - data;
680
681 if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
682 sg_set_buf(&assoc[1], data, 0);
683 } else {
684 sg_set_buf(&assoc[1], data, datalen);
685 assoclen += datalen;
686 datalen = 0;
687 }
688
689 sg_init_one(&src, data, datalen);
690
691 sg_init_table(dst, 2);
692 sg_set_buf(&dst[0], data, datalen);
693 sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);
694
695 aead_request_set_callback(req, 0, NULL, NULL);
696 aead_request_set_assoc(req, assoc, assoclen);
697 aead_request_set_crypt(req, &src, dst, datalen, iv);
698
699 rc = crypto_aead_encrypt(req);
700
701 kfree(req);
702
703 return rc;
704}
705
706static int llsec_do_encrypt(struct sk_buff *skb,
707 const struct mac802154_llsec *sec,
708 const struct ieee802154_hdr *hdr,
709 struct mac802154_llsec_key *key)
710{
711 if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
712 return llsec_do_encrypt_unauth(skb, sec, hdr, key);
713 else
714 return llsec_do_encrypt_auth(skb, sec, hdr, key);
715}
716
717int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
718{
719 struct ieee802154_hdr hdr;
720 int rc, authlen, hlen;
721 struct mac802154_llsec_key *key;
722 u32 frame_ctr;
723
724 hlen = ieee802154_hdr_pull(skb, &hdr);
725
726 if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
727 return -EINVAL;
728
729 if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
730 skb_push(skb, hlen);
731 return 0;
732 }
733
734 authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
735
736 if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
737 return -EMSGSIZE;
738
739 rcu_read_lock();
740
741 read_lock_bh(&sec->lock);
742
743 if (!sec->params.enabled) {
744 rc = -EINVAL;
745 goto fail_read;
746 }
747
748 key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
749 if (!key) {
750 rc = -ENOKEY;
751 goto fail_read;
752 }
753
754 read_unlock_bh(&sec->lock);
755
756 write_lock_bh(&sec->lock);
757
758 frame_ctr = be32_to_cpu(sec->params.frame_counter);
759 hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
760 if (frame_ctr == 0xFFFFFFFF) {
761 write_unlock_bh(&sec->lock);
762 llsec_key_put(key);
763 rc = -EOVERFLOW;
764 goto fail;
765 }
766
767 sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
768
769 write_unlock_bh(&sec->lock);
770
771 rcu_read_unlock();
772
773 skb->mac_len = ieee802154_hdr_push(skb, &hdr);
774 skb_reset_mac_header(skb);
775
776 rc = llsec_do_encrypt(skb, sec, &hdr, key);
777 llsec_key_put(key);
778
779 return rc;
780
781fail_read:
782 read_unlock_bh(&sec->lock);
783fail:
784 rcu_read_unlock();
785 return rc;
786}
787
788
789
790static struct mac802154_llsec_device*
791llsec_lookup_dev(struct mac802154_llsec *sec,
792 const struct ieee802154_addr *addr)
793{
794 struct ieee802154_addr devaddr = *addr;
795 struct mac802154_llsec_device *dev = NULL;
796
797 if (devaddr.mode == IEEE802154_ADDR_NONE &&
798 llsec_recover_addr(sec, &devaddr) < 0)
799 return NULL;
800
801 if (devaddr.mode == IEEE802154_ADDR_SHORT) {
802 u32 key = llsec_dev_hash_short(devaddr.short_addr,
803 devaddr.pan_id);
804
805 hash_for_each_possible_rcu(sec->devices_short, dev,
806 bucket_s, key) {
807 if (dev->dev.pan_id == devaddr.pan_id &&
808 dev->dev.short_addr == devaddr.short_addr)
809 return dev;
810 }
811 } else {
812 u64 key = llsec_dev_hash_long(devaddr.extended_addr);
813
814 hash_for_each_possible_rcu(sec->devices_hw, dev,
815 bucket_hw, key) {
816 if (dev->dev.hwaddr == devaddr.extended_addr)
817 return dev;
818 }
819 }
820
821 return NULL;
822}
823
824static int
825llsec_lookup_seclevel(const struct mac802154_llsec *sec,
826 u8 frame_type, u8 cmd_frame_id,
827 struct ieee802154_llsec_seclevel *rlevel)
828{
829 struct ieee802154_llsec_seclevel *level;
830
831 list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
832 if (level->frame_type == frame_type &&
833 (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
834 level->cmd_frame_id == cmd_frame_id)) {
835 *rlevel = *level;
836 return 0;
837 }
838 }
839
840 return -EINVAL;
841}
842
843static int
844llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
845 const struct ieee802154_hdr *hdr,
846 struct mac802154_llsec_key *key, __le64 dev_addr)
847{
848 u8 iv[16];
849 unsigned char *data;
850 int datalen;
851 struct scatterlist src;
852 struct blkcipher_desc req = {
853 .tfm = key->tfm0,
854 .info = iv,
855 .flags = 0,
856 };
857
858 llsec_geniv(iv, dev_addr, &hdr->sec);
859 data = skb_mac_header(skb) + skb->mac_len;
860 datalen = skb_tail_pointer(skb) - data;
861
862 sg_init_one(&src, data, datalen);
863
864 return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
865}
866
867static int
868llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
869 const struct ieee802154_hdr *hdr,
870 struct mac802154_llsec_key *key, __le64 dev_addr)
871{
872 u8 iv[16];
873 unsigned char *data;
874 int authlen, datalen, assoclen, rc;
875 struct scatterlist src, assoc[2];
876 struct aead_request *req;
877
878 authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
879 llsec_geniv(iv, dev_addr, &hdr->sec);
880
881 req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
882 if (!req)
883 return -ENOMEM;
884
885 sg_init_table(assoc, 2);
886 sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
887 assoclen = skb->mac_len;
888
889 data = skb_mac_header(skb) + skb->mac_len;
890 datalen = skb_tail_pointer(skb) - data;
891
892 if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
893 sg_set_buf(&assoc[1], data, 0);
894 } else {
895 sg_set_buf(&assoc[1], data, datalen - authlen);
896 assoclen += datalen - authlen;
897 data += datalen - authlen;
898 datalen = authlen;
899 }
900
901 sg_init_one(&src, data, datalen);
902
903 aead_request_set_callback(req, 0, NULL, NULL);
904 aead_request_set_assoc(req, assoc, assoclen);
905 aead_request_set_crypt(req, &src, &src, datalen, iv);
906
907 rc = crypto_aead_decrypt(req);
908
909 kfree(req);
910 skb_trim(skb, skb->len - authlen);
911
912 return rc;
913}
914
915static int
916llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
917 const struct ieee802154_hdr *hdr,
918 struct mac802154_llsec_key *key, __le64 dev_addr)
919{
920 if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
921 return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
922 else
923 return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
924}
925
926static int
927llsec_update_devkey_record(struct mac802154_llsec_device *dev,
928 const struct ieee802154_llsec_key_id *in_key)
929{
930 struct mac802154_llsec_device_key *devkey;
931
932 devkey = llsec_devkey_find(dev, in_key);
933
934 if (!devkey) {
935 struct mac802154_llsec_device_key *next;
936
937 next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
938 if (!next)
939 return -ENOMEM;
940
941 next->devkey.key_id = *in_key;
942
943 spin_lock_bh(&dev->lock);
944
945 devkey = llsec_devkey_find(dev, in_key);
946 if (!devkey)
947 list_add_rcu(&next->devkey.list, &dev->dev.keys);
948 else
949 kfree(next);
950
951 spin_unlock_bh(&dev->lock);
952 }
953
954 return 0;
955}
956
957static int
958llsec_update_devkey_info(struct mac802154_llsec_device *dev,
959 const struct ieee802154_llsec_key_id *in_key,
960 u32 frame_counter)
961{
962 struct mac802154_llsec_device_key *devkey = NULL;
963
964 if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
965 devkey = llsec_devkey_find(dev, in_key);
966 if (!devkey)
967 return -ENOENT;
968 }
969
970 if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
971 int rc = llsec_update_devkey_record(dev, in_key);
972
973 if (rc < 0)
974 return rc;
975 }
976
977 spin_lock_bh(&dev->lock);
978
979 if ((!devkey && frame_counter < dev->dev.frame_counter) ||
980 (devkey && frame_counter < devkey->devkey.frame_counter)) {
981 spin_unlock_bh(&dev->lock);
982 return -EINVAL;
983 }
984
985 if (devkey)
986 devkey->devkey.frame_counter = frame_counter + 1;
987 else
988 dev->dev.frame_counter = frame_counter + 1;
989
990 spin_unlock_bh(&dev->lock);
991
992 return 0;
993}
994
995int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
996{
997 struct ieee802154_hdr hdr;
998 struct mac802154_llsec_key *key;
999 struct ieee802154_llsec_key_id key_id;
1000 struct mac802154_llsec_device *dev;
1001 struct ieee802154_llsec_seclevel seclevel;
1002 int err;
1003 __le64 dev_addr;
1004 u32 frame_ctr;
1005
1006 if (ieee802154_hdr_peek(skb, &hdr) < 0)
1007 return -EINVAL;
1008 if (!hdr.fc.security_enabled)
1009 return 0;
1010 if (hdr.fc.version == 0)
1011 return -EINVAL;
1012
1013 read_lock_bh(&sec->lock);
1014 if (!sec->params.enabled) {
1015 read_unlock_bh(&sec->lock);
1016 return -EINVAL;
1017 }
1018 read_unlock_bh(&sec->lock);
1019
1020 rcu_read_lock();
1021
1022 key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
1023 if (!key) {
1024 err = -ENOKEY;
1025 goto fail;
1026 }
1027
1028 dev = llsec_lookup_dev(sec, &hdr.source);
1029 if (!dev) {
1030 err = -EINVAL;
1031 goto fail_dev;
1032 }
1033
1034 if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
1035 err = -EINVAL;
1036 goto fail_dev;
1037 }
1038
1039 if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
1040 (hdr.sec.level == 0 && seclevel.device_override &&
1041 !dev->dev.seclevel_exempt)) {
1042 err = -EINVAL;
1043 goto fail_dev;
1044 }
1045
1046 frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
1047
1048 if (frame_ctr == 0xffffffff) {
1049 err = -EOVERFLOW;
1050 goto fail_dev;
1051 }
1052
1053 err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
1054 if (err)
1055 goto fail_dev;
1056
1057 dev_addr = dev->dev.hwaddr;
1058
1059 rcu_read_unlock();
1060
1061 err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
1062 llsec_key_put(key);
1063 return err;
1064
1065fail_dev:
1066 llsec_key_put(key);
1067fail:
1068 rcu_read_unlock();
1069 return err;
1070}
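
Taken together, llsec.c implements the 802.15.4-2011 key/device/seclevel tables plus the CCM* transforms. A hedged sketch of minimal bring-up against this API; the key material, IDs, and level constant are placeholders (level name assumed from the companion ieee802154 headers), and in-tree callers go through the netlink-facing wrappers added to mib.c further below:

static int demo_llsec_bringup(struct mac802154_llsec *sec)
{
	struct ieee802154_llsec_key key = {
		.frame_types = BIT(IEEE802154_FC_TYPE_DATA),
		/* .key would hold 16 bytes of real AES key material */
	};
	struct ieee802154_llsec_key_id id = {
		.mode = IEEE802154_SCF_KEY_INDEX,
		.id = 1,
	};
	struct ieee802154_llsec_seclevel sl = {
		.frame_type = IEEE802154_FC_TYPE_DATA,
		.sec_levels = BIT(IEEE802154_SCF_SECLEVEL_ENC_MIC32),
	};
	int rc;

	mac802154_llsec_init(sec);

	rc = mac802154_llsec_key_add(sec, &id, &key);
	if (rc)
		return rc;

	return mac802154_llsec_seclevel_add(sec, &sl);
}
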
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
new file mode 100644
index 000000000000..950578e1d7be
--- /dev/null
+++ b/net/mac802154/llsec.h
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2014 Fraunhofer ITWM
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Written by:
14 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
15 */
16
17#ifndef MAC802154_LLSEC_H
18#define MAC802154_LLSEC_H
19
20#include <linux/slab.h>
21#include <linux/hashtable.h>
22#include <linux/crypto.h>
23#include <linux/kref.h>
24#include <linux/spinlock.h>
25#include <net/af_ieee802154.h>
26#include <net/ieee802154_netdev.h>
27
28struct mac802154_llsec_key {
29 struct ieee802154_llsec_key key;
30
31 /* one tfm for each authsize (4/8/16) */
32 struct crypto_aead *tfm[3];
33 struct crypto_blkcipher *tfm0;
34
35 struct kref ref;
36};
37
38struct mac802154_llsec_device_key {
39 struct ieee802154_llsec_device_key devkey;
40
41 struct rcu_head rcu;
42};
43
44struct mac802154_llsec_device {
45 struct ieee802154_llsec_device dev;
46
47 struct hlist_node bucket_s;
48 struct hlist_node bucket_hw;
49
50 /* protects dev.frame_counter and the elements of dev.keys */
51 spinlock_t lock;
52
53 struct rcu_head rcu;
54};
55
56struct mac802154_llsec_seclevel {
57 struct ieee802154_llsec_seclevel level;
58
59 struct rcu_head rcu;
60};
61
62struct mac802154_llsec {
63 struct ieee802154_llsec_params params;
64 struct ieee802154_llsec_table table;
65
66 DECLARE_HASHTABLE(devices_short, 6);
67 DECLARE_HASHTABLE(devices_hw, 6);
68
69 /* protects params, all other fields are fine with RCU */
70 rwlock_t lock;
71};
72
73void mac802154_llsec_init(struct mac802154_llsec *sec);
74void mac802154_llsec_destroy(struct mac802154_llsec *sec);
75
76int mac802154_llsec_get_params(struct mac802154_llsec *sec,
77 struct ieee802154_llsec_params *params);
78int mac802154_llsec_set_params(struct mac802154_llsec *sec,
79 const struct ieee802154_llsec_params *params,
80 int changed);
81
82int mac802154_llsec_key_add(struct mac802154_llsec *sec,
83 const struct ieee802154_llsec_key_id *id,
84 const struct ieee802154_llsec_key *key);
85int mac802154_llsec_key_del(struct mac802154_llsec *sec,
86 const struct ieee802154_llsec_key_id *key);
87
88int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
89 const struct ieee802154_llsec_device *dev);
90int mac802154_llsec_dev_del(struct mac802154_llsec *sec,
91 __le64 device_addr);
92
93int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
94 __le64 dev_addr,
95 const struct ieee802154_llsec_device_key *key);
96int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
97 __le64 dev_addr,
98 const struct ieee802154_llsec_device_key *key);
99
100int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
101 const struct ieee802154_llsec_seclevel *sl);
102int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
103 const struct ieee802154_llsec_seclevel *sl);
104
105int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
106int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
107
108#endif /* MAC802154_LLSEC_H */
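
Two details of this header are worth illustrating. Keys carry one AEAD tfm per CCM tag length because the authsize is a property of the transform, and devices are findable by short or extended address through the two RCU hash tables. A hedged, generic sketch of the lookup pattern the DECLARE_HASHTABLE() fields support, mirroring llsec_dev_find_short(); all names here are invented:

struct demo_entry {
	u32 key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_table, 6);	/* 64 buckets, as above */

static struct demo_entry *demo_find(u32 key)
{
	struct demo_entry *e;

	/* caller holds rcu_read_lock(); colliding entries share a
	 * bucket, so the key must be re-checked on each hit
	 */
	hash_for_each_possible_rcu(demo_table, e, node, key)
		if (e->key == key)
			return e;

	return NULL;
}
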
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 28ef59c566e6..762a6f849c6b 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -23,8 +23,12 @@
23#ifndef MAC802154_H 23#ifndef MAC802154_H
24#define MAC802154_H 24#define MAC802154_H
25 25
26#include <linux/mutex.h>
27#include <net/mac802154.h>
26#include <net/ieee802154_netdev.h> 28#include <net/ieee802154_netdev.h>
27 29
30#include "llsec.h"
31
28/* mac802154 device private data */ 32/* mac802154 device private data */
29struct mac802154_priv { 33struct mac802154_priv {
30 struct ieee802154_dev hw; 34 struct ieee802154_dev hw;
@@ -90,6 +94,13 @@ struct mac802154_sub_if_data {
90 u8 bsn; 94 u8 bsn;
91 /* MAC DSN field */ 95 /* MAC DSN field */
92 u8 dsn; 96 u8 dsn;
97
98 /* protects sec from concurrent access by netlink; access by
99 * encrypt/decrypt/header_create is safe without additional protection.
100 */
101 struct mutex sec_mtx;
102
103 struct mac802154_llsec sec;
93}; 104};
94 105
95#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) 106#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
@@ -125,4 +136,37 @@ int mac802154_set_mac_params(struct net_device *dev,
125void mac802154_get_mac_params(struct net_device *dev, 136void mac802154_get_mac_params(struct net_device *dev,
126 struct ieee802154_mac_params *params); 137 struct ieee802154_mac_params *params);
127 138
139int mac802154_get_params(struct net_device *dev,
140 struct ieee802154_llsec_params *params);
141int mac802154_set_params(struct net_device *dev,
142 const struct ieee802154_llsec_params *params,
143 int changed);
144
145int mac802154_add_key(struct net_device *dev,
146 const struct ieee802154_llsec_key_id *id,
147 const struct ieee802154_llsec_key *key);
148int mac802154_del_key(struct net_device *dev,
149 const struct ieee802154_llsec_key_id *id);
150
151int mac802154_add_dev(struct net_device *dev,
152 const struct ieee802154_llsec_device *llsec_dev);
153int mac802154_del_dev(struct net_device *dev, __le64 dev_addr);
154
155int mac802154_add_devkey(struct net_device *dev,
156 __le64 device_addr,
157 const struct ieee802154_llsec_device_key *key);
158int mac802154_del_devkey(struct net_device *dev,
159 __le64 device_addr,
160 const struct ieee802154_llsec_device_key *key);
161
162int mac802154_add_seclevel(struct net_device *dev,
163 const struct ieee802154_llsec_seclevel *sl);
164int mac802154_del_seclevel(struct net_device *dev,
165 const struct ieee802154_llsec_seclevel *sl);
166
167void mac802154_lock_table(struct net_device *dev);
168void mac802154_get_table(struct net_device *dev,
169 struct ieee802154_llsec_table **t);
170void mac802154_unlock_table(struct net_device *dev);
171
128#endif /* MAC802154_H */ 172#endif /* MAC802154_H */
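
The lock_table/get_table/unlock_table trio exists so netlink dump paths can walk the raw llsec tables under sec_mtx without copying them. A hedged sketch of a consumer; the dump body is illustrative:

static void demo_dump_keys(struct net_device *dev)
{
	struct ieee802154_llsec_table *table;
	struct ieee802154_llsec_key_entry *pos;

	mac802154_lock_table(dev);
	mac802154_get_table(dev, &table);

	list_for_each_entry(pos, &table->keys, list)
		pr_debug("llsec key, id mode %d\n", pos->id.mode);

	mac802154_unlock_table(dev);
}
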
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index d40c0928bc62..bf809131eef7 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -40,6 +40,9 @@ static int mac802154_mlme_start_req(struct net_device *dev,
40 u8 pan_coord, u8 blx, 40 u8 pan_coord, u8 blx,
41 u8 coord_realign) 41 u8 coord_realign)
42{ 42{
43 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
44 int rc = 0;
45
43 BUG_ON(addr->mode != IEEE802154_ADDR_SHORT); 46 BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
44 47
45 mac802154_dev_set_pan_id(dev, addr->pan_id); 48 mac802154_dev_set_pan_id(dev, addr->pan_id);
@@ -47,12 +50,31 @@ static int mac802154_mlme_start_req(struct net_device *dev,
47 mac802154_dev_set_ieee_addr(dev); 50 mac802154_dev_set_ieee_addr(dev);
48 mac802154_dev_set_page_channel(dev, page, channel); 51 mac802154_dev_set_page_channel(dev, page, channel);
49 52
53 if (ops->llsec) {
54 struct ieee802154_llsec_params params;
55 int changed = 0;
56
57 params.coord_shortaddr = addr->short_addr;
58 changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
59
60 params.pan_id = addr->pan_id;
61 changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
62
63 params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
64 changed |= IEEE802154_LLSEC_PARAM_HWADDR;
65
66 params.coord_hwaddr = params.hwaddr;
67 changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
68
69 rc = ops->llsec->set_params(dev, &params, changed);
70 }
71
50 /* FIXME: add validation for unused parameters to be sane 72 /* FIXME: add validation for unused parameters to be sane
51 * for SoftMAC 73 * for SoftMAC
52 */ 74 */
53 ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); 75 ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
54 76
55 return 0; 77 return rc;
56} 78}
57 79
58static struct wpan_phy *mac802154_get_phy(const struct net_device *dev) 80static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
@@ -64,6 +86,22 @@ static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
64 return to_phy(get_device(&priv->hw->phy->dev)); 86 return to_phy(get_device(&priv->hw->phy->dev));
65} 87}
66 88
89static struct ieee802154_llsec_ops mac802154_llsec_ops = {
90 .get_params = mac802154_get_params,
91 .set_params = mac802154_set_params,
92 .add_key = mac802154_add_key,
93 .del_key = mac802154_del_key,
94 .add_dev = mac802154_add_dev,
95 .del_dev = mac802154_del_dev,
96 .add_devkey = mac802154_add_devkey,
97 .del_devkey = mac802154_del_devkey,
98 .add_seclevel = mac802154_add_seclevel,
99 .del_seclevel = mac802154_del_seclevel,
100 .lock_table = mac802154_lock_table,
101 .get_table = mac802154_get_table,
102 .unlock_table = mac802154_unlock_table,
103};
104
67struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { 105struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
68 .get_phy = mac802154_get_phy, 106 .get_phy = mac802154_get_phy,
69}; 107};
@@ -75,6 +113,8 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = {
75 .get_short_addr = mac802154_dev_get_short_addr, 113 .get_short_addr = mac802154_dev_get_short_addr,
76 .get_dsn = mac802154_dev_get_dsn, 114 .get_dsn = mac802154_dev_get_dsn,
77 115
116 .llsec = &mac802154_llsec_ops,
117
78 .set_mac_params = mac802154_set_mac_params, 118 .set_mac_params = mac802154_set_mac_params,
79 .get_mac_params = mac802154_get_mac_params, 119 .get_mac_params = mac802154_get_mac_params,
80}; 120};
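
With the ops table wired into mac802154_mlme_wpan, higher layers can configure security generically. A hedged sketch of enabling outgoing encryption through the same interface mac802154_mlme_start_req() uses above; the security-level constant is assumed from the companion ieee802154 headers:

static int demo_enable_security(struct net_device *dev)
{
	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
	struct ieee802154_llsec_params params;
	int changed = 0;

	if (!ops->llsec)
		return -EOPNOTSUPP;

	params.enabled = true;
	changed |= IEEE802154_LLSEC_PARAM_ENABLED;

	params.out_level = IEEE802154_SCF_SECLEVEL_ENC_MIC32;
	changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;

	return ops->llsec->set_params(dev, &params, changed);
}
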
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index f0991f2344d4..15aa2f2b03a7 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -213,3 +213,190 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
213 } else 213 } else
214 mutex_unlock(&priv->hw->phy->pib_lock); 214 mutex_unlock(&priv->hw->phy->pib_lock);
215} 215}
216
217
218int mac802154_get_params(struct net_device *dev,
219 struct ieee802154_llsec_params *params)
220{
221 struct mac802154_sub_if_data *priv = netdev_priv(dev);
222 int res;
223
224 BUG_ON(dev->type != ARPHRD_IEEE802154);
225
226 mutex_lock(&priv->sec_mtx);
227 res = mac802154_llsec_get_params(&priv->sec, params);
228 mutex_unlock(&priv->sec_mtx);
229
230 return res;
231}
232
233int mac802154_set_params(struct net_device *dev,
234 const struct ieee802154_llsec_params *params,
235 int changed)
236{
237 struct mac802154_sub_if_data *priv = netdev_priv(dev);
238 int res;
239
240 BUG_ON(dev->type != ARPHRD_IEEE802154);
241
242 mutex_lock(&priv->sec_mtx);
243 res = mac802154_llsec_set_params(&priv->sec, params, changed);
244 mutex_unlock(&priv->sec_mtx);
245
246 return res;
247}
248
249
250int mac802154_add_key(struct net_device *dev,
251 const struct ieee802154_llsec_key_id *id,
252 const struct ieee802154_llsec_key *key)
253{
254 struct mac802154_sub_if_data *priv = netdev_priv(dev);
255 int res;
256
257 BUG_ON(dev->type != ARPHRD_IEEE802154);
258
259 mutex_lock(&priv->sec_mtx);
260 res = mac802154_llsec_key_add(&priv->sec, id, key);
261 mutex_unlock(&priv->sec_mtx);
262
263 return res;
264}
265
266int mac802154_del_key(struct net_device *dev,
267 const struct ieee802154_llsec_key_id *id)
268{
269 struct mac802154_sub_if_data *priv = netdev_priv(dev);
270 int res;
271
272 BUG_ON(dev->type != ARPHRD_IEEE802154);
273
274 mutex_lock(&priv->sec_mtx);
275 res = mac802154_llsec_key_del(&priv->sec, id);
276 mutex_unlock(&priv->sec_mtx);
277
278 return res;
279}
280
281
282int mac802154_add_dev(struct net_device *dev,
283 const struct ieee802154_llsec_device *llsec_dev)
284{
285 struct mac802154_sub_if_data *priv = netdev_priv(dev);
286 int res;
287
288 BUG_ON(dev->type != ARPHRD_IEEE802154);
289
290 mutex_lock(&priv->sec_mtx);
291 res = mac802154_llsec_dev_add(&priv->sec, llsec_dev);
292 mutex_unlock(&priv->sec_mtx);
293
294 return res;
295}
296
297int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
298{
299 struct mac802154_sub_if_data *priv = netdev_priv(dev);
300 int res;
301
302 BUG_ON(dev->type != ARPHRD_IEEE802154);
303
304 mutex_lock(&priv->sec_mtx);
305 res = mac802154_llsec_dev_del(&priv->sec, dev_addr);
306 mutex_unlock(&priv->sec_mtx);
307
308 return res;
309}
310
311
312int mac802154_add_devkey(struct net_device *dev,
313 __le64 device_addr,
314 const struct ieee802154_llsec_device_key *key)
315{
316 struct mac802154_sub_if_data *priv = netdev_priv(dev);
317 int res;
318
319 BUG_ON(dev->type != ARPHRD_IEEE802154);
320
321 mutex_lock(&priv->sec_mtx);
322 res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key);
323 mutex_unlock(&priv->sec_mtx);
324
325 return res;
326}
327
328int mac802154_del_devkey(struct net_device *dev,
329 __le64 device_addr,
330 const struct ieee802154_llsec_device_key *key)
331{
332 struct mac802154_sub_if_data *priv = netdev_priv(dev);
333 int res;
334
335 BUG_ON(dev->type != ARPHRD_IEEE802154);
336
337 mutex_lock(&priv->sec_mtx);
338 res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key);
339 mutex_unlock(&priv->sec_mtx);
340
341 return res;
342}
343
344
345int mac802154_add_seclevel(struct net_device *dev,
346 const struct ieee802154_llsec_seclevel *sl)
347{
348 struct mac802154_sub_if_data *priv = netdev_priv(dev);
349 int res;
350
351 BUG_ON(dev->type != ARPHRD_IEEE802154);
352
353 mutex_lock(&priv->sec_mtx);
354 res = mac802154_llsec_seclevel_add(&priv->sec, sl);
355 mutex_unlock(&priv->sec_mtx);
356
357 return res;
358}
359
360int mac802154_del_seclevel(struct net_device *dev,
361 const struct ieee802154_llsec_seclevel *sl)
362{
363 struct mac802154_sub_if_data *priv = netdev_priv(dev);
364 int res;
365
366 BUG_ON(dev->type != ARPHRD_IEEE802154);
367
368 mutex_lock(&priv->sec_mtx);
369 res = mac802154_llsec_seclevel_del(&priv->sec, sl);
370 mutex_unlock(&priv->sec_mtx);
371
372 return res;
373}
374
375
376void mac802154_lock_table(struct net_device *dev)
377{
378 struct mac802154_sub_if_data *priv = netdev_priv(dev);
379
380 BUG_ON(dev->type != ARPHRD_IEEE802154);
381
382 mutex_lock(&priv->sec_mtx);
383}
384
385void mac802154_get_table(struct net_device *dev,
386 struct ieee802154_llsec_table **t)
387{
388 struct mac802154_sub_if_data *priv = netdev_priv(dev);
389
390 BUG_ON(dev->type != ARPHRD_IEEE802154);
391
392 *t = &priv->sec.table;
393}
394
395void mac802154_unlock_table(struct net_device *dev)
396{
397 struct mac802154_sub_if_data *priv = netdev_priv(dev);
398
399 BUG_ON(dev->type != ARPHRD_IEEE802154);
400
401 mutex_unlock(&priv->sec_mtx);
402}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
index 434a26f76a80..a68230e2b25f 100644
--- a/net/mac802154/monitor.c
+++ b/net/mac802154/monitor.c
@@ -70,7 +70,8 @@ void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
70 70
71 rcu_read_lock(); 71 rcu_read_lock();
72 list_for_each_entry_rcu(sdata, &priv->slaves, list) { 72 list_for_each_entry_rcu(sdata, &priv->slaves, list) {
73 if (sdata->type != IEEE802154_DEV_MONITOR) 73 if (sdata->type != IEEE802154_DEV_MONITOR ||
74 !netif_running(sdata->dev))
74 continue; 75 continue;
75 76
76 skb2 = skb_clone(skb, GFP_ATOMIC); 77 skb2 = skb_clone(skb, GFP_ATOMIC);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 03855b0677cc..7f820a108a9c 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -59,27 +59,28 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
59 skb->protocol = htons(ETH_P_IEEE802154); 59 skb->protocol = htons(ETH_P_IEEE802154);
60 skb_reset_mac_header(skb); 60 skb_reset_mac_header(skb);
61 61
62 BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
63
64 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { 62 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
65 u16 crc; 63 u16 crc;
66 64
67 if (skb->len < 2) { 65 if (skb->len < 2) {
68 pr_debug("got invalid frame\n"); 66 pr_debug("got invalid frame\n");
69 goto out; 67 goto fail;
70 } 68 }
71 crc = crc_ccitt(0, skb->data, skb->len); 69 crc = crc_ccitt(0, skb->data, skb->len);
72 if (crc) { 70 if (crc) {
73 pr_debug("CRC mismatch\n"); 71 pr_debug("CRC mismatch\n");
74 goto out; 72 goto fail;
75 } 73 }
76 skb_trim(skb, skb->len - 2); /* CRC */ 74 skb_trim(skb, skb->len - 2); /* CRC */
77 } 75 }
78 76
79 mac802154_monitors_rx(priv, skb); 77 mac802154_monitors_rx(priv, skb);
80 mac802154_wpans_rx(priv, skb); 78 mac802154_wpans_rx(priv, skb);
81out: 79
82 dev_kfree_skb(skb); 80 return;
81
82fail:
83 kfree_skb(skb);
83} 84}
84 85
85static void mac802154_rx_worker(struct work_struct *work) 86static void mac802154_rx_worker(struct work_struct *work)
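
The rework above fixes skb ownership: validation failures are now dropped with kfree_skb(), while frames handed to the monitor/wpan paths are consumed there and must not be freed again. A hedged restatement of that rule, not code from the patch:

static void demo_rx_ownership(struct sk_buff *skb)
{
	if (skb->len < 2) {
		kfree_skb(skb);		/* drop path: frame is discarded */
		return;
	}

	netif_rx(skb);			/* delivery path: skb is consumed */
	/* no free here: the skb now belongs to the stack */
}
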
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 1df7a6a57386..3c3069fd6971 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -35,6 +35,28 @@
35 35
36#include "mac802154.h" 36#include "mac802154.h"
37 37
38static int mac802154_wpan_update_llsec(struct net_device *dev)
39{
40 struct mac802154_sub_if_data *priv = netdev_priv(dev);
41 struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
42 int rc = 0;
43
44 if (ops->llsec) {
45 struct ieee802154_llsec_params params;
46 int changed = 0;
47
48 params.pan_id = priv->pan_id;
49 changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
50
51 params.hwaddr = priv->extended_addr;
52 changed |= IEEE802154_LLSEC_PARAM_HWADDR;
53
54 rc = ops->llsec->set_params(dev, &params, changed);
55 }
56
57 return rc;
58}
59
38static int 60static int
39mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 61mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40{ 62{
@@ -81,7 +103,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
81 priv->pan_id = cpu_to_le16(sa->addr.pan_id); 103 priv->pan_id = cpu_to_le16(sa->addr.pan_id);
82 priv->short_addr = cpu_to_le16(sa->addr.short_addr); 104 priv->short_addr = cpu_to_le16(sa->addr.short_addr);
83 105
84 err = 0; 106 err = mac802154_wpan_update_llsec(dev);
85 break; 107 break;
86 } 108 }
87 109
@@ -99,7 +121,7 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
99 /* FIXME: validate addr */ 121 /* FIXME: validate addr */
100 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 122 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
101 mac802154_dev_set_ieee_addr(dev); 123 mac802154_dev_set_ieee_addr(dev);
102 return 0; 124 return mac802154_wpan_update_llsec(dev);
103} 125}
104 126
105int mac802154_set_mac_params(struct net_device *dev, 127int mac802154_set_mac_params(struct net_device *dev,
@@ -124,7 +146,7 @@ void mac802154_get_mac_params(struct net_device *dev,
124 mutex_unlock(&priv->hw->slaves_mtx); 146 mutex_unlock(&priv->hw->slaves_mtx);
125} 147}
126 148
127int mac802154_wpan_open(struct net_device *dev) 149static int mac802154_wpan_open(struct net_device *dev)
128{ 150{
129 int rc; 151 int rc;
130 struct mac802154_sub_if_data *priv = netdev_priv(dev); 152 struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -183,6 +205,38 @@ out:
183 return rc; 205 return rc;
184} 206}
185 207
208static int mac802154_set_header_security(struct mac802154_sub_if_data *priv,
209 struct ieee802154_hdr *hdr,
210 const struct ieee802154_mac_cb *cb)
211{
212 struct ieee802154_llsec_params params;
213 u8 level;
214
215 mac802154_llsec_get_params(&priv->sec, &params);
216
217 if (!params.enabled && cb->secen_override && cb->secen)
218 return -EINVAL;
219 if (!params.enabled ||
220 (cb->secen_override && !cb->secen) ||
221 !params.out_level)
222 return 0;
223 if (cb->seclevel_override && !cb->seclevel)
224 return -EINVAL;
225
226 level = cb->seclevel_override ? cb->seclevel : params.out_level;
227
228 hdr->fc.security_enabled = 1;
229 hdr->sec.level = level;
230 hdr->sec.key_id_mode = params.out_key.mode;
231 if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
232 hdr->sec.short_src = params.out_key.short_source;
233 else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
234 hdr->sec.extended_src = params.out_key.extended_source;
235 hdr->sec.key_id = params.out_key.id;
236
237 return 0;
238}
239
186static int mac802154_header_create(struct sk_buff *skb, 240static int mac802154_header_create(struct sk_buff *skb,
187 struct net_device *dev, 241 struct net_device *dev,
188 unsigned short type, 242 unsigned short type,
@@ -192,15 +246,20 @@ static int mac802154_header_create(struct sk_buff *skb,
192{ 246{
193 struct ieee802154_hdr hdr; 247 struct ieee802154_hdr hdr;
194 struct mac802154_sub_if_data *priv = netdev_priv(dev); 248 struct mac802154_sub_if_data *priv = netdev_priv(dev);
249 struct ieee802154_mac_cb *cb = mac_cb(skb);
195 int hlen; 250 int hlen;
196 251
197 if (!daddr) 252 if (!daddr)
198 return -EINVAL; 253 return -EINVAL;
199 254
200 memset(&hdr.fc, 0, sizeof(hdr.fc)); 255 memset(&hdr.fc, 0, sizeof(hdr.fc));
201 hdr.fc.type = mac_cb_type(skb); 256 hdr.fc.type = cb->type;
202 hdr.fc.security_enabled = mac_cb_is_secen(skb); 257 hdr.fc.security_enabled = cb->secen;
203 hdr.fc.ack_request = mac_cb_is_ackreq(skb); 258 hdr.fc.ack_request = cb->ackreq;
259 hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
260
261 if (mac802154_set_header_security(priv, &hdr, cb) < 0)
262 return -EINVAL;
204 263
205 if (!saddr) { 264 if (!saddr) {
206 spin_lock_bh(&priv->mib_lock); 265 spin_lock_bh(&priv->mib_lock);
@@ -231,7 +290,7 @@ static int mac802154_header_create(struct sk_buff *skb,
231 skb_reset_mac_header(skb); 290 skb_reset_mac_header(skb);
232 skb->mac_len = hlen; 291 skb->mac_len = hlen;
233 292
234 if (hlen + len + 2 > dev->mtu) 293 if (len > ieee802154_max_payload(&hdr))
235 return -EMSGSIZE; 294 return -EMSGSIZE;
236 295
237 return hlen; 296 return hlen;
@@ -257,6 +316,7 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
257{ 316{
258 struct mac802154_sub_if_data *priv; 317 struct mac802154_sub_if_data *priv;
259 u8 chan, page; 318 u8 chan, page;
319 int rc;
260 320
261 priv = netdev_priv(dev); 321 priv = netdev_priv(dev);
262 322
@@ -272,6 +332,13 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
272 return NETDEV_TX_OK; 332 return NETDEV_TX_OK;
273 } 333 }
274 334
335 rc = mac802154_llsec_encrypt(&priv->sec, skb);
336 if (rc) {
337 pr_warn("encryption failed: %i\n", rc);
338 kfree_skb(skb);
339 return NETDEV_TX_OK;
340 }
341
275 skb->skb_iif = dev->ifindex; 342 skb->skb_iif = dev->ifindex;
276 dev->stats.tx_packets++; 343 dev->stats.tx_packets++;
277 dev->stats.tx_bytes += skb->len; 344 dev->stats.tx_bytes += skb->len;
@@ -292,6 +359,15 @@ static const struct net_device_ops mac802154_wpan_ops = {
292 .ndo_set_mac_address = mac802154_wpan_mac_addr, 359 .ndo_set_mac_address = mac802154_wpan_mac_addr,
293}; 360};
294 361
362static void mac802154_wpan_free(struct net_device *dev)
363{
364 struct mac802154_sub_if_data *priv = netdev_priv(dev);
365
366 mac802154_llsec_destroy(&priv->sec);
367
368 free_netdev(dev);
369}
370
295void mac802154_wpan_setup(struct net_device *dev) 371void mac802154_wpan_setup(struct net_device *dev)
296{ 372{
297 struct mac802154_sub_if_data *priv; 373 struct mac802154_sub_if_data *priv;
@@ -301,14 +377,14 @@ void mac802154_wpan_setup(struct net_device *dev)
301 377
302 dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN; 378 dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
303 dev->header_ops = &mac802154_header_ops; 379 dev->header_ops = &mac802154_header_ops;
304 dev->needed_tailroom = 2; /* FCS */ 380 dev->needed_tailroom = 2 + 16; /* FCS + MIC */
305 dev->mtu = IEEE802154_MTU; 381 dev->mtu = IEEE802154_MTU;
306 dev->tx_queue_len = 300; 382 dev->tx_queue_len = 300;
307 dev->type = ARPHRD_IEEE802154; 383 dev->type = ARPHRD_IEEE802154;
308 dev->flags = IFF_NOARP | IFF_BROADCAST; 384 dev->flags = IFF_NOARP | IFF_BROADCAST;
309 dev->watchdog_timeo = 0; 385 dev->watchdog_timeo = 0;
310 386
311 dev->destructor = free_netdev; 387 dev->destructor = mac802154_wpan_free;
312 dev->netdev_ops = &mac802154_wpan_ops; 388 dev->netdev_ops = &mac802154_wpan_ops;
313 dev->ml_priv = &mac802154_mlme_wpan; 389 dev->ml_priv = &mac802154_mlme_wpan;
314 390
@@ -319,6 +395,7 @@ void mac802154_wpan_setup(struct net_device *dev)
319 priv->page = 0; 395 priv->page = 0;
320 396
321 spin_lock_init(&priv->mib_lock); 397 spin_lock_init(&priv->mib_lock);
398 mutex_init(&priv->sec_mtx);
322 399
323 get_random_bytes(&priv->bsn, 1); 400 get_random_bytes(&priv->bsn, 1);
324 get_random_bytes(&priv->dsn, 1); 401 get_random_bytes(&priv->dsn, 1);
@@ -331,6 +408,8 @@ void mac802154_wpan_setup(struct net_device *dev)
331 408
332 priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); 409 priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
333 priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); 410 priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
411
412 mac802154_llsec_init(&priv->sec);
334} 413}
335 414
336static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb) 415static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
@@ -339,9 +418,11 @@ static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
339} 418}
340 419
341static int 420static int
342mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb) 421mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
422 const struct ieee802154_hdr *hdr)
343{ 423{
344 __le16 span, sshort; 424 __le16 span, sshort;
425 int rc;
345 426
346 pr_debug("getting packet via slave interface %s\n", sdata->dev->name); 427 pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
347 428
@@ -388,15 +469,22 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
388 469
389 skb->dev = sdata->dev; 470 skb->dev = sdata->dev;
390 471
472 rc = mac802154_llsec_decrypt(&sdata->sec, skb);
473 if (rc) {
474 pr_debug("decryption failed: %i\n", rc);
475 kfree_skb(skb);
476 return NET_RX_DROP;
477 }
478
391 sdata->dev->stats.rx_packets++; 479 sdata->dev->stats.rx_packets++;
392 sdata->dev->stats.rx_bytes += skb->len; 480 sdata->dev->stats.rx_bytes += skb->len;
393 481
394 switch (mac_cb_type(skb)) { 482 switch (mac_cb(skb)->type) {
395 case IEEE802154_FC_TYPE_DATA: 483 case IEEE802154_FC_TYPE_DATA:
396 return mac802154_process_data(sdata->dev, skb); 484 return mac802154_process_data(sdata->dev, skb);
397 default: 485 default:
398 pr_warn("ieee802154: bad frame received (type = %d)\n", 486 pr_warn("ieee802154: bad frame received (type = %d)\n",
399 mac_cb_type(skb)); 487 mac_cb(skb)->type);
400 kfree_skb(skb); 488 kfree_skb(skb);
401 return NET_RX_DROP; 489 return NET_RX_DROP;
402 } 490 }
@@ -419,62 +507,58 @@ static void mac802154_print_addr(const char *name,
419 } 507 }
420} 508}
421 509
422static int mac802154_parse_frame_start(struct sk_buff *skb) 510static int mac802154_parse_frame_start(struct sk_buff *skb,
511 struct ieee802154_hdr *hdr)
423{ 512{
424 int hlen; 513 int hlen;
425 struct ieee802154_hdr hdr; 514 struct ieee802154_mac_cb *cb = mac_cb_init(skb);
426 515
427 hlen = ieee802154_hdr_pull(skb, &hdr); 516 hlen = ieee802154_hdr_pull(skb, hdr);
428 if (hlen < 0) 517 if (hlen < 0)
429 return -EINVAL; 518 return -EINVAL;
430 519
431 skb->mac_len = hlen; 520 skb->mac_len = hlen;
432 521
433 pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc), 522 pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
434 hdr.seq); 523 hdr->seq);
435
436 mac_cb(skb)->flags = hdr.fc.type;
437 524
438 if (hdr.fc.ack_request) 525 cb->type = hdr->fc.type;
439 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; 526 cb->ackreq = hdr->fc.ack_request;
440 if (hdr.fc.security_enabled) 527 cb->secen = hdr->fc.security_enabled;
441 mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN;
442 528
443 mac802154_print_addr("destination", &hdr.dest); 529 mac802154_print_addr("destination", &hdr->dest);
444 mac802154_print_addr("source", &hdr.source); 530 mac802154_print_addr("source", &hdr->source);
445 531
446 mac_cb(skb)->source = hdr.source; 532 cb->source = hdr->source;
447 mac_cb(skb)->dest = hdr.dest; 533 cb->dest = hdr->dest;
448 534
449 if (hdr.fc.security_enabled) { 535 if (hdr->fc.security_enabled) {
450 u64 key; 536 u64 key;
451 537
452 pr_debug("seclevel %i\n", hdr.sec.level); 538 pr_debug("seclevel %i\n", hdr->sec.level);
453 539
454 switch (hdr.sec.key_id_mode) { 540 switch (hdr->sec.key_id_mode) {
455 case IEEE802154_SCF_KEY_IMPLICIT: 541 case IEEE802154_SCF_KEY_IMPLICIT:
456 pr_debug("implicit key\n"); 542 pr_debug("implicit key\n");
457 break; 543 break;
458 544
459 case IEEE802154_SCF_KEY_INDEX: 545 case IEEE802154_SCF_KEY_INDEX:
460 pr_debug("key %02x\n", hdr.sec.key_id); 546 pr_debug("key %02x\n", hdr->sec.key_id);
461 break; 547 break;
462 548
463 case IEEE802154_SCF_KEY_SHORT_INDEX: 549 case IEEE802154_SCF_KEY_SHORT_INDEX:
464 pr_debug("key %04x:%04x %02x\n", 550 pr_debug("key %04x:%04x %02x\n",
465 le32_to_cpu(hdr.sec.short_src) >> 16, 551 le32_to_cpu(hdr->sec.short_src) >> 16,
466 le32_to_cpu(hdr.sec.short_src) & 0xffff, 552 le32_to_cpu(hdr->sec.short_src) & 0xffff,
467 hdr.sec.key_id); 553 hdr->sec.key_id);
468 break; 554 break;
469 555
470 case IEEE802154_SCF_KEY_HW_INDEX: 556 case IEEE802154_SCF_KEY_HW_INDEX:
471 key = swab64((__force u64) hdr.sec.extended_src); 557 key = swab64((__force u64) hdr->sec.extended_src);
472 pr_debug("key source %8phC %02x\n", &key, 558 pr_debug("key source %8phC %02x\n", &key,
473 hdr.sec.key_id); 559 hdr->sec.key_id);
474 break; 560 break;
475 } 561 }
476
477 return -EINVAL;
478 } 562 }
479 563
480 return 0; 564 return 0;
@@ -483,10 +567,10 @@ static int mac802154_parse_frame_start(struct sk_buff *skb)
483void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb) 567void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
484{ 568{
485 int ret; 569 int ret;
486 struct sk_buff *sskb;
487 struct mac802154_sub_if_data *sdata; 570 struct mac802154_sub_if_data *sdata;
571 struct ieee802154_hdr hdr;
488 572
489 ret = mac802154_parse_frame_start(skb); 573 ret = mac802154_parse_frame_start(skb, &hdr);
490 if (ret) { 574 if (ret) {
491 pr_debug("got invalid frame\n"); 575 pr_debug("got invalid frame\n");
492 return; 576 return;
@@ -494,12 +578,16 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
494 578
495 rcu_read_lock(); 579 rcu_read_lock();
496 list_for_each_entry_rcu(sdata, &priv->slaves, list) { 580 list_for_each_entry_rcu(sdata, &priv->slaves, list) {
497 if (sdata->type != IEEE802154_DEV_WPAN) 581 if (sdata->type != IEEE802154_DEV_WPAN ||
582 !netif_running(sdata->dev))
498 continue; 583 continue;
499 584
500 sskb = skb_clone(skb, GFP_ATOMIC); 585 mac802154_subif_frame(sdata, skb, &hdr);
501 if (sskb) 586 skb = NULL;
502 mac802154_subif_frame(sdata, sskb); 587 break;
503 } 588 }
504 rcu_read_unlock(); 589 rcu_read_unlock();
590
591 if (skb)
592 kfree_skb(skb);
505} 593}
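Note on the rx rework above: delivery is now consume-or-free. The frame is handed to the first running WPAN interface instead of being cloned once per slave, and the caller frees it only when no interface took it. A minimal sketch of the pattern, with hypothetical slave/deliver names standing in for the mac802154 types:

static void dispatch_once(struct list_head *slaves, struct sk_buff *skb)
{
	struct slave *s;	/* hypothetical per-interface struct */

	rcu_read_lock();
	list_for_each_entry_rcu(s, slaves, list) {
		if (!netif_running(s->dev))
			continue;

		deliver(s, skb);	/* consumes the skb */
		skb = NULL;
		break;
	}
	rcu_read_unlock();

	if (skb)		/* no running interface consumed it */
		kfree_skb(skb);
}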
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 851cd880b0c0..6b38d083e1c9 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
33 SKB_GSO_DODGY | 33 SKB_GSO_DODGY |
34 SKB_GSO_TCP_ECN | 34 SKB_GSO_TCP_ECN |
35 SKB_GSO_GRE | 35 SKB_GSO_GRE |
36 SKB_GSO_GRE_CSUM |
36 SKB_GSO_IPIP | 37 SKB_GSO_IPIP |
37 SKB_GSO_MPLS))) 38 SKB_GSO_MPLS)))
38 goto out; 39 goto out;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 117208321f16..ec8114fae50b 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -271,10 +271,7 @@ ip_set_free(void *members)
271{ 271{
272 pr_debug("%p: free with %s\n", members, 272 pr_debug("%p: free with %s\n", members,
273 is_vmalloc_addr(members) ? "vfree" : "kfree"); 273 is_vmalloc_addr(members) ? "vfree" : "kfree");
274 if (is_vmalloc_addr(members)) 274 kvfree(members);
275 vfree(members);
276 else
277 kfree(members);
278} 275}
279EXPORT_SYMBOL_GPL(ip_set_free); 276EXPORT_SYMBOL_GPL(ip_set_free);
280 277
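kvfree() frees memory regardless of whether it came from kmalloc() or vmalloc(), which is what lets the explicit is_vmalloc_addr() branch above collapse into one call. The usual allocation-side pairing, sketched (function name illustrative):

static void *ip_set_alloc_sketch(size_t size)
{
	/* try the cheap allocator first, fall back to vmalloc */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vmalloc(size);
}

Either result is later released with a single kvfree(), no address-type check needed.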
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 3d2d2c8108ca..e6836755c45d 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -97,7 +97,7 @@ const char *ip_vs_proto_name(unsigned int proto)
97 return "ICMPv6"; 97 return "ICMPv6";
98#endif 98#endif
99 default: 99 default:
100 sprintf(buf, "IP_%d", proto); 100 sprintf(buf, "IP_%u", proto);
101 return buf; 101 return buf;
102 } 102 }
103} 103}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index c47444e4cf8c..73ba1cc7a88d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -562,7 +562,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
562 ip_send_check(iph); 562 ip_send_check(iph);
563 563
564 /* Another hack: avoid icmp_send in ip_fragment */ 564 /* Another hack: avoid icmp_send in ip_fragment */
565 skb->local_df = 1; 565 skb->ignore_df = 1;
566 566
567 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); 567 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
568 rcu_read_unlock(); 568 rcu_read_unlock();
@@ -590,7 +590,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
590 goto tx_error; 590 goto tx_error;
591 591
592 /* Another hack: avoid icmp_send in ip_fragment */ 592 /* Another hack: avoid icmp_send in ip_fragment */
593 skb->local_df = 1; 593 skb->ignore_df = 1;
594 594
595 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); 595 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
596 rcu_read_unlock(); 596 rcu_read_unlock();
@@ -684,7 +684,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
684 MTU problem. */ 684 MTU problem. */
685 685
686 /* Another hack: avoid icmp_send in ip_fragment */ 686 /* Another hack: avoid icmp_send in ip_fragment */
687 skb->local_df = 1; 687 skb->ignore_df = 1;
688 688
689 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); 689 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
690 rcu_read_unlock(); 690 rcu_read_unlock();
@@ -774,7 +774,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
774 MTU problem. */ 774 MTU problem. */
775 775
776 /* Another hack: avoid icmp_send in ip_fragment */ 776 /* Another hack: avoid icmp_send in ip_fragment */
777 skb->local_df = 1; 777 skb->ignore_df = 1;
778 778
779 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); 779 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
780 rcu_read_unlock(); 780 rcu_read_unlock();
@@ -883,10 +883,10 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
883 iph->daddr = cp->daddr.ip; 883 iph->daddr = cp->daddr.ip;
884 iph->saddr = saddr; 884 iph->saddr = saddr;
885 iph->ttl = old_iph->ttl; 885 iph->ttl = old_iph->ttl;
886 ip_select_ident(skb, &rt->dst, NULL); 886 ip_select_ident(skb, NULL);
887 887
888 /* Another hack: avoid icmp_send in ip_fragment */ 888 /* Another hack: avoid icmp_send in ip_fragment */
889 skb->local_df = 1; 889 skb->ignore_df = 1;
890 890
891 ret = ip_vs_tunnel_xmit_prepare(skb, cp); 891 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
892 if (ret == NF_ACCEPT) 892 if (ret == NF_ACCEPT)
@@ -974,7 +974,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
974 iph->hop_limit = old_iph->hop_limit; 974 iph->hop_limit = old_iph->hop_limit;
975 975
976 /* Another hack: avoid icmp_send in ip_fragment */ 976 /* Another hack: avoid icmp_send in ip_fragment */
977 skb->local_df = 1; 977 skb->ignore_df = 1;
978 978
979 ret = ip_vs_tunnel_xmit_prepare(skb, cp); 979 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
980 if (ret == NF_ACCEPT) 980 if (ret == NF_ACCEPT)
@@ -1023,7 +1023,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1023 ip_send_check(ip_hdr(skb)); 1023 ip_send_check(ip_hdr(skb));
1024 1024
1025 /* Another hack: avoid icmp_send in ip_fragment */ 1025 /* Another hack: avoid icmp_send in ip_fragment */
1026 skb->local_df = 1; 1026 skb->ignore_df = 1;
1027 1027
1028 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); 1028 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1029 rcu_read_unlock(); 1029 rcu_read_unlock();
@@ -1060,7 +1060,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1060 } 1060 }
1061 1061
1062 /* Another hack: avoid icmp_send in ip_fragment */ 1062 /* Another hack: avoid icmp_send in ip_fragment */
1063 skb->local_df = 1; 1063 skb->ignore_df = 1;
1064 1064
1065 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); 1065 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1066 rcu_read_unlock(); 1066 rcu_read_unlock();
@@ -1157,7 +1157,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1157 ip_vs_nat_icmp(skb, pp, cp, 0); 1157 ip_vs_nat_icmp(skb, pp, cp, 0);
1158 1158
1159 /* Another hack: avoid icmp_send in ip_fragment */ 1159 /* Another hack: avoid icmp_send in ip_fragment */
1160 skb->local_df = 1; 1160 skb->ignore_df = 1;
1161 1161
1162 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); 1162 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1163 rcu_read_unlock(); 1163 rcu_read_unlock();
@@ -1249,7 +1249,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1249 ip_vs_nat_icmp_v6(skb, pp, cp, 0); 1249 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1250 1250
1251 /* Another hack: avoid icmp_send in ip_fragment */ 1251 /* Another hack: avoid icmp_send in ip_fragment */
1252 skb->local_df = 1; 1252 skb->ignore_df = 1;
1253 1253
1254 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); 1254 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
1255 rcu_read_unlock(); 1255 rcu_read_unlock();
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 52ca952b802c..09096a670c45 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -358,6 +358,19 @@ out:
358 rcu_read_unlock(); 358 rcu_read_unlock();
359} 359}
360 360
361struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
362{
363 struct nf_conn_nat *nat = nfct_nat(ct);
364 if (nat)
365 return nat;
366
367 if (!nf_ct_is_confirmed(ct))
368 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
369
370 return nat;
371}
372EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
373
361unsigned int 374unsigned int
362nf_nat_setup_info(struct nf_conn *ct, 375nf_nat_setup_info(struct nf_conn *ct,
363 const struct nf_nat_range *range, 376 const struct nf_nat_range *range,
@@ -368,14 +381,9 @@ nf_nat_setup_info(struct nf_conn *ct,
368 struct nf_conn_nat *nat; 381 struct nf_conn_nat *nat;
369 382
370 /* nat helper or nfctnetlink also setup binding */ 383 /* nat helper or nfctnetlink also setup binding */
371 nat = nfct_nat(ct); 384 nat = nf_ct_nat_ext_add(ct);
372 if (!nat) { 385 if (nat == NULL)
373 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); 386 return NF_ACCEPT;
374 if (nat == NULL) {
375 pr_debug("failed to add NAT extension\n");
376 return NF_ACCEPT;
377 }
378 }
379 387
380 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || 388 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
381 maniptype == NF_NAT_MANIP_DST); 389 maniptype == NF_NAT_MANIP_DST);
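The new nf_ct_nat_ext_add() is a get-or-create helper: it returns an existing NAT extension, and only attaches a fresh one while the conntrack is still unconfirmed (the extension area must not grow after confirmation). Callers shrink to a single NULL check, exactly as nf_nat_setup_info() now does:

	struct nf_conn_nat *nat = nf_ct_nat_ext_add(ct);

	if (nat == NULL)	/* confirmed entry or allocation failure */
		return NF_ACCEPT;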
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3fd159db9f06..624e083125b9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -88,6 +88,45 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
88 return ERR_PTR(-EAFNOSUPPORT); 88 return ERR_PTR(-EAFNOSUPPORT);
89} 89}
90 90
91static void nft_ctx_init(struct nft_ctx *ctx,
92 const struct sk_buff *skb,
93 const struct nlmsghdr *nlh,
94 struct nft_af_info *afi,
95 struct nft_table *table,
96 struct nft_chain *chain,
97 const struct nlattr * const *nla)
98{
99 ctx->net = sock_net(skb->sk);
100 ctx->afi = afi;
101 ctx->table = table;
102 ctx->chain = chain;
103 ctx->nla = nla;
104 ctx->portid = NETLINK_CB(skb).portid;
105 ctx->report = nlmsg_report(nlh);
106 ctx->seq = nlh->nlmsg_seq;
107}
108
109static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
110 u32 size)
111{
112 struct nft_trans *trans;
113
114 trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
115 if (trans == NULL)
116 return NULL;
117
118 trans->msg_type = msg_type;
119 trans->ctx = *ctx;
120
121 return trans;
122}
123
124static void nft_trans_destroy(struct nft_trans *trans)
125{
126 list_del(&trans->list);
127 kfree(trans);
128}
129
91/* 130/*
92 * Tables 131 * Tables
93 */ 132 */
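These helpers set up the generic transaction machinery the rest of this patch relies on: nft_ctx_init() snapshots everything a deferred notification needs (net, portid, seq, report flag), and nft_trans_alloc() tacks a type-specific payload onto that snapshot. The queueing pattern the later hunks repeat, sketched:

	struct nft_ctx ctx;
	struct nft_trans *trans;

	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
	trans = nft_trans_alloc(&ctx, NFT_MSG_NEWTABLE,
				sizeof(struct nft_trans_table));
	if (trans == NULL)
		return -ENOMEM;

	/* picked up later by the batch commit (or abort) path */
	list_add_tail(&trans->list, &ctx.net->nft.commit_list);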
@@ -197,20 +236,13 @@ nla_put_failure:
197 return -1; 236 return -1;
198} 237}
199 238
200static int nf_tables_table_notify(const struct sk_buff *oskb, 239static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
201 const struct nlmsghdr *nlh,
202 const struct nft_table *table,
203 int event, int family)
204{ 240{
205 struct sk_buff *skb; 241 struct sk_buff *skb;
206 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
207 u32 seq = nlh ? nlh->nlmsg_seq : 0;
208 struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
209 bool report;
210 int err; 242 int err;
211 243
212 report = nlh ? nlmsg_report(nlh) : false; 244 if (!ctx->report &&
213 if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 245 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
214 return 0; 246 return 0;
215 247
216 err = -ENOBUFS; 248 err = -ENOBUFS;
@@ -218,18 +250,20 @@ static int nf_tables_table_notify(const struct sk_buff *oskb,
218 if (skb == NULL) 250 if (skb == NULL)
219 goto err; 251 goto err;
220 252
221 err = nf_tables_fill_table_info(skb, portid, seq, event, 0, 253 err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0,
222 family, table); 254 ctx->afi->family, ctx->table);
223 if (err < 0) { 255 if (err < 0) {
224 kfree_skb(skb); 256 kfree_skb(skb);
225 goto err; 257 goto err;
226 } 258 }
227 259
228 err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, 260 err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
229 GFP_KERNEL); 261 ctx->report, GFP_KERNEL);
230err: 262err:
231 if (err < 0) 263 if (err < 0) {
232 nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); 264 nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
265 err);
266 }
233 return err; 267 return err;
234} 268}
235 269
@@ -269,6 +303,9 @@ done:
269 return skb->len; 303 return skb->len;
270} 304}
271 305
306/* Internal table flags */
307#define NFT_TABLE_INACTIVE (1 << 15)
308
272static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb, 309static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
273 const struct nlmsghdr *nlh, 310 const struct nlmsghdr *nlh,
274 const struct nlattr * const nla[]) 311 const struct nlattr * const nla[])
@@ -295,6 +332,8 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
295 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); 332 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
296 if (IS_ERR(table)) 333 if (IS_ERR(table))
297 return PTR_ERR(table); 334 return PTR_ERR(table);
335 if (table->flags & NFT_TABLE_INACTIVE)
336 return -ENOENT;
298 337
299 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 338 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
300 if (!skb2) 339 if (!skb2)
@@ -343,7 +382,7 @@ err:
343 return err; 382 return err;
344} 383}
345 384
346static int nf_tables_table_disable(const struct nft_af_info *afi, 385static void nf_tables_table_disable(const struct nft_af_info *afi,
347 struct nft_table *table) 386 struct nft_table *table)
348{ 387{
349 struct nft_chain *chain; 388 struct nft_chain *chain;
@@ -353,45 +392,63 @@ static int nf_tables_table_disable(const struct nft_af_info *afi,
353 nf_unregister_hooks(nft_base_chain(chain)->ops, 392 nf_unregister_hooks(nft_base_chain(chain)->ops,
354 afi->nops); 393 afi->nops);
355 } 394 }
356
357 return 0;
358} 395}
359 396
360static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb, 397static int nf_tables_updtable(struct nft_ctx *ctx)
361 const struct nlmsghdr *nlh,
362 const struct nlattr * const nla[],
363 struct nft_af_info *afi, struct nft_table *table)
364{ 398{
365 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 399 struct nft_trans *trans;
366 int family = nfmsg->nfgen_family, ret = 0; 400 u32 flags;
401 int ret = 0;
367 402
368 if (nla[NFTA_TABLE_FLAGS]) { 403 if (!ctx->nla[NFTA_TABLE_FLAGS])
369 u32 flags; 404 return 0;
370 405
371 flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS])); 406 flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
372 if (flags & ~NFT_TABLE_F_DORMANT) 407 if (flags & ~NFT_TABLE_F_DORMANT)
373 return -EINVAL; 408 return -EINVAL;
409
410 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
411 sizeof(struct nft_trans_table));
412 if (trans == NULL)
413 return -ENOMEM;
374 414
375 if ((flags & NFT_TABLE_F_DORMANT) && 415 if ((flags & NFT_TABLE_F_DORMANT) &&
376 !(table->flags & NFT_TABLE_F_DORMANT)) { 416 !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
377 ret = nf_tables_table_disable(afi, table); 417 nft_trans_table_enable(trans) = false;
378 if (ret >= 0) 418 } else if (!(flags & NFT_TABLE_F_DORMANT) &&
379 table->flags |= NFT_TABLE_F_DORMANT; 419 ctx->table->flags & NFT_TABLE_F_DORMANT) {
380 } else if (!(flags & NFT_TABLE_F_DORMANT) && 420 ret = nf_tables_table_enable(ctx->afi, ctx->table);
381 table->flags & NFT_TABLE_F_DORMANT) { 421 if (ret >= 0) {
382 ret = nf_tables_table_enable(afi, table); 422 ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
383 if (ret >= 0) 423 nft_trans_table_enable(trans) = true;
384 table->flags &= ~NFT_TABLE_F_DORMANT;
385 } 424 }
386 if (ret < 0)
387 goto err;
388 } 425 }
426 if (ret < 0)
427 goto err;
389 428
390 nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family); 429 nft_trans_table_update(trans) = true;
430 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
431 return 0;
391err: 432err:
433 nft_trans_destroy(trans);
392 return ret; 434 return ret;
393} 435}
394 436
437static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
438{
439 struct nft_trans *trans;
440
441 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
442 if (trans == NULL)
443 return -ENOMEM;
444
445 if (msg_type == NFT_MSG_NEWTABLE)
446 ctx->table->flags |= NFT_TABLE_INACTIVE;
447
448 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
449 return 0;
450}
451
395static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, 452static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
396 const struct nlmsghdr *nlh, 453 const struct nlmsghdr *nlh,
397 const struct nlattr * const nla[]) 454 const struct nlattr * const nla[])
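The dormant handling in nf_tables_updtable() above is deliberately asymmetric: waking a table registers its hooks immediately, since nf_tables_table_enable() can fail, while putting it to sleep only records enable = false and leaves the unregistering to the commit step. Roughly what the commit-side counterpart has to do (a sketch; the actual commit handler is introduced later in this series):

	if (nft_trans_table_update(trans) &&
	    !nft_trans_table_enable(trans)) {
		nf_tables_table_disable(afi, table);
		table->flags |= NFT_TABLE_F_DORMANT;
	}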
@@ -403,6 +460,8 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
403 struct net *net = sock_net(skb->sk); 460 struct net *net = sock_net(skb->sk);
404 int family = nfmsg->nfgen_family; 461 int family = nfmsg->nfgen_family;
405 u32 flags = 0; 462 u32 flags = 0;
463 struct nft_ctx ctx;
464 int err;
406 465
407 afi = nf_tables_afinfo_lookup(net, family, true); 466 afi = nf_tables_afinfo_lookup(net, family, true);
408 if (IS_ERR(afi)) 467 if (IS_ERR(afi))
@@ -417,11 +476,15 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
417 } 476 }
418 477
419 if (table != NULL) { 478 if (table != NULL) {
479 if (table->flags & NFT_TABLE_INACTIVE)
480 return -ENOENT;
420 if (nlh->nlmsg_flags & NLM_F_EXCL) 481 if (nlh->nlmsg_flags & NLM_F_EXCL)
421 return -EEXIST; 482 return -EEXIST;
422 if (nlh->nlmsg_flags & NLM_F_REPLACE) 483 if (nlh->nlmsg_flags & NLM_F_REPLACE)
423 return -EOPNOTSUPP; 484 return -EOPNOTSUPP;
424 return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table); 485
486 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
487 return nf_tables_updtable(&ctx);
425 } 488 }
426 489
427 if (nla[NFTA_TABLE_FLAGS]) { 490 if (nla[NFTA_TABLE_FLAGS]) {
@@ -444,8 +507,14 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
444 INIT_LIST_HEAD(&table->sets); 507 INIT_LIST_HEAD(&table->sets);
445 table->flags = flags; 508 table->flags = flags;
446 509
510 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
511 err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
512 if (err < 0) {
513 kfree(table);
514 module_put(afi->owner);
515 return err;
516 }
447 list_add_tail(&table->list, &afi->tables); 517 list_add_tail(&table->list, &afi->tables);
448 nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
449 return 0; 518 return 0;
450} 519}
451 520
@@ -457,7 +526,8 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
457 struct nft_af_info *afi; 526 struct nft_af_info *afi;
458 struct nft_table *table; 527 struct nft_table *table;
459 struct net *net = sock_net(skb->sk); 528 struct net *net = sock_net(skb->sk);
460 int family = nfmsg->nfgen_family; 529 int family = nfmsg->nfgen_family, err;
530 struct nft_ctx ctx;
461 531
462 afi = nf_tables_afinfo_lookup(net, family, false); 532 afi = nf_tables_afinfo_lookup(net, family, false);
463 if (IS_ERR(afi)) 533 if (IS_ERR(afi))
@@ -466,17 +536,28 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
466 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); 536 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
467 if (IS_ERR(table)) 537 if (IS_ERR(table))
468 return PTR_ERR(table); 538 return PTR_ERR(table);
469 539 if (table->flags & NFT_TABLE_INACTIVE)
470 if (!list_empty(&table->chains) || !list_empty(&table->sets)) 540 return -ENOENT;
541 if (table->use > 0)
471 return -EBUSY; 542 return -EBUSY;
472 543
544 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
545 err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE);
546 if (err < 0)
547 return err;
548
473 list_del(&table->list); 549 list_del(&table->list);
474 nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
475 kfree(table);
476 module_put(afi->owner);
477 return 0; 550 return 0;
478} 551}
479 552
553static void nf_tables_table_destroy(struct nft_ctx *ctx)
554{
555 BUG_ON(ctx->table->use > 0);
556
557 kfree(ctx->table);
558 module_put(ctx->afi->owner);
559}
560
480int nft_register_chain_type(const struct nf_chain_type *ctype) 561int nft_register_chain_type(const struct nf_chain_type *ctype)
481{ 562{
482 int err = 0; 563 int err = 0;
@@ -541,7 +622,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
541 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 622 .len = NFT_CHAIN_MAXNAMELEN - 1 },
542 [NFTA_CHAIN_HOOK] = { .type = NLA_NESTED }, 623 [NFTA_CHAIN_HOOK] = { .type = NLA_NESTED },
543 [NFTA_CHAIN_POLICY] = { .type = NLA_U32 }, 624 [NFTA_CHAIN_POLICY] = { .type = NLA_U32 },
544 [NFTA_CHAIN_TYPE] = { .type = NLA_NUL_STRING }, 625 [NFTA_CHAIN_TYPE] = { .type = NLA_STRING },
545 [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED }, 626 [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
546}; 627};
547 628
@@ -637,21 +718,13 @@ nla_put_failure:
637 return -1; 718 return -1;
638} 719}
639 720
640static int nf_tables_chain_notify(const struct sk_buff *oskb, 721static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
641 const struct nlmsghdr *nlh,
642 const struct nft_table *table,
643 const struct nft_chain *chain,
644 int event, int family)
645{ 722{
646 struct sk_buff *skb; 723 struct sk_buff *skb;
647 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
648 struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
649 u32 seq = nlh ? nlh->nlmsg_seq : 0;
650 bool report;
651 int err; 724 int err;
652 725
653 report = nlh ? nlmsg_report(nlh) : false; 726 if (!ctx->report &&
654 if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 727 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
655 return 0; 728 return 0;
656 729
657 err = -ENOBUFS; 730 err = -ENOBUFS;
@@ -659,18 +732,21 @@ static int nf_tables_chain_notify(const struct sk_buff *oskb,
659 if (skb == NULL) 732 if (skb == NULL)
660 goto err; 733 goto err;
661 734
662 err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family, 735 err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0,
663 table, chain); 736 ctx->afi->family, ctx->table,
737 ctx->chain);
664 if (err < 0) { 738 if (err < 0) {
665 kfree_skb(skb); 739 kfree_skb(skb);
666 goto err; 740 goto err;
667 } 741 }
668 742
669 err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, 743 err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
670 GFP_KERNEL); 744 ctx->report, GFP_KERNEL);
671err: 745err:
672 if (err < 0) 746 if (err < 0) {
673 nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); 747 nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
748 err);
749 }
674 return err; 750 return err;
675} 751}
676 752
@@ -740,10 +816,14 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
740 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); 816 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
741 if (IS_ERR(table)) 817 if (IS_ERR(table))
742 return PTR_ERR(table); 818 return PTR_ERR(table);
819 if (table->flags & NFT_TABLE_INACTIVE)
820 return -ENOENT;
743 821
744 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); 822 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
745 if (IS_ERR(chain)) 823 if (IS_ERR(chain))
746 return PTR_ERR(chain); 824 return PTR_ERR(chain);
825 if (chain->flags & NFT_CHAIN_INACTIVE)
826 return -ENOENT;
747 827
748 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 828 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
749 if (!skb2) 829 if (!skb2)
@@ -767,8 +847,7 @@ static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
767 [NFTA_COUNTER_BYTES] = { .type = NLA_U64 }, 847 [NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
768}; 848};
769 849
770static int 850static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
771nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
772{ 851{
773 struct nlattr *tb[NFTA_COUNTER_MAX+1]; 852 struct nlattr *tb[NFTA_COUNTER_MAX+1];
774 struct nft_stats __percpu *newstats; 853 struct nft_stats __percpu *newstats;
@@ -777,14 +856,14 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
777 856
778 err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy); 857 err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
779 if (err < 0) 858 if (err < 0)
780 return err; 859 return ERR_PTR(err);
781 860
782 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS]) 861 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
783 return -EINVAL; 862 return ERR_PTR(-EINVAL);
784 863
785 newstats = alloc_percpu(struct nft_stats); 864 newstats = alloc_percpu(struct nft_stats);
786 if (newstats == NULL) 865 if (newstats == NULL)
787 return -ENOMEM; 866 return ERR_PTR(-ENOMEM);
788 867
789 /* Restore old counters on this cpu, no problem. Per-cpu statistics 868 /* Restore old counters on this cpu, no problem. Per-cpu statistics
790 * are not exposed to userspace. 869 * are not exposed to userspace.
@@ -793,6 +872,12 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
793 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); 872 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
794 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); 873 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
795 874
875 return newstats;
876}
877
878static void nft_chain_stats_replace(struct nft_base_chain *chain,
879 struct nft_stats __percpu *newstats)
880{
796 if (chain->stats) { 881 if (chain->stats) {
797 struct nft_stats __percpu *oldstats = 882 struct nft_stats __percpu *oldstats =
798 nft_dereference(chain->stats); 883 nft_dereference(chain->stats);
@@ -802,17 +887,43 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
802 free_percpu(oldstats); 887 free_percpu(oldstats);
803 } else 888 } else
804 rcu_assign_pointer(chain->stats, newstats); 889 rcu_assign_pointer(chain->stats, newstats);
890}
891
892static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
893{
894 struct nft_trans *trans;
805 895
896 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
897 if (trans == NULL)
898 return -ENOMEM;
899
900 if (msg_type == NFT_MSG_NEWCHAIN)
901 ctx->chain->flags |= NFT_CHAIN_INACTIVE;
902
903 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
806 return 0; 904 return 0;
807} 905}
808 906
907static void nf_tables_chain_destroy(struct nft_chain *chain)
908{
909 BUG_ON(chain->use > 0);
910
911 if (chain->flags & NFT_BASE_CHAIN) {
912 module_put(nft_base_chain(chain)->type->owner);
913 free_percpu(nft_base_chain(chain)->stats);
914 kfree(nft_base_chain(chain));
915 } else {
916 kfree(chain);
917 }
918}
919
809static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, 920static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
810 const struct nlmsghdr *nlh, 921 const struct nlmsghdr *nlh,
811 const struct nlattr * const nla[]) 922 const struct nlattr * const nla[])
812{ 923{
813 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 924 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
814 const struct nlattr * uninitialized_var(name); 925 const struct nlattr * uninitialized_var(name);
815 const struct nft_af_info *afi; 926 struct nft_af_info *afi;
816 struct nft_table *table; 927 struct nft_table *table;
817 struct nft_chain *chain; 928 struct nft_chain *chain;
818 struct nft_base_chain *basechain = NULL; 929 struct nft_base_chain *basechain = NULL;
@@ -822,8 +933,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
822 u8 policy = NF_ACCEPT; 933 u8 policy = NF_ACCEPT;
823 u64 handle = 0; 934 u64 handle = 0;
824 unsigned int i; 935 unsigned int i;
936 struct nft_stats __percpu *stats;
825 int err; 937 int err;
826 bool create; 938 bool create;
939 struct nft_ctx ctx;
827 940
828 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 941 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
829 942
@@ -869,6 +982,11 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
869 } 982 }
870 983
871 if (chain != NULL) { 984 if (chain != NULL) {
985 struct nft_stats *stats = NULL;
986 struct nft_trans *trans;
987
988 if (chain->flags & NFT_CHAIN_INACTIVE)
989 return -ENOENT;
872 if (nlh->nlmsg_flags & NLM_F_EXCL) 990 if (nlh->nlmsg_flags & NLM_F_EXCL)
873 return -EEXIST; 991 return -EEXIST;
874 if (nlh->nlmsg_flags & NLM_F_REPLACE) 992 if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -882,19 +1000,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
882 if (!(chain->flags & NFT_BASE_CHAIN)) 1000 if (!(chain->flags & NFT_BASE_CHAIN))
883 return -EOPNOTSUPP; 1001 return -EOPNOTSUPP;
884 1002
885 err = nf_tables_counters(nft_base_chain(chain), 1003 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
886 nla[NFTA_CHAIN_COUNTERS]); 1004 if (IS_ERR(stats))
887 if (err < 0) 1005 return PTR_ERR(stats);
888 return err;
889 } 1006 }
890 1007
891 if (nla[NFTA_CHAIN_POLICY]) 1008 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
892 nft_base_chain(chain)->policy = policy; 1009 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
1010 sizeof(struct nft_trans_chain));
1011 if (trans == NULL)
1012 return -ENOMEM;
1013
1014 nft_trans_chain_stats(trans) = stats;
1015 nft_trans_chain_update(trans) = true;
893 1016
894 if (nla[NFTA_CHAIN_HANDLE] && name) 1017 if (nla[NFTA_CHAIN_POLICY])
895 nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN); 1018 nft_trans_chain_policy(trans) = policy;
1019 else
1020 nft_trans_chain_policy(trans) = -1;
896 1021
897 goto notify; 1022 if (nla[NFTA_CHAIN_HANDLE] && name) {
1023 nla_strlcpy(nft_trans_chain_name(trans), name,
1024 NFT_CHAIN_MAXNAMELEN);
1025 }
1026 list_add_tail(&trans->list, &net->nft.commit_list);
1027 return 0;
898 } 1028 }
899 1029
900 if (table->use == UINT_MAX) 1030 if (table->use == UINT_MAX)
@@ -939,23 +1069,21 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
939 return -ENOMEM; 1069 return -ENOMEM;
940 1070
941 if (nla[NFTA_CHAIN_COUNTERS]) { 1071 if (nla[NFTA_CHAIN_COUNTERS]) {
942 err = nf_tables_counters(basechain, 1072 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
943 nla[NFTA_CHAIN_COUNTERS]); 1073 if (IS_ERR(stats)) {
944 if (err < 0) {
945 module_put(type->owner); 1074 module_put(type->owner);
946 kfree(basechain); 1075 kfree(basechain);
947 return err; 1076 return PTR_ERR(stats);
948 } 1077 }
1078 basechain->stats = stats;
949 } else { 1079 } else {
950 struct nft_stats __percpu *newstats; 1080 stats = alloc_percpu(struct nft_stats);
951 1081 if (IS_ERR(stats)) {
952 newstats = alloc_percpu(struct nft_stats);
953 if (newstats == NULL) {
954 module_put(type->owner); 1082 module_put(type->owner);
955 kfree(basechain); 1083 kfree(basechain);
956 return -ENOMEM; 1084 return PTR_ERR(stats);
957 } 1085 }
958 rcu_assign_pointer(basechain->stats, newstats); 1086 rcu_assign_pointer(basechain->stats, stats);
959 } 1087 }
960 1088
961 basechain->type = type; 1089 basechain->type = type;
@@ -992,31 +1120,27 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
992 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1120 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
993 chain->flags & NFT_BASE_CHAIN) { 1121 chain->flags & NFT_BASE_CHAIN) {
994 err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops); 1122 err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
995 if (err < 0) { 1123 if (err < 0)
996 module_put(basechain->type->owner); 1124 goto err1;
997 free_percpu(basechain->stats);
998 kfree(basechain);
999 return err;
1000 }
1001 } 1125 }
1002 list_add_tail(&chain->list, &table->chains);
1003 table->use++;
1004notify:
1005 nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
1006 family);
1007 return 0;
1008}
1009 1126
1010static void nf_tables_chain_destroy(struct nft_chain *chain) 1127 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1011{ 1128 err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
1012 BUG_ON(chain->use > 0); 1129 if (err < 0)
1130 goto err2;
1013 1131
1014 if (chain->flags & NFT_BASE_CHAIN) { 1132 table->use++;
1015 module_put(nft_base_chain(chain)->type->owner); 1133 list_add_tail(&chain->list, &table->chains);
1016 free_percpu(nft_base_chain(chain)->stats); 1134 return 0;
1017 kfree(nft_base_chain(chain)); 1135err2:
1018 } else 1136 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
1019 kfree(chain); 1137 chain->flags & NFT_BASE_CHAIN) {
1138 nf_unregister_hooks(nft_base_chain(chain)->ops,
1139 afi->nops);
1140 }
1141err1:
1142 nf_tables_chain_destroy(chain);
1143 return err;
1020} 1144}
1021 1145
1022static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, 1146static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
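The rewritten tail of nf_tables_newchain() replaces the open-coded cleanup with the canonical goto unwind: each failure jumps to the label that undoes only what was already set up, in reverse order (err2 unregisters the freshly registered hooks, err1 destroys the chain). The general shape, with hypothetical setup/undo names:

	err = setup_a();	/* e.g. nf_register_hooks() */
	if (err < 0)
		goto err1;

	err = setup_b();	/* e.g. queueing the transaction */
	if (err < 0)
		goto err2;

	return 0;

err2:
	undo_a();		/* reverse order of setup */
err1:
	destroy_object();	/* undo the initial allocation */
	return err;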
@@ -1024,11 +1148,13 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1024 const struct nlattr * const nla[]) 1148 const struct nlattr * const nla[])
1025{ 1149{
1026 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1150 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1027 const struct nft_af_info *afi; 1151 struct nft_af_info *afi;
1028 struct nft_table *table; 1152 struct nft_table *table;
1029 struct nft_chain *chain; 1153 struct nft_chain *chain;
1030 struct net *net = sock_net(skb->sk); 1154 struct net *net = sock_net(skb->sk);
1031 int family = nfmsg->nfgen_family; 1155 int family = nfmsg->nfgen_family;
1156 struct nft_ctx ctx;
1157 int err;
1032 1158
1033 afi = nf_tables_afinfo_lookup(net, family, false); 1159 afi = nf_tables_afinfo_lookup(net, family, false);
1034 if (IS_ERR(afi)) 1160 if (IS_ERR(afi))
@@ -1037,48 +1163,27 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1037 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); 1163 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
1038 if (IS_ERR(table)) 1164 if (IS_ERR(table))
1039 return PTR_ERR(table); 1165 return PTR_ERR(table);
1166 if (table->flags & NFT_TABLE_INACTIVE)
1167 return -ENOENT;
1040 1168
1041 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); 1169 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
1042 if (IS_ERR(chain)) 1170 if (IS_ERR(chain))
1043 return PTR_ERR(chain); 1171 return PTR_ERR(chain);
1044 1172 if (chain->flags & NFT_CHAIN_INACTIVE)
1045 if (!list_empty(&chain->rules) || chain->use > 0) 1173 return -ENOENT;
1174 if (chain->use > 0)
1046 return -EBUSY; 1175 return -EBUSY;
1047 1176
1048 list_del(&chain->list); 1177 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1049 table->use--; 1178 err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN);
1050 1179 if (err < 0)
1051 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1180 return err;
1052 chain->flags & NFT_BASE_CHAIN)
1053 nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
1054
1055 nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
1056 family);
1057
1058 /* Make sure all rule references are gone before this is released */
1059 synchronize_rcu();
1060 1181
1061 nf_tables_chain_destroy(chain); 1182 table->use--;
1183 list_del(&chain->list);
1062 return 0; 1184 return 0;
1063} 1185}
1064 1186
1065static void nft_ctx_init(struct nft_ctx *ctx,
1066 const struct sk_buff *skb,
1067 const struct nlmsghdr *nlh,
1068 const struct nft_af_info *afi,
1069 const struct nft_table *table,
1070 const struct nft_chain *chain,
1071 const struct nlattr * const *nla)
1072{
1073 ctx->net = sock_net(skb->sk);
1074 ctx->skb = skb;
1075 ctx->nlh = nlh;
1076 ctx->afi = afi;
1077 ctx->table = table;
1078 ctx->chain = chain;
1079 ctx->nla = nla;
1080}
1081
1082/* 1187/*
1083 * Expressions 1188 * Expressions
1084 */ 1189 */
@@ -1093,7 +1198,10 @@ static void nft_ctx_init(struct nft_ctx *ctx,
1093int nft_register_expr(struct nft_expr_type *type) 1198int nft_register_expr(struct nft_expr_type *type)
1094{ 1199{
1095 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1200 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1096 list_add_tail(&type->list, &nf_tables_expressions); 1201 if (type->family == NFPROTO_UNSPEC)
1202 list_add_tail(&type->list, &nf_tables_expressions);
1203 else
1204 list_add(&type->list, &nf_tables_expressions);
1097 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1205 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1098 return 0; 1206 return 0;
1099} 1207}
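The list_add()/list_add_tail() split above is an ordering trick: generic (NFPROTO_UNSPEC) expression types go to the tail and family-specific ones to the head, so a front-to-back lookup prefers the specific implementation when both share a name. Illustrative lookup (the real one matches netlink attributes rather than C strings):

static const struct nft_expr_type *
expr_type_lookup(u8 family, const char *name)
{
	const struct nft_expr_type *type;

	/* head-first scan: family-specific entries shadow
	 * same-named NFPROTO_UNSPEC fallbacks behind them */
	list_for_each_entry(type, &nf_tables_expressions, list) {
		if (!strcmp(type->name, name) &&
		    (type->family == family ||
		     type->family == NFPROTO_UNSPEC))
			return type;
	}
	return NULL;
}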
@@ -1361,22 +1469,15 @@ nla_put_failure:
1361 return -1; 1469 return -1;
1362} 1470}
1363 1471
1364static int nf_tables_rule_notify(const struct sk_buff *oskb, 1472static int nf_tables_rule_notify(const struct nft_ctx *ctx,
1365 const struct nlmsghdr *nlh,
1366 const struct nft_table *table,
1367 const struct nft_chain *chain,
1368 const struct nft_rule *rule, 1473 const struct nft_rule *rule,
1369 int event, u32 flags, int family) 1474 int event)
1370{ 1475{
1371 struct sk_buff *skb; 1476 struct sk_buff *skb;
1372 u32 portid = NETLINK_CB(oskb).portid;
1373 struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
1374 u32 seq = nlh->nlmsg_seq;
1375 bool report;
1376 int err; 1477 int err;
1377 1478
1378 report = nlmsg_report(nlh); 1479 if (!ctx->report &&
1379 if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 1480 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
1380 return 0; 1481 return 0;
1381 1482
1382 err = -ENOBUFS; 1483 err = -ENOBUFS;
@@ -1384,18 +1485,21 @@ static int nf_tables_rule_notify(const struct sk_buff *oskb,
1384 if (skb == NULL) 1485 if (skb == NULL)
1385 goto err; 1486 goto err;
1386 1487
1387 err = nf_tables_fill_rule_info(skb, portid, seq, event, flags, 1488 err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0,
1388 family, table, chain, rule); 1489 ctx->afi->family, ctx->table,
1490 ctx->chain, rule);
1389 if (err < 0) { 1491 if (err < 0) {
1390 kfree_skb(skb); 1492 kfree_skb(skb);
1391 goto err; 1493 goto err;
1392 } 1494 }
1393 1495
1394 err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, 1496 err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
1395 GFP_KERNEL); 1497 ctx->report, GFP_KERNEL);
1396err: 1498err:
1397 if (err < 0) 1499 if (err < 0) {
1398 nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); 1500 nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
1501 err);
1502 }
1399 return err; 1503 return err;
1400} 1504}
1401 1505
@@ -1511,10 +1615,14 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
1511 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); 1615 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
1512 if (IS_ERR(table)) 1616 if (IS_ERR(table))
1513 return PTR_ERR(table); 1617 return PTR_ERR(table);
1618 if (table->flags & NFT_TABLE_INACTIVE)
1619 return -ENOENT;
1514 1620
1515 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 1621 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
1516 if (IS_ERR(chain)) 1622 if (IS_ERR(chain))
1517 return PTR_ERR(chain); 1623 return PTR_ERR(chain);
1624 if (chain->flags & NFT_CHAIN_INACTIVE)
1625 return -ENOENT;
1518 1626
1519 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); 1627 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
1520 if (IS_ERR(rule)) 1628 if (IS_ERR(rule))
@@ -1554,37 +1662,36 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
1554 kfree(rule); 1662 kfree(rule);
1555} 1663}
1556 1664
1557#define NFT_RULE_MAXEXPRS 128 1665static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
1558 1666 struct nft_rule *rule)
1559static struct nft_expr_info *info;
1560
1561static struct nft_rule_trans *
1562nf_tables_trans_add(struct nft_ctx *ctx, struct nft_rule *rule)
1563{ 1667{
1564 struct nft_rule_trans *rupd; 1668 struct nft_trans *trans;
1565 1669
1566 rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL); 1670 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
1567 if (rupd == NULL) 1671 if (trans == NULL)
1568 return NULL; 1672 return NULL;
1569 1673
1570 rupd->ctx = *ctx; 1674 nft_trans_rule(trans) = rule;
1571 rupd->rule = rule; 1675 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
1572 list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
1573 1676
1574 return rupd; 1677 return trans;
1575} 1678}
1576 1679
1680#define NFT_RULE_MAXEXPRS 128
1681
1682static struct nft_expr_info *info;
1683
1577static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, 1684static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1578 const struct nlmsghdr *nlh, 1685 const struct nlmsghdr *nlh,
1579 const struct nlattr * const nla[]) 1686 const struct nlattr * const nla[])
1580{ 1687{
1581 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1688 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1582 const struct nft_af_info *afi; 1689 struct nft_af_info *afi;
1583 struct net *net = sock_net(skb->sk); 1690 struct net *net = sock_net(skb->sk);
1584 struct nft_table *table; 1691 struct nft_table *table;
1585 struct nft_chain *chain; 1692 struct nft_chain *chain;
1586 struct nft_rule *rule, *old_rule = NULL; 1693 struct nft_rule *rule, *old_rule = NULL;
1587 struct nft_rule_trans *repl = NULL; 1694 struct nft_trans *trans = NULL;
1588 struct nft_expr *expr; 1695 struct nft_expr *expr;
1589 struct nft_ctx ctx; 1696 struct nft_ctx ctx;
1590 struct nlattr *tmp; 1697 struct nlattr *tmp;
@@ -1682,8 +1789,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1682 1789
1683 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 1790 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
1684 if (nft_rule_is_active_next(net, old_rule)) { 1791 if (nft_rule_is_active_next(net, old_rule)) {
1685 repl = nf_tables_trans_add(&ctx, old_rule); 1792 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
1686 if (repl == NULL) { 1793 old_rule);
1794 if (trans == NULL) {
1687 err = -ENOMEM; 1795 err = -ENOMEM;
1688 goto err2; 1796 goto err2;
1689 } 1797 }
@@ -1705,19 +1813,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1705 list_add_rcu(&rule->list, &chain->rules); 1813 list_add_rcu(&rule->list, &chain->rules);
1706 } 1814 }
1707 1815
1708 if (nf_tables_trans_add(&ctx, rule) == NULL) { 1816 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
1709 err = -ENOMEM; 1817 err = -ENOMEM;
1710 goto err3; 1818 goto err3;
1711 } 1819 }
1820 chain->use++;
1712 return 0; 1821 return 0;
1713 1822
1714err3: 1823err3:
1715 list_del_rcu(&rule->list); 1824 list_del_rcu(&rule->list);
1716 if (repl) { 1825 if (trans) {
1717 list_del_rcu(&repl->rule->list); 1826 list_del_rcu(&nft_trans_rule(trans)->list);
1718 list_del(&repl->list); 1827 nft_rule_clear(net, nft_trans_rule(trans));
1719 nft_rule_clear(net, repl->rule); 1828 nft_trans_destroy(trans);
1720 kfree(repl);
1721 } 1829 }
1722err2: 1830err2:
1723 nf_tables_rule_destroy(&ctx, rule); 1831 nf_tables_rule_destroy(&ctx, rule);
@@ -1734,9 +1842,10 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
1734{ 1842{
1735 /* You cannot delete the same rule twice */ 1843 /* You cannot delete the same rule twice */
1736 if (nft_rule_is_active_next(ctx->net, rule)) { 1844 if (nft_rule_is_active_next(ctx->net, rule)) {
1737 if (nf_tables_trans_add(ctx, rule) == NULL) 1845 if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL)
1738 return -ENOMEM; 1846 return -ENOMEM;
1739 nft_rule_disactivate_next(ctx->net, rule); 1847 nft_rule_disactivate_next(ctx->net, rule);
1848 ctx->chain->use--;
1740 return 0; 1849 return 0;
1741 } 1850 }
1742 return -ENOENT; 1851 return -ENOENT;
@@ -1760,9 +1869,9 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1760 const struct nlattr * const nla[]) 1869 const struct nlattr * const nla[])
1761{ 1870{
1762 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1871 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1763 const struct nft_af_info *afi; 1872 struct nft_af_info *afi;
1764 struct net *net = sock_net(skb->sk); 1873 struct net *net = sock_net(skb->sk);
1765 const struct nft_table *table; 1874 struct nft_table *table;
1766 struct nft_chain *chain = NULL; 1875 struct nft_chain *chain = NULL;
1767 struct nft_rule *rule; 1876 struct nft_rule *rule;
1768 int family = nfmsg->nfgen_family, err = 0; 1877 int family = nfmsg->nfgen_family, err = 0;
@@ -1775,6 +1884,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1775 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); 1884 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
1776 if (IS_ERR(table)) 1885 if (IS_ERR(table))
1777 return PTR_ERR(table); 1886 return PTR_ERR(table);
1887 if (table->flags & NFT_TABLE_INACTIVE)
1888 return -ENOENT;
1778 1889
1779 if (nla[NFTA_RULE_CHAIN]) { 1890 if (nla[NFTA_RULE_CHAIN]) {
1780 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 1891 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
@@ -1807,88 +1918,6 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1807 return err; 1918 return err;
1808} 1919}
1809 1920
1810static int nf_tables_commit(struct sk_buff *skb)
1811{
1812 struct net *net = sock_net(skb->sk);
1813 struct nft_rule_trans *rupd, *tmp;
1814
1815 /* Bump generation counter, invalidate any dump in progress */
1816 net->nft.genctr++;
1817
1818 /* A new generation has just started */
1819 net->nft.gencursor = gencursor_next(net);
1820
1821 /* Make sure all packets have left the previous generation before
1822 * purging old rules.
1823 */
1824 synchronize_rcu();
1825
1826 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1827 /* This rule was inactive in the past and just became active.
1828 * Clear the next bit of the genmask since its meaning has
1829 * changed, now it is the future.
1830 */
1831 if (nft_rule_is_active(net, rupd->rule)) {
1832 nft_rule_clear(net, rupd->rule);
1833 nf_tables_rule_notify(skb, rupd->ctx.nlh,
1834 rupd->ctx.table, rupd->ctx.chain,
1835 rupd->rule, NFT_MSG_NEWRULE, 0,
1836 rupd->ctx.afi->family);
1837 list_del(&rupd->list);
1838 kfree(rupd);
1839 continue;
1840 }
1841
1842 /* This rule is in the past, get rid of it */
1843 list_del_rcu(&rupd->rule->list);
1844 nf_tables_rule_notify(skb, rupd->ctx.nlh,
1845 rupd->ctx.table, rupd->ctx.chain,
1846 rupd->rule, NFT_MSG_DELRULE, 0,
1847 rupd->ctx.afi->family);
1848 }
1849
1850 /* Make sure we don't see any packet traversing old rules */
1851 synchronize_rcu();
1852
1853 /* Now we can safely release unused old rules */
1854 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1855 nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
1856 list_del(&rupd->list);
1857 kfree(rupd);
1858 }
1859
1860 return 0;
1861}
1862
1863static int nf_tables_abort(struct sk_buff *skb)
1864{
1865 struct net *net = sock_net(skb->sk);
1866 struct nft_rule_trans *rupd, *tmp;
1867
1868 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1869 if (!nft_rule_is_active_next(net, rupd->rule)) {
1870 nft_rule_clear(net, rupd->rule);
1871 list_del(&rupd->list);
1872 kfree(rupd);
1873 continue;
1874 }
1875
1876 /* This rule is inactive, get rid of it */
1877 list_del_rcu(&rupd->rule->list);
1878 }
1879
1880 /* Make sure we don't see any packet accessing aborted rules */
1881 synchronize_rcu();
1882
1883 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1884 nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
1885 list_del(&rupd->list);
1886 kfree(rupd);
1887 }
1888
1889 return 0;
1890}
1891
1892/* 1921/*
1893 * Sets 1922 * Sets
1894 */ 1923 */
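The removed nf_tables_commit()/nf_tables_abort() pair handled rules only; the transaction list introduced earlier generalizes the same two-phase scheme to tables, chains and sets. The core of what they implemented survives: each rule carries a one-bit-per-generation mask, commit flips net->nft.gencursor, and the two synchronize_rcu() calls bracket the window in which packets may still traverse either generation. The activity test, assuming the convention that a set genmask bit marks the rule inactive in that generation:

static inline bool rule_is_active(u8 genmask, unsigned int gencursor)
{
	/* bit clear: the rule participates in the current generation */
	return (genmask & (1 << gencursor)) == 0;
}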
@@ -1912,9 +1941,18 @@ void nft_unregister_set(struct nft_set_ops *ops)
1912} 1941}
1913EXPORT_SYMBOL_GPL(nft_unregister_set); 1942EXPORT_SYMBOL_GPL(nft_unregister_set);
1914 1943
1915static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[]) 1944/*
1945 * Select a set implementation based on the data characteristics and the
1946 * given policy. The total memory use might not be known if no size is
1947 * given, in that case the amount of memory per element is used.
1948 */
1949static const struct nft_set_ops *
1950nft_select_set_ops(const struct nlattr * const nla[],
1951 const struct nft_set_desc *desc,
1952 enum nft_set_policies policy)
1916{ 1953{
1917 const struct nft_set_ops *ops; 1954 const struct nft_set_ops *ops, *bops;
1955 struct nft_set_estimate est, best;
1918 u32 features; 1956 u32 features;
1919 1957
1920#ifdef CONFIG_MODULES 1958#ifdef CONFIG_MODULES
@@ -1932,15 +1970,45 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
1932 features &= NFT_SET_INTERVAL | NFT_SET_MAP; 1970 features &= NFT_SET_INTERVAL | NFT_SET_MAP;
1933 } 1971 }
1934 1972
1935 // FIXME: implement selection properly 1973 bops = NULL;
1974 best.size = ~0;
1975 best.class = ~0;
1976
1936 list_for_each_entry(ops, &nf_tables_set_ops, list) { 1977 list_for_each_entry(ops, &nf_tables_set_ops, list) {
1937 if ((ops->features & features) != features) 1978 if ((ops->features & features) != features)
1938 continue; 1979 continue;
1980 if (!ops->estimate(desc, features, &est))
1981 continue;
1982
1983 switch (policy) {
1984 case NFT_SET_POL_PERFORMANCE:
1985 if (est.class < best.class)
1986 break;
1987 if (est.class == best.class && est.size < best.size)
1988 break;
1989 continue;
1990 case NFT_SET_POL_MEMORY:
1991 if (est.size < best.size)
1992 break;
1993 if (est.size == best.size && est.class < best.class)
1994 break;
1995 continue;
1996 default:
1997 break;
1998 }
1999
1939 if (!try_module_get(ops->owner)) 2000 if (!try_module_get(ops->owner))
1940 continue; 2001 continue;
1941 return ops; 2002 if (bops != NULL)
2003 module_put(bops->owner);
2004
2005 bops = ops;
2006 best = est;
1942 } 2007 }
1943 2008
2009 if (bops != NULL)
2010 return bops;
2011
1944 return ERR_PTR(-EOPNOTSUPP); 2012 return ERR_PTR(-EOPNOTSUPP);
1945} 2013}
1946 2014
@@ -1953,6 +2021,13 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
1953 [NFTA_SET_KEY_LEN] = { .type = NLA_U32 }, 2021 [NFTA_SET_KEY_LEN] = { .type = NLA_U32 },
1954 [NFTA_SET_DATA_TYPE] = { .type = NLA_U32 }, 2022 [NFTA_SET_DATA_TYPE] = { .type = NLA_U32 },
1955 [NFTA_SET_DATA_LEN] = { .type = NLA_U32 }, 2023 [NFTA_SET_DATA_LEN] = { .type = NLA_U32 },
2024 [NFTA_SET_POLICY] = { .type = NLA_U32 },
2025 [NFTA_SET_DESC] = { .type = NLA_NESTED },
2026 [NFTA_SET_ID] = { .type = NLA_U32 },
2027};
2028
2029static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
2030 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
1956}; 2031};
1957 2032
1958static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, 2033static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
@@ -1962,8 +2037,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
1962{ 2037{
1963 struct net *net = sock_net(skb->sk); 2038 struct net *net = sock_net(skb->sk);
1964 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2039 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1965 const struct nft_af_info *afi = NULL; 2040 struct nft_af_info *afi = NULL;
1966 const struct nft_table *table = NULL; 2041 struct nft_table *table = NULL;
1967 2042
1968 if (nfmsg->nfgen_family != NFPROTO_UNSPEC) { 2043 if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
1969 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 2044 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -1978,6 +2053,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
1978 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); 2053 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
1979 if (IS_ERR(table)) 2054 if (IS_ERR(table))
1980 return PTR_ERR(table); 2055 return PTR_ERR(table);
2056 if (table->flags & NFT_TABLE_INACTIVE)
2057 return -ENOENT;
1981 } 2058 }
1982 2059
1983 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2060 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
@@ -1999,13 +2076,27 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
1999 return ERR_PTR(-ENOENT); 2076 return ERR_PTR(-ENOENT);
2000} 2077}
2001 2078
2079struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
2080 const struct nlattr *nla)
2081{
2082 struct nft_trans *trans;
2083 u32 id = ntohl(nla_get_be32(nla));
2084
2085 list_for_each_entry(trans, &net->nft.commit_list, list) {
2086 if (trans->msg_type == NFT_MSG_NEWSET &&
2087 id == nft_trans_set_id(trans))
2088 return nft_trans_set(trans);
2089 }
2090 return ERR_PTR(-ENOENT);
2091}
2092
2002static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, 2093static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
2003 const char *name) 2094 const char *name)
2004{ 2095{
2005 const struct nft_set *i; 2096 const struct nft_set *i;
2006 const char *p; 2097 const char *p;
2007 unsigned long *inuse; 2098 unsigned long *inuse;
2008 unsigned int n = 0; 2099 unsigned int n = 0, min = 0;
2009 2100
2010 p = strnchr(name, IFNAMSIZ, '%'); 2101 p = strnchr(name, IFNAMSIZ, '%');
2011 if (p != NULL) { 2102 if (p != NULL) {
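nf_tables_set_lookup_byid() makes not-yet-committed sets addressable: within one batch, userspace creates a set, tags it with an NFTA_SET_ID of its choosing, and can reference that id from other messages before the commit runs. The fallback pattern later patches in this series use, sketched (attribute choice illustrative):

	set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set) && nla[NFTA_SET_ID] != NULL) {
		/* not visible by name yet: try the pending batch */
		set = nf_tables_set_lookup_byid(net, nla[NFTA_SET_ID]);
	}
	if (IS_ERR(set))
		return PTR_ERR(set);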
@@ -2015,23 +2106,28 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
2015 inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL); 2106 inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
2016 if (inuse == NULL) 2107 if (inuse == NULL)
2017 return -ENOMEM; 2108 return -ENOMEM;
2018 2109cont:
2019 list_for_each_entry(i, &ctx->table->sets, list) { 2110 list_for_each_entry(i, &ctx->table->sets, list) {
2020 int tmp; 2111 int tmp;
2021 2112
2022 if (!sscanf(i->name, name, &tmp)) 2113 if (!sscanf(i->name, name, &tmp))
2023 continue; 2114 continue;
2024 if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE) 2115 if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
2025 continue; 2116 continue;
2026 2117
2027 set_bit(tmp, inuse); 2118 set_bit(tmp - min, inuse);
2028 } 2119 }
2029 2120
2030 n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE); 2121 n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
2122 if (n >= BITS_PER_BYTE * PAGE_SIZE) {
2123 min += BITS_PER_BYTE * PAGE_SIZE;
2124 memset(inuse, 0, PAGE_SIZE);
2125 goto cont;
2126 }
2031 free_page((unsigned long)inuse); 2127 free_page((unsigned long)inuse);
2032 } 2128 }
2033 2129
2034 snprintf(set->name, sizeof(set->name), name, n); 2130 snprintf(set->name, sizeof(set->name), name, min + n);
2035 list_for_each_entry(i, &ctx->table->sets, list) { 2131 list_for_each_entry(i, &ctx->table->sets, list) {
2036 if (!strcmp(set->name, i->name)) 2132 if (!strcmp(set->name, i->name))
2037 return -ENFILE; 2133 return -ENFILE;
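The min offset fixes a capacity limit in the "%d" name allocator: the old code considered only suffixes 0..BITS_PER_BYTE * PAGE_SIZE - 1 (one bitmap page), whereas the goto cont loop now slides that window up a page worth of suffixes at a time until a free one appears. A self-contained model of the windowed scan (names hypothetical):

static unsigned int find_free_suffix(int (*in_use)(unsigned int),
				     unsigned int window)
{
	unsigned int min = 0, n;

	for (;;) {
		for (n = 0; n < window; n++) {
			if (!in_use(min + n))
				return min + n;
		}
		min += window;	/* whole window taken: slide it up */
	}
}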
@@ -2044,8 +2140,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2044{ 2140{
2045 struct nfgenmsg *nfmsg; 2141 struct nfgenmsg *nfmsg;
2046 struct nlmsghdr *nlh; 2142 struct nlmsghdr *nlh;
2047 u32 portid = NETLINK_CB(ctx->skb).portid; 2143 struct nlattr *desc;
2048 u32 seq = ctx->nlh->nlmsg_seq; 2144 u32 portid = ctx->portid;
2145 u32 seq = ctx->seq;
2049 2146
2050 event |= NFNL_SUBSYS_NFTABLES << 8; 2147 event |= NFNL_SUBSYS_NFTABLES << 8;
2051 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 2148 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
@@ -2077,6 +2174,14 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2077 goto nla_put_failure; 2174 goto nla_put_failure;
2078 } 2175 }
2079 2176
2177 desc = nla_nest_start(skb, NFTA_SET_DESC);
2178 if (desc == NULL)
2179 goto nla_put_failure;
2180 if (set->size &&
2181 nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
2182 goto nla_put_failure;
2183 nla_nest_end(skb, desc);
2184
2080 return nlmsg_end(skb, nlh); 2185 return nlmsg_end(skb, nlh);
2081 2186
2082nla_put_failure: 2187nla_put_failure:
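NFTA_SET_DESC, filled in just above, is a nested netlink attribute: an outer header flagged NLA_F_NESTED whose payload is further attributes, here a single big-endian u32 carrying the set size. Below is a hedged sketch of that wire layout built by hand with the uapi struct nlattr; the numeric attribute type values are assumptions (the real NFTA_SET_DESC and NFTA_SET_DESC_SIZE values come from linux/netfilter/nf_tables.h), and patching the nest length after the payload mirrors what nla_nest_end() does.

#include <arpa/inet.h>		/* htonl */
#include <linux/netlink.h>	/* struct nlattr, NLA_F_NESTED, NLA_ALIGN */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one attribute at *off; returns a pointer to its header. */
static struct nlattr *put_attr(char *buf, size_t *off, uint16_t type,
			       const void *data, uint16_t len)
{
	struct nlattr *nla = (struct nlattr *)(buf + *off);

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + len;
	if (data != NULL)
		memcpy((char *)nla + NLA_HDRLEN, data, len);
	*off += NLA_ALIGN(nla->nla_len);
	return nla;
}

int main(void)
{
	char buf[64];
	size_t off = 0;
	uint32_t size_be = htonl(1024);	/* desired set size, big endian */
	struct nlattr *desc;

	/* open the nest: write the header now, patch its length later */
	desc = put_attr(buf, &off, 9 /* NFTA_SET_DESC, value assumed */
			| NLA_F_NESTED, NULL, 0);
	put_attr(buf, &off, 1 /* NFTA_SET_DESC_SIZE, value assumed */,
		 &size_be, sizeof(size_be));
	desc->nla_len = off;	/* nest starts at offset 0 */

	printf("nest occupies %u bytes\n", (unsigned int)desc->nla_len);
	return 0;
}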
@@ -2086,19 +2191,18 @@ nla_put_failure:
2086 2191
2087static int nf_tables_set_notify(const struct nft_ctx *ctx, 2192static int nf_tables_set_notify(const struct nft_ctx *ctx,
2088 const struct nft_set *set, 2193 const struct nft_set *set,
2089 int event) 2194 int event, gfp_t gfp_flags)
2090{ 2195{
2091 struct sk_buff *skb; 2196 struct sk_buff *skb;
2092 u32 portid = NETLINK_CB(ctx->skb).portid; 2197 u32 portid = ctx->portid;
2093 bool report;
2094 int err; 2198 int err;
2095 2199
2096 report = nlmsg_report(ctx->nlh); 2200 if (!ctx->report &&
2097 if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 2201 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
2098 return 0; 2202 return 0;
2099 2203
2100 err = -ENOBUFS; 2204 err = -ENOBUFS;
2101 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2205 skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
2102 if (skb == NULL) 2206 if (skb == NULL)
2103 goto err; 2207 goto err;
2104 2208
@@ -2108,8 +2212,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
2108 goto err; 2212 goto err;
2109 } 2213 }
2110 2214
2111 err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report, 2215 err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
2112 GFP_KERNEL); 2216 ctx->report, gfp_flags);
2113err: 2217err:
2114 if (err < 0) 2218 if (err < 0)
2115 nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err); 2219 nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
@@ -2183,7 +2287,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2183{ 2287{
2184 const struct nft_set *set; 2288 const struct nft_set *set;
2185 unsigned int idx, s_idx = cb->args[0]; 2289 unsigned int idx, s_idx = cb->args[0];
2186 const struct nft_af_info *afi; 2290 struct nft_af_info *afi;
2187 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; 2291 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
2188 struct net *net = sock_net(skb->sk); 2292 struct net *net = sock_net(skb->sk);
2189 int cur_family = cb->args[3]; 2293 int cur_family = cb->args[3];
@@ -2260,6 +2364,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
2260 return ret; 2364 return ret;
2261} 2365}
2262 2366
2367#define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */
2368
2263static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, 2369static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2264 const struct nlmsghdr *nlh, 2370 const struct nlmsghdr *nlh,
2265 const struct nlattr * const nla[]) 2371 const struct nlattr * const nla[])
@@ -2289,6 +2395,8 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2289 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2395 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2290 if (IS_ERR(set)) 2396 if (IS_ERR(set))
2291 return PTR_ERR(set); 2397 return PTR_ERR(set);
2398 if (set->flags & NFT_SET_INACTIVE)
2399 return -ENOENT;
2292 2400
2293 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2401 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2294 if (skb2 == NULL) 2402 if (skb2 == NULL)
@@ -2305,13 +2413,50 @@ err:
2305 return err; 2413 return err;
2306} 2414}
2307 2415
2416static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
2417 struct nft_set_desc *desc,
2418 const struct nlattr *nla)
2419{
2420 struct nlattr *da[NFTA_SET_DESC_MAX + 1];
2421 int err;
2422
2423 err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy);
2424 if (err < 0)
2425 return err;
2426
2427 if (da[NFTA_SET_DESC_SIZE] != NULL)
2428 desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
2429
2430 return 0;
2431}
2432
2433static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
2434 struct nft_set *set)
2435{
2436 struct nft_trans *trans;
2437
2438 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
2439 if (trans == NULL)
2440 return -ENOMEM;
2441
2442 if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
2443 nft_trans_set_id(trans) =
2444 ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
2445 set->flags |= NFT_SET_INACTIVE;
2446 }
2447 nft_trans_set(trans) = set;
2448 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2449
2450 return 0;
2451}
2452
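nft_trans_set_add() above is the shape every set operation now follows: allocate a transaction record sized for its payload, stash the object pointer (plus the batch-local id and the INACTIVE mark for NEWSET), and queue it on the per-netns commit list to be resolved at commit or abort time. A reduced single-threaded model of that journal entry, with names modeled on but not taken from the kernel:

#include <stdint.h>
#include <stdlib.h>

enum msg_type { MSG_NEWSET, MSG_DELSET };

struct trans {
	struct trans *next;
	enum msg_type type;
	char payload[];		/* per-type data lives behind the header */
};

struct trans_set {		/* payload for NEWSET/DELSET */
	void *set;
	uint32_t set_id;
	int inactive;
};

static struct trans *trans_alloc(enum msg_type type, size_t payload)
{
	struct trans *t = calloc(1, sizeof(*t) + payload);

	if (t != NULL)
		t->type = type;
	return t;
}

static int trans_set_add(struct trans **list, enum msg_type type,
			 void *set, uint32_t id)
{
	struct trans *t = trans_alloc(type, sizeof(struct trans_set));
	struct trans_set *ts;

	if (t == NULL)
		return -1;
	ts = (struct trans_set *)t->payload;
	ts->set = set;
	ts->set_id = id;
	/* a freshly queued set stays invisible until commit */
	ts->inactive = (type == MSG_NEWSET);
	t->next = *list;
	*list = t;
	return 0;
}

int main(void)
{
	struct trans *list = NULL;
	int set;

	return trans_set_add(&list, MSG_NEWSET, &set, 1) ? 1 : 0;
}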
2308static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, 2453static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2309 const struct nlmsghdr *nlh, 2454 const struct nlmsghdr *nlh,
2310 const struct nlattr * const nla[]) 2455 const struct nlattr * const nla[])
2311{ 2456{
2312 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2457 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2313 const struct nft_set_ops *ops; 2458 const struct nft_set_ops *ops;
2314 const struct nft_af_info *afi; 2459 struct nft_af_info *afi;
2315 struct net *net = sock_net(skb->sk); 2460 struct net *net = sock_net(skb->sk);
2316 struct nft_table *table; 2461 struct nft_table *table;
2317 struct nft_set *set; 2462 struct nft_set *set;
@@ -2319,14 +2464,18 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2319 char name[IFNAMSIZ]; 2464 char name[IFNAMSIZ];
2320 unsigned int size; 2465 unsigned int size;
2321 bool create; 2466 bool create;
2322 u32 ktype, klen, dlen, dtype, flags; 2467 u32 ktype, dtype, flags, policy;
2468 struct nft_set_desc desc;
2323 int err; 2469 int err;
2324 2470
2325 if (nla[NFTA_SET_TABLE] == NULL || 2471 if (nla[NFTA_SET_TABLE] == NULL ||
2326 nla[NFTA_SET_NAME] == NULL || 2472 nla[NFTA_SET_NAME] == NULL ||
2327 nla[NFTA_SET_KEY_LEN] == NULL) 2473 nla[NFTA_SET_KEY_LEN] == NULL ||
2474 nla[NFTA_SET_ID] == NULL)
2328 return -EINVAL; 2475 return -EINVAL;
2329 2476
2477 memset(&desc, 0, sizeof(desc));
2478
2330 ktype = NFT_DATA_VALUE; 2479 ktype = NFT_DATA_VALUE;
2331 if (nla[NFTA_SET_KEY_TYPE] != NULL) { 2480 if (nla[NFTA_SET_KEY_TYPE] != NULL) {
2332 ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE])); 2481 ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
@@ -2334,8 +2483,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2334 return -EINVAL; 2483 return -EINVAL;
2335 } 2484 }
2336 2485
2337 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN])); 2486 desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
2338 if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data)) 2487 if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
2339 return -EINVAL; 2488 return -EINVAL;
2340 2489
2341 flags = 0; 2490 flags = 0;
@@ -2347,7 +2496,6 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2347 } 2496 }
2348 2497
2349 dtype = 0; 2498 dtype = 0;
2350 dlen = 0;
2351 if (nla[NFTA_SET_DATA_TYPE] != NULL) { 2499 if (nla[NFTA_SET_DATA_TYPE] != NULL) {
2352 if (!(flags & NFT_SET_MAP)) 2500 if (!(flags & NFT_SET_MAP))
2353 return -EINVAL; 2501 return -EINVAL;
@@ -2360,15 +2508,25 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2360 if (dtype != NFT_DATA_VERDICT) { 2508 if (dtype != NFT_DATA_VERDICT) {
2361 if (nla[NFTA_SET_DATA_LEN] == NULL) 2509 if (nla[NFTA_SET_DATA_LEN] == NULL)
2362 return -EINVAL; 2510 return -EINVAL;
2363 dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN])); 2511 desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
2364 if (dlen == 0 || 2512 if (desc.dlen == 0 ||
2365 dlen > FIELD_SIZEOF(struct nft_data, data)) 2513 desc.dlen > FIELD_SIZEOF(struct nft_data, data))
2366 return -EINVAL; 2514 return -EINVAL;
2367 } else 2515 } else
2368 dlen = sizeof(struct nft_data); 2516 desc.dlen = sizeof(struct nft_data);
2369 } else if (flags & NFT_SET_MAP) 2517 } else if (flags & NFT_SET_MAP)
2370 return -EINVAL; 2518 return -EINVAL;
2371 2519
2520 policy = NFT_SET_POL_PERFORMANCE;
2521 if (nla[NFTA_SET_POLICY] != NULL)
2522 policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
2523
2524 if (nla[NFTA_SET_DESC] != NULL) {
2525 err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
2526 if (err < 0)
2527 return err;
2528 }
2529
2372 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 2530 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
2373 2531
2374 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create); 2532 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
@@ -2399,7 +2557,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2399 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) 2557 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
2400 return -ENOENT; 2558 return -ENOENT;
2401 2559
2402 ops = nft_select_set_ops(nla); 2560 ops = nft_select_set_ops(nla, &desc, policy);
2403 if (IS_ERR(ops)) 2561 if (IS_ERR(ops))
2404 return PTR_ERR(ops); 2562 return PTR_ERR(ops);
2405 2563
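nft_select_set_ops() now receives the parsed description and policy, so backends can compete on their estimates instead of being picked by flags alone. A toy version of that selection under assumed semantics (lowest lookup class wins for NFT_SET_POL_PERFORMANCE, smallest estimated footprint wins for memory; the byte counts here are invented):

#include <stddef.h>
#include <stdio.h>

enum policy { POL_PERFORMANCE, POL_MEMORY };
enum lookup_class { CLASS_O_1, CLASS_O_LOG_N, CLASS_O_N };

struct estimate { size_t size; enum lookup_class class; };

struct backend {
	const char *name;
	struct estimate (*estimate)(unsigned int nelems);
};

static struct estimate hash_est(unsigned int n)
{	/* O(1) lookups, a larger per-element footprint */
	return (struct estimate){ .size = n * 48, .class = CLASS_O_1 };
}

static struct estimate rbtree_est(unsigned int n)
{	/* O(log n) lookups, one compact node per element */
	return (struct estimate){ .size = n * 40, .class = CLASS_O_LOG_N };
}

static const struct backend *select_ops(const struct backend *b, int nb,
					unsigned int nelems, enum policy pol)
{
	const struct backend *best = NULL;
	struct estimate best_est;
	int i;

	for (i = 0; i < nb; i++) {
		struct estimate est = b[i].estimate(nelems);

		if (best == NULL ||
		    (pol == POL_PERFORMANCE ? est.class < best_est.class
					    : est.size < best_est.size)) {
			best = &b[i];
			best_est = est;
		}
	}
	return best;
}

int main(void)
{
	const struct backend b[] = { { "hash", hash_est },
				     { "rbtree", rbtree_est } };

	printf("perf -> %s\n", select_ops(b, 2, 1000, POL_PERFORMANCE)->name);
	printf("mem  -> %s\n", select_ops(b, 2, 1000, POL_MEMORY)->name);
	return 0;
}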
@@ -2420,17 +2578,22 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2420 INIT_LIST_HEAD(&set->bindings); 2578 INIT_LIST_HEAD(&set->bindings);
2421 set->ops = ops; 2579 set->ops = ops;
2422 set->ktype = ktype; 2580 set->ktype = ktype;
2423 set->klen = klen; 2581 set->klen = desc.klen;
2424 set->dtype = dtype; 2582 set->dtype = dtype;
2425 set->dlen = dlen; 2583 set->dlen = desc.dlen;
2426 set->flags = flags; 2584 set->flags = flags;
2585 set->size = desc.size;
2586
2587 err = ops->init(set, &desc, nla);
2588 if (err < 0)
2589 goto err2;
2427 2590
2428 err = ops->init(set, nla); 2591 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
2429 if (err < 0) 2592 if (err < 0)
2430 goto err2; 2593 goto err2;
2431 2594
2432 list_add_tail(&set->list, &table->sets); 2595 list_add_tail(&set->list, &table->sets);
2433 nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET); 2596 table->use++;
2434 return 0; 2597 return 0;
2435 2598
2436err2: 2599err2:
@@ -2440,16 +2603,20 @@ err1:
2440 return err; 2603 return err;
2441} 2604}
2442 2605
2443static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2606static void nft_set_destroy(struct nft_set *set)
2444{ 2607{
2445 list_del(&set->list);
2446 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
2447
2448 set->ops->destroy(set); 2608 set->ops->destroy(set);
2449 module_put(set->ops->owner); 2609 module_put(set->ops->owner);
2450 kfree(set); 2610 kfree(set);
2451} 2611}
2452 2612
2613static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2614{
2615 list_del(&set->list);
2616 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2617 nft_set_destroy(set);
2618}
2619
2453static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, 2620static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2454 const struct nlmsghdr *nlh, 2621 const struct nlmsghdr *nlh,
2455 const struct nlattr * const nla[]) 2622 const struct nlattr * const nla[])
@@ -2471,10 +2638,17 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2471 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2638 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2472 if (IS_ERR(set)) 2639 if (IS_ERR(set))
2473 return PTR_ERR(set); 2640 return PTR_ERR(set);
2641 if (set->flags & NFT_SET_INACTIVE)
2642 return -ENOENT;
2474 if (!list_empty(&set->bindings)) 2643 if (!list_empty(&set->bindings))
2475 return -EBUSY; 2644 return -EBUSY;
2476 2645
2477 nf_tables_set_destroy(&ctx, set); 2646 err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set);
2647 if (err < 0)
2648 return err;
2649
2650 list_del(&set->list);
2651 ctx.table->use--;
2478 return 0; 2652 return 0;
2479} 2653}
2480 2654
@@ -2534,7 +2708,8 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2534{ 2708{
2535 list_del(&binding->list); 2709 list_del(&binding->list);
2536 2710
2537 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS) 2711 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2712 !(set->flags & NFT_SET_INACTIVE))
2538 nf_tables_set_destroy(ctx, set); 2713 nf_tables_set_destroy(ctx, set);
2539} 2714}
2540 2715
@@ -2552,16 +2727,18 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
2552 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 2727 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING },
2553 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 2728 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING },
2554 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 2729 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
2730 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
2555}; 2731};
2556 2732
2557static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, 2733static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
2558 const struct sk_buff *skb, 2734 const struct sk_buff *skb,
2559 const struct nlmsghdr *nlh, 2735 const struct nlmsghdr *nlh,
2560 const struct nlattr * const nla[]) 2736 const struct nlattr * const nla[],
2737 bool trans)
2561{ 2738{
2562 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2739 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2563 const struct nft_af_info *afi; 2740 struct nft_af_info *afi;
2564 const struct nft_table *table; 2741 struct nft_table *table;
2565 struct net *net = sock_net(skb->sk); 2742 struct net *net = sock_net(skb->sk);
2566 2743
2567 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 2744 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -2571,6 +2748,8 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
2571 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]); 2748 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
2572 if (IS_ERR(table)) 2749 if (IS_ERR(table))
2573 return PTR_ERR(table); 2750 return PTR_ERR(table);
2751 if (!trans && (table->flags & NFT_TABLE_INACTIVE))
2752 return -ENOENT;
2574 2753
2575 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2754 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
2576 return 0; 2755 return 0;
@@ -2644,13 +2823,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
2644 if (err < 0) 2823 if (err < 0)
2645 return err; 2824 return err;
2646 2825
2647 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla); 2826 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
2827 false);
2648 if (err < 0) 2828 if (err < 0)
2649 return err; 2829 return err;
2650 2830
2651 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 2831 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2652 if (IS_ERR(set)) 2832 if (IS_ERR(set))
2653 return PTR_ERR(set); 2833 return PTR_ERR(set);
2834 if (set->flags & NFT_SET_INACTIVE)
2835 return -ENOENT;
2654 2836
2655 event = NFT_MSG_NEWSETELEM; 2837 event = NFT_MSG_NEWSETELEM;
2656 event |= NFNL_SUBSYS_NFTABLES << 8; 2838 event |= NFNL_SUBSYS_NFTABLES << 8;
@@ -2707,13 +2889,15 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
2707 struct nft_ctx ctx; 2889 struct nft_ctx ctx;
2708 int err; 2890 int err;
2709 2891
2710 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 2892 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
2711 if (err < 0) 2893 if (err < 0)
2712 return err; 2894 return err;
2713 2895
2714 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 2896 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2715 if (IS_ERR(set)) 2897 if (IS_ERR(set))
2716 return PTR_ERR(set); 2898 return PTR_ERR(set);
2899 if (set->flags & NFT_SET_INACTIVE)
2900 return -ENOENT;
2717 2901
2718 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2902 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2719 struct netlink_dump_control c = { 2903 struct netlink_dump_control c = {
@@ -2724,7 +2908,98 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
2724 return -EOPNOTSUPP; 2908 return -EOPNOTSUPP;
2725} 2909}
2726 2910
2727static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set, 2911static int nf_tables_fill_setelem_info(struct sk_buff *skb,
2912 const struct nft_ctx *ctx, u32 seq,
2913 u32 portid, int event, u16 flags,
2914 const struct nft_set *set,
2915 const struct nft_set_elem *elem)
2916{
2917 struct nfgenmsg *nfmsg;
2918 struct nlmsghdr *nlh;
2919 struct nlattr *nest;
2920 int err;
2921
2922 event |= NFNL_SUBSYS_NFTABLES << 8;
2923 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
2924 flags);
2925 if (nlh == NULL)
2926 goto nla_put_failure;
2927
2928 nfmsg = nlmsg_data(nlh);
2929 nfmsg->nfgen_family = ctx->afi->family;
2930 nfmsg->version = NFNETLINK_V0;
2931 nfmsg->res_id = 0;
2932
2933 if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
2934 goto nla_put_failure;
2935 if (nla_put_string(skb, NFTA_SET_NAME, set->name))
2936 goto nla_put_failure;
2937
2938 nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
2939 if (nest == NULL)
2940 goto nla_put_failure;
2941
2942 err = nf_tables_fill_setelem(skb, set, elem);
2943 if (err < 0)
2944 goto nla_put_failure;
2945
2946 nla_nest_end(skb, nest);
2947
2948 return nlmsg_end(skb, nlh);
2949
2950nla_put_failure:
2951 nlmsg_trim(skb, nlh);
2952 return -1;
2953}
2954
2955static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
2956 const struct nft_set *set,
2957 const struct nft_set_elem *elem,
2958 int event, u16 flags)
2959{
2960 struct net *net = ctx->net;
2961 u32 portid = ctx->portid;
2962 struct sk_buff *skb;
2963 int err;
2964
2965 if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
2966 return 0;
2967
2968 err = -ENOBUFS;
2969 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2970 if (skb == NULL)
2971 goto err;
2972
2973 err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
2974 set, elem);
2975 if (err < 0) {
2976 kfree_skb(skb);
2977 goto err;
2978 }
2979
2980 err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
2981 GFP_KERNEL);
2982err:
2983 if (err < 0)
2984 nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
2985 return err;
2986}
2987
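nf_tables_setelem_notify() above follows the usual nfnetlink event pattern: return early when nobody listens, build a message, multicast it to NFNLGRP_NFTABLES, and record failures against the group so listeners see an overrun. On the receiving end, a userspace process can watch those events; this is a hedged libmnl sketch with error handling trimmed:

#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <stdio.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	unsigned int grp = NFNLGRP_NFTABLES;
	ssize_t n;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (nl == NULL || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	/* join the nftables group to see NEWSETELEM/DELSETELEM events */
	if (mnl_socket_setsockopt(nl, NETLINK_ADD_MEMBERSHIP,
				  &grp, sizeof(grp)) < 0)
		return 1;

	while ((n = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		printf("event type 0x%x, %zd bytes\n", nlh->nlmsg_type, n);
	}
	mnl_socket_close(nl);
	return 0;
}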
2988static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
2989 int msg_type,
2990 struct nft_set *set)
2991{
2992 struct nft_trans *trans;
2993
2994 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
2995 if (trans == NULL)
2996 return NULL;
2997
2998 nft_trans_elem_set(trans) = set;
2999 return trans;
3000}
3001
3002static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
2728 const struct nlattr *attr) 3003 const struct nlattr *attr)
2729{ 3004{
2730 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 3005 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
@@ -2732,8 +3007,12 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2732 struct nft_set_elem elem; 3007 struct nft_set_elem elem;
2733 struct nft_set_binding *binding; 3008 struct nft_set_binding *binding;
2734 enum nft_registers dreg; 3009 enum nft_registers dreg;
3010 struct nft_trans *trans;
2735 int err; 3011 int err;
2736 3012
3013 if (set->size && set->nelems == set->size)
3014 return -ENFILE;
3015
2737 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, 3016 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
2738 nft_set_elem_policy); 3017 nft_set_elem_policy);
2739 if (err < 0) 3018 if (err < 0)
@@ -2786,7 +3065,7 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2786 struct nft_ctx bind_ctx = { 3065 struct nft_ctx bind_ctx = {
2787 .afi = ctx->afi, 3066 .afi = ctx->afi,
2788 .table = ctx->table, 3067 .table = ctx->table,
2789 .chain = binding->chain, 3068 .chain = (struct nft_chain *)binding->chain,
2790 }; 3069 };
2791 3070
2792 err = nft_validate_data_load(&bind_ctx, dreg, 3071 err = nft_validate_data_load(&bind_ctx, dreg,
@@ -2796,12 +3075,20 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2796 } 3075 }
2797 } 3076 }
2798 3077
3078 trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
3079 if (trans == NULL)
3080 goto err3;
3081
2799 err = set->ops->insert(set, &elem); 3082 err = set->ops->insert(set, &elem);
2800 if (err < 0) 3083 if (err < 0)
2801 goto err3; 3084 goto err4;
2802 3085
3086 nft_trans_elem(trans) = elem;
3087 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2803 return 0; 3088 return 0;
2804 3089
3090err4:
3091 kfree(trans);
2805err3: 3092err3:
2806 if (nla[NFTA_SET_ELEM_DATA] != NULL) 3093 if (nla[NFTA_SET_ELEM_DATA] != NULL)
2807 nft_data_uninit(&elem.data, d2.type); 3094 nft_data_uninit(&elem.data, d2.type);
@@ -2815,35 +3102,46 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
2815 const struct nlmsghdr *nlh, 3102 const struct nlmsghdr *nlh,
2816 const struct nlattr * const nla[]) 3103 const struct nlattr * const nla[])
2817{ 3104{
3105 struct net *net = sock_net(skb->sk);
2818 const struct nlattr *attr; 3106 const struct nlattr *attr;
2819 struct nft_set *set; 3107 struct nft_set *set;
2820 struct nft_ctx ctx; 3108 struct nft_ctx ctx;
2821 int rem, err; 3109 int rem, err = 0;
2822 3110
2823 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 3111 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
2824 if (err < 0) 3112 if (err < 0)
2825 return err; 3113 return err;
2826 3114
2827 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3115 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2828 if (IS_ERR(set)) 3116 if (IS_ERR(set)) {
2829 return PTR_ERR(set); 3117 if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
3118 set = nf_tables_set_lookup_byid(net,
3119 nla[NFTA_SET_ELEM_LIST_SET_ID]);
3120 }
3121 if (IS_ERR(set))
3122 return PTR_ERR(set);
3123 }
3124
2830 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) 3125 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
2831 return -EBUSY; 3126 return -EBUSY;
2832 3127
2833 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3128 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
2834 err = nft_add_set_elem(&ctx, set, attr); 3129 err = nft_add_set_elem(&ctx, set, attr);
2835 if (err < 0) 3130 if (err < 0)
2836 return err; 3131 break;
3132
3133 set->nelems++;
2837 } 3134 }
2838 return 0; 3135 return err;
2839} 3136}
2840 3137
2841static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set, 3138static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
2842 const struct nlattr *attr) 3139 const struct nlattr *attr)
2843{ 3140{
2844 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 3141 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
2845 struct nft_data_desc desc; 3142 struct nft_data_desc desc;
2846 struct nft_set_elem elem; 3143 struct nft_set_elem elem;
3144 struct nft_trans *trans;
2847 int err; 3145 int err;
2848 3146
2849 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, 3147 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -2867,7 +3165,12 @@ static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
2867 if (err < 0) 3165 if (err < 0)
2868 goto err2; 3166 goto err2;
2869 3167
2870 set->ops->remove(set, &elem); 3168 trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
3169 if (trans == NULL)
3170 goto err2;
3171
3172 nft_trans_elem(trans) = elem;
3173 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2871 3174
2872 nft_data_uninit(&elem.key, NFT_DATA_VALUE); 3175 nft_data_uninit(&elem.key, NFT_DATA_VALUE);
2873 if (set->flags & NFT_SET_MAP) 3176 if (set->flags & NFT_SET_MAP)
@@ -2886,9 +3189,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
2886 const struct nlattr *attr; 3189 const struct nlattr *attr;
2887 struct nft_set *set; 3190 struct nft_set *set;
2888 struct nft_ctx ctx; 3191 struct nft_ctx ctx;
2889 int rem, err; 3192 int rem, err = 0;
2890 3193
2891 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 3194 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
2892 if (err < 0) 3195 if (err < 0)
2893 return err; 3196 return err;
2894 3197
@@ -2901,14 +3204,16 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
2901 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3204 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
2902 err = nft_del_setelem(&ctx, set, attr); 3205 err = nft_del_setelem(&ctx, set, attr);
2903 if (err < 0) 3206 if (err < 0)
2904 return err; 3207 break;
3208
3209 set->nelems--;
2905 } 3210 }
2906 return 0; 3211 return err;
2907} 3212}
2908 3213
2909static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { 3214static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2910 [NFT_MSG_NEWTABLE] = { 3215 [NFT_MSG_NEWTABLE] = {
2911 .call = nf_tables_newtable, 3216 .call_batch = nf_tables_newtable,
2912 .attr_count = NFTA_TABLE_MAX, 3217 .attr_count = NFTA_TABLE_MAX,
2913 .policy = nft_table_policy, 3218 .policy = nft_table_policy,
2914 }, 3219 },
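The callback-table rewrite starting here, and continuing through the hunks below, moves every mutating command from .call to .call_batch, so tables, chains, rules, sets and elements can only be changed from inside a batch that ends in commit or abort; plain GET commands keep their immediate .call slot. A stripped-down dispatcher showing the two-slot convention (the struct is modeled on nfnl_callback, not copied from it):

#include <stdio.h>

struct callback {
	int (*call)(const char *msg);		/* immediate commands */
	int (*call_batch)(const char *msg);	/* batched commands only */
};

static int get_set(const char *msg) { printf("get: %s\n", msg); return 0; }
static int new_set(const char *msg) { printf("new: %s\n", msg); return 0; }

static const struct callback cb[] = {
	[0] = { .call = get_set },		/* GETSET: works anywhere */
	[1] = { .call_batch = new_set },	/* NEWSET: batch-only */
};

static int dispatch(int type, const char *msg, int in_batch)
{
	if (in_batch && cb[type].call_batch)
		return cb[type].call_batch(msg);
	if (!in_batch && cb[type].call)
		return cb[type].call(msg);
	return -1;	/* command not valid in this context */
}

int main(void)
{
	dispatch(0, "lookup", 0);		/* ok outside a batch */
	return dispatch(1, "create", 0);	/* rejected: batch-only */
}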
@@ -2918,12 +3223,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2918 .policy = nft_table_policy, 3223 .policy = nft_table_policy,
2919 }, 3224 },
2920 [NFT_MSG_DELTABLE] = { 3225 [NFT_MSG_DELTABLE] = {
2921 .call = nf_tables_deltable, 3226 .call_batch = nf_tables_deltable,
2922 .attr_count = NFTA_TABLE_MAX, 3227 .attr_count = NFTA_TABLE_MAX,
2923 .policy = nft_table_policy, 3228 .policy = nft_table_policy,
2924 }, 3229 },
2925 [NFT_MSG_NEWCHAIN] = { 3230 [NFT_MSG_NEWCHAIN] = {
2926 .call = nf_tables_newchain, 3231 .call_batch = nf_tables_newchain,
2927 .attr_count = NFTA_CHAIN_MAX, 3232 .attr_count = NFTA_CHAIN_MAX,
2928 .policy = nft_chain_policy, 3233 .policy = nft_chain_policy,
2929 }, 3234 },
@@ -2933,7 +3238,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2933 .policy = nft_chain_policy, 3238 .policy = nft_chain_policy,
2934 }, 3239 },
2935 [NFT_MSG_DELCHAIN] = { 3240 [NFT_MSG_DELCHAIN] = {
2936 .call = nf_tables_delchain, 3241 .call_batch = nf_tables_delchain,
2937 .attr_count = NFTA_CHAIN_MAX, 3242 .attr_count = NFTA_CHAIN_MAX,
2938 .policy = nft_chain_policy, 3243 .policy = nft_chain_policy,
2939 }, 3244 },
@@ -2953,7 +3258,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2953 .policy = nft_rule_policy, 3258 .policy = nft_rule_policy,
2954 }, 3259 },
2955 [NFT_MSG_NEWSET] = { 3260 [NFT_MSG_NEWSET] = {
2956 .call = nf_tables_newset, 3261 .call_batch = nf_tables_newset,
2957 .attr_count = NFTA_SET_MAX, 3262 .attr_count = NFTA_SET_MAX,
2958 .policy = nft_set_policy, 3263 .policy = nft_set_policy,
2959 }, 3264 },
@@ -2963,12 +3268,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2963 .policy = nft_set_policy, 3268 .policy = nft_set_policy,
2964 }, 3269 },
2965 [NFT_MSG_DELSET] = { 3270 [NFT_MSG_DELSET] = {
2966 .call = nf_tables_delset, 3271 .call_batch = nf_tables_delset,
2967 .attr_count = NFTA_SET_MAX, 3272 .attr_count = NFTA_SET_MAX,
2968 .policy = nft_set_policy, 3273 .policy = nft_set_policy,
2969 }, 3274 },
2970 [NFT_MSG_NEWSETELEM] = { 3275 [NFT_MSG_NEWSETELEM] = {
2971 .call = nf_tables_newsetelem, 3276 .call_batch = nf_tables_newsetelem,
2972 .attr_count = NFTA_SET_ELEM_LIST_MAX, 3277 .attr_count = NFTA_SET_ELEM_LIST_MAX,
2973 .policy = nft_set_elem_list_policy, 3278 .policy = nft_set_elem_list_policy,
2974 }, 3279 },
@@ -2978,12 +3283,282 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2978 .policy = nft_set_elem_list_policy, 3283 .policy = nft_set_elem_list_policy,
2979 }, 3284 },
2980 [NFT_MSG_DELSETELEM] = { 3285 [NFT_MSG_DELSETELEM] = {
2981 .call = nf_tables_delsetelem, 3286 .call_batch = nf_tables_delsetelem,
2982 .attr_count = NFTA_SET_ELEM_LIST_MAX, 3287 .attr_count = NFTA_SET_ELEM_LIST_MAX,
2983 .policy = nft_set_elem_list_policy, 3288 .policy = nft_set_elem_list_policy,
2984 }, 3289 },
2985}; 3290};
2986 3291
3292static void nft_chain_commit_update(struct nft_trans *trans)
3293{
3294 struct nft_base_chain *basechain;
3295
3296 if (nft_trans_chain_name(trans)[0])
3297 strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
3298
3299 if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
3300 return;
3301
3302 basechain = nft_base_chain(trans->ctx.chain);
3303 nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
3304
3305 switch (nft_trans_chain_policy(trans)) {
3306 case NF_DROP:
3307 case NF_ACCEPT:
3308 basechain->policy = nft_trans_chain_policy(trans);
3309 break;
3310 }
3311}
3312
3313/* Schedule objects for release via rcu to make sure no packets are accessing
3314 * removed rules.
3315 */
3316static void nf_tables_commit_release_rcu(struct rcu_head *rt)
3317{
3318 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3319
3320 switch (trans->msg_type) {
3321 case NFT_MSG_DELTABLE:
3322 nf_tables_table_destroy(&trans->ctx);
3323 break;
3324 case NFT_MSG_DELCHAIN:
3325 nf_tables_chain_destroy(trans->ctx.chain);
3326 break;
3327 case NFT_MSG_DELRULE:
3328 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
3329 break;
3330 case NFT_MSG_DELSET:
3331 nft_set_destroy(nft_trans_set(trans));
3332 break;
3333 }
3334 kfree(trans);
3335}
3336
3337static int nf_tables_commit(struct sk_buff *skb)
3338{
3339 struct net *net = sock_net(skb->sk);
3340 struct nft_trans *trans, *next;
3341 struct nft_set *set;
3342
3343 /* Bump generation counter, invalidate any dump in progress */
3344 net->nft.genctr++;
3345
3346 /* A new generation has just started */
3347 net->nft.gencursor = gencursor_next(net);
3348
3349 /* Make sure all packets have left the previous generation before
3350 * purging old rules.
3351 */
3352 synchronize_rcu();
3353
3354 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3355 switch (trans->msg_type) {
3356 case NFT_MSG_NEWTABLE:
3357 if (nft_trans_table_update(trans)) {
3358 if (!nft_trans_table_enable(trans)) {
3359 nf_tables_table_disable(trans->ctx.afi,
3360 trans->ctx.table);
3361 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
3362 }
3363 } else {
3364 trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
3365 }
3366 nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
3367 nft_trans_destroy(trans);
3368 break;
3369 case NFT_MSG_DELTABLE:
3370 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
3371 break;
3372 case NFT_MSG_NEWCHAIN:
3373 if (nft_trans_chain_update(trans))
3374 nft_chain_commit_update(trans);
3375 else
3376 trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;
3377
3378 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
3379 nft_trans_destroy(trans);
3380 break;
3381 case NFT_MSG_DELCHAIN:
3382 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
3383 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3384 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3385 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
3386 trans->ctx.afi->nops);
3387 }
3388 break;
3389 case NFT_MSG_NEWRULE:
3390 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
3391 nf_tables_rule_notify(&trans->ctx,
3392 nft_trans_rule(trans),
3393 NFT_MSG_NEWRULE);
3394 nft_trans_destroy(trans);
3395 break;
3396 case NFT_MSG_DELRULE:
3397 list_del_rcu(&nft_trans_rule(trans)->list);
3398 nf_tables_rule_notify(&trans->ctx,
3399 nft_trans_rule(trans),
3400 NFT_MSG_DELRULE);
3401 break;
3402 case NFT_MSG_NEWSET:
3403 nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
3404 /* This avoids hitting -EBUSY when deleting the table
3405 * from within the same transaction.
3406 */
3407 if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
3408 !list_empty(&nft_trans_set(trans)->bindings))
3409 trans->ctx.table->use--;
3410
3411 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
3412 NFT_MSG_NEWSET, GFP_KERNEL);
3413 nft_trans_destroy(trans);
3414 break;
3415 case NFT_MSG_DELSET:
3416 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
3417 NFT_MSG_DELSET, GFP_KERNEL);
3418 break;
3419 case NFT_MSG_NEWSETELEM:
3420 nf_tables_setelem_notify(&trans->ctx,
3421 nft_trans_elem_set(trans),
3422 &nft_trans_elem(trans),
3423 NFT_MSG_NEWSETELEM, 0);
3424 nft_trans_destroy(trans);
3425 break;
3426 case NFT_MSG_DELSETELEM:
3427 nf_tables_setelem_notify(&trans->ctx,
3428 nft_trans_elem_set(trans),
3429 &nft_trans_elem(trans),
3430 NFT_MSG_DELSETELEM, 0);
3431 set = nft_trans_elem_set(trans);
3432 set->ops->get(set, &nft_trans_elem(trans));
3433 set->ops->remove(set, &nft_trans_elem(trans));
3434 nft_trans_destroy(trans);
3435 break;
3436 }
3437 }
3438
3439 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3440 list_del(&trans->list);
3441 trans->ctx.nla = NULL;
3442 call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
3443 }
3444
3445 return 0;
3446}
3447
3448/* Schedule objects for release via rcu to make sure no packets are accessing
3449 * aborted rules.
3450 */
3451static void nf_tables_abort_release_rcu(struct rcu_head *rt)
3452{
3453 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3454
3455 switch (trans->msg_type) {
3456 case NFT_MSG_NEWTABLE:
3457 nf_tables_table_destroy(&trans->ctx);
3458 break;
3459 case NFT_MSG_NEWCHAIN:
3460 nf_tables_chain_destroy(trans->ctx.chain);
3461 break;
3462 case NFT_MSG_NEWRULE:
3463 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
3464 break;
3465 case NFT_MSG_NEWSET:
3466 nft_set_destroy(nft_trans_set(trans));
3467 break;
3468 }
3469 kfree(trans);
3470}
3471
3472static int nf_tables_abort(struct sk_buff *skb)
3473{
3474 struct net *net = sock_net(skb->sk);
3475 struct nft_trans *trans, *next;
3476 struct nft_set *set;
3477
3478 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3479 switch (trans->msg_type) {
3480 case NFT_MSG_NEWTABLE:
3481 if (nft_trans_table_update(trans)) {
3482 if (nft_trans_table_enable(trans)) {
3483 nf_tables_table_disable(trans->ctx.afi,
3484 trans->ctx.table);
3485 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
3486 }
3487 nft_trans_destroy(trans);
3488 } else {
3489 list_del(&trans->ctx.table->list);
3490 }
3491 break;
3492 case NFT_MSG_DELTABLE:
3493 list_add_tail(&trans->ctx.table->list,
3494 &trans->ctx.afi->tables);
3495 nft_trans_destroy(trans);
3496 break;
3497 case NFT_MSG_NEWCHAIN:
3498 if (nft_trans_chain_update(trans)) {
3499 if (nft_trans_chain_stats(trans))
3500 free_percpu(nft_trans_chain_stats(trans));
3501
3502 nft_trans_destroy(trans);
3503 } else {
3504 trans->ctx.table->use--;
3505 list_del(&trans->ctx.chain->list);
3506 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3507 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3508 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
3509 trans->ctx.afi->nops);
3510 }
3511 }
3512 break;
3513 case NFT_MSG_DELCHAIN:
3514 trans->ctx.table->use++;
3515 list_add_tail(&trans->ctx.chain->list,
3516 &trans->ctx.table->chains);
3517 nft_trans_destroy(trans);
3518 break;
3519 case NFT_MSG_NEWRULE:
3520 trans->ctx.chain->use--;
3521 list_del_rcu(&nft_trans_rule(trans)->list);
3522 break;
3523 case NFT_MSG_DELRULE:
3524 trans->ctx.chain->use++;
3525 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
3526 nft_trans_destroy(trans);
3527 break;
3528 case NFT_MSG_NEWSET:
3529 trans->ctx.table->use--;
3530 list_del(&nft_trans_set(trans)->list);
3531 break;
3532 case NFT_MSG_DELSET:
3533 trans->ctx.table->use++;
3534 list_add_tail(&nft_trans_set(trans)->list,
3535 &trans->ctx.table->sets);
3536 nft_trans_destroy(trans);
3537 break;
3538 case NFT_MSG_NEWSETELEM:
3539 nft_trans_elem_set(trans)->nelems--;
3540 set = nft_trans_elem_set(trans);
3541 set->ops->get(set, &nft_trans_elem(trans));
3542 set->ops->remove(set, &nft_trans_elem(trans));
3543 nft_trans_destroy(trans);
3544 break;
3545 case NFT_MSG_DELSETELEM:
3546 nft_trans_elem_set(trans)->nelems++;
3547 nft_trans_destroy(trans);
3548 break;
3549 }
3550 }
3551
3552 list_for_each_entry_safe_reverse(trans, next,
3553 &net->nft.commit_list, list) {
3554 list_del(&trans->list);
3555 trans->ctx.nla = NULL;
3556 call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
3557 }
3558
3559 return 0;
3560}
3561
2987static const struct nfnetlink_subsystem nf_tables_subsys = { 3562static const struct nfnetlink_subsystem nf_tables_subsys = {
2988 .name = "nf_tables", 3563 .name = "nf_tables",
2989 .subsys_id = NFNL_SUBSYS_NFTABLES, 3564 .subsys_id = NFNL_SUBSYS_NFTABLES,
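Taken together, nf_tables_commit() and nf_tables_abort() above share a two-pass shape: the first walk over the journal makes the new generation visible (or rolls the lists back), and the second hands each dead object to call_rcu() so packets still traversing the old generation never touch freed memory. A schematic single-threaded model of the commit side; synchronize_readers() is an assumed stand-in for synchronize_rcu(), and the immediate free() stands in for the deferred call_rcu() release:

#include <stdio.h>
#include <stdlib.h>

enum msg { MSG_NEWRULE, MSG_DELRULE };

struct trans {
	struct trans *next;
	enum msg type;
	void *obj;
};

static void synchronize_readers(void)
{	/* stand-in: wait until no reader still sees the old generation */
}

static void commit(struct trans **journal, unsigned int *gencursor)
{
	struct trans *t, *next;

	(*gencursor)++;		/* flip to the new generation */
	synchronize_readers();	/* old generation fully drained */

	/* pass 1: activate additions, unlink deletions, send events */
	for (t = *journal; t != NULL; t = t->next) {
		if (t->type == MSG_NEWRULE)
			printf("activate rule %p\n", t->obj);
		else
			printf("unlink rule %p\n", t->obj);
	}

	/* pass 2: release journal entries and the deleted objects */
	for (t = *journal; t != NULL; t = next) {
		next = t->next;
		if (t->type == MSG_DELRULE)
			free(t->obj);	/* kernel defers this via call_rcu */
		free(t);
	}
	*journal = NULL;
}

int main(void)
{
	unsigned int gencursor = 0;
	struct trans *journal = calloc(1, sizeof(*journal));

	journal->type = MSG_DELRULE;
	journal->obj = malloc(16);
	commit(&journal, &gencursor);
	return 0;
}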
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 23ef77c60fff..c138b8fbe280 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -399,19 +399,17 @@ static void nfnetlink_rcv(struct sk_buff *skb)
399} 399}
400 400
401#ifdef CONFIG_MODULES 401#ifdef CONFIG_MODULES
402static void nfnetlink_bind(int group) 402static int nfnetlink_bind(int group)
403{ 403{
404 const struct nfnetlink_subsystem *ss; 404 const struct nfnetlink_subsystem *ss;
405 int type = nfnl_group2type[group]; 405 int type = nfnl_group2type[group];
406 406
407 rcu_read_lock(); 407 rcu_read_lock();
408 ss = nfnetlink_get_subsys(type); 408 ss = nfnetlink_get_subsys(type);
409 if (!ss) {
410 rcu_read_unlock();
411 request_module("nfnetlink-subsys-%d", type);
412 return;
413 }
414 rcu_read_unlock(); 409 rcu_read_unlock();
410 if (!ss)
411 request_module("nfnetlink-subsys-%d", type);
412 return 0;
415} 413}
416#endif 414#endif
417 415
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c7b6d466a662..2baa125c2e8d 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -32,18 +32,24 @@ static LIST_HEAD(nfnl_acct_list);
32struct nf_acct { 32struct nf_acct {
33 atomic64_t pkts; 33 atomic64_t pkts;
34 atomic64_t bytes; 34 atomic64_t bytes;
35 unsigned long flags;
35 struct list_head head; 36 struct list_head head;
36 atomic_t refcnt; 37 atomic_t refcnt;
37 char name[NFACCT_NAME_MAX]; 38 char name[NFACCT_NAME_MAX];
38 struct rcu_head rcu_head; 39 struct rcu_head rcu_head;
40 char data[0];
39}; 41};
40 42
43#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
44
41static int 45static int
42nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, 46nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
43 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) 47 const struct nlmsghdr *nlh, const struct nlattr * const tb[])
44{ 48{
45 struct nf_acct *nfacct, *matching = NULL; 49 struct nf_acct *nfacct, *matching = NULL;
46 char *acct_name; 50 char *acct_name;
51 unsigned int size = 0;
52 u32 flags = 0;
47 53
48 if (!tb[NFACCT_NAME]) 54 if (!tb[NFACCT_NAME])
49 return -EINVAL; 55 return -EINVAL;
@@ -68,15 +74,38 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
68 /* reset counters if you request a replacement. */ 74 /* reset counters if you request a replacement. */
69 atomic64_set(&matching->pkts, 0); 75 atomic64_set(&matching->pkts, 0);
70 atomic64_set(&matching->bytes, 0); 76 atomic64_set(&matching->bytes, 0);
77 smp_mb__before_atomic();
78 /* reset overquota flag if quota is enabled. */
79 if ((matching->flags & NFACCT_F_QUOTA))
80 clear_bit(NFACCT_F_OVERQUOTA, &matching->flags);
71 return 0; 81 return 0;
72 } 82 }
73 return -EBUSY; 83 return -EBUSY;
74 } 84 }
75 85
76 nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL); 86 if (tb[NFACCT_FLAGS]) {
87 flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS]));
88 if (flags & ~NFACCT_F_QUOTA)
89 return -EOPNOTSUPP;
90 if ((flags & NFACCT_F_QUOTA) == NFACCT_F_QUOTA)
91 return -EINVAL;
92 if (flags & NFACCT_F_OVERQUOTA)
93 return -EINVAL;
94
95 size += sizeof(u64);
96 }
97
98 nfacct = kzalloc(sizeof(struct nf_acct) + size, GFP_KERNEL);
77 if (nfacct == NULL) 99 if (nfacct == NULL)
78 return -ENOMEM; 100 return -ENOMEM;
79 101
102 if (flags & NFACCT_F_QUOTA) {
103 u64 *quota = (u64 *)nfacct->data;
104
105 *quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA]));
106 nfacct->flags = flags;
107 }
108
80 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); 109 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
81 110
82 if (tb[NFACCT_BYTES]) { 111 if (tb[NFACCT_BYTES]) {
@@ -117,6 +146,9 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
117 if (type == NFNL_MSG_ACCT_GET_CTRZERO) { 146 if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
118 pkts = atomic64_xchg(&acct->pkts, 0); 147 pkts = atomic64_xchg(&acct->pkts, 0);
119 bytes = atomic64_xchg(&acct->bytes, 0); 148 bytes = atomic64_xchg(&acct->bytes, 0);
149 smp_mb__before_atomic();
150 if (acct->flags & NFACCT_F_QUOTA)
151 clear_bit(NFACCT_F_OVERQUOTA, &acct->flags);
120 } else { 152 } else {
121 pkts = atomic64_read(&acct->pkts); 153 pkts = atomic64_read(&acct->pkts);
122 bytes = atomic64_read(&acct->bytes); 154 bytes = atomic64_read(&acct->bytes);
@@ -125,7 +157,13 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
125 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) || 157 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
126 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) 158 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
127 goto nla_put_failure; 159 goto nla_put_failure;
160 if (acct->flags & NFACCT_F_QUOTA) {
161 u64 *quota = (u64 *)acct->data;
128 162
163 if (nla_put_be32(skb, NFACCT_FLAGS, htonl(acct->flags)) ||
164 nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
165 goto nla_put_failure;
166 }
129 nlmsg_end(skb, nlh); 167 nlmsg_end(skb, nlh);
130 return skb->len; 168 return skb->len;
131 169
@@ -270,6 +308,8 @@ static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
270 [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 }, 308 [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
271 [NFACCT_BYTES] = { .type = NLA_U64 }, 309 [NFACCT_BYTES] = { .type = NLA_U64 },
272 [NFACCT_PKTS] = { .type = NLA_U64 }, 310 [NFACCT_PKTS] = { .type = NLA_U64 },
311 [NFACCT_FLAGS] = { .type = NLA_U32 },
312 [NFACCT_QUOTA] = { .type = NLA_U64 },
273}; 313};
274 314
275static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = { 315static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
@@ -336,6 +376,50 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
336} 376}
337EXPORT_SYMBOL_GPL(nfnl_acct_update); 377EXPORT_SYMBOL_GPL(nfnl_acct_update);
338 378
379static void nfnl_overquota_report(struct nf_acct *nfacct)
380{
381 int ret;
382 struct sk_buff *skb;
383
384 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
385 if (skb == NULL)
386 return;
387
388 ret = nfnl_acct_fill_info(skb, 0, 0, NFNL_MSG_ACCT_OVERQUOTA, 0,
389 nfacct);
390 if (ret <= 0) {
391 kfree_skb(skb);
392 return;
393 }
394 netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
395 GFP_ATOMIC);
396}
397
398int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
399{
400 u64 now;
401 u64 *quota;
402 int ret = NFACCT_UNDERQUOTA;
403
 404 /* nothing to do here if we don't have a quota */
405 if (!(nfacct->flags & NFACCT_F_QUOTA))
406 return NFACCT_NO_QUOTA;
407
408 quota = (u64 *)nfacct->data;
409 now = (nfacct->flags & NFACCT_F_QUOTA_PKTS) ?
410 atomic64_read(&nfacct->pkts) : atomic64_read(&nfacct->bytes);
411
412 ret = now > *quota;
413
414 if (now >= *quota &&
415 !test_and_set_bit(NFACCT_F_OVERQUOTA, &nfacct->flags)) {
416 nfnl_overquota_report(nfacct);
417 }
418
419 return ret;
420}
421EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
422
339static int __init nfnl_acct_init(void) 423static int __init nfnl_acct_init(void)
340{ 424{
341 int ret; 425 int ret;
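The nfnetlink_acct changes above attach an optional quota to each counter: the limit lives in the flexible char data[0] array behind struct nf_acct, and test_and_set_bit() on NFACCT_F_OVERQUOTA makes the over-quota broadcast fire exactly once until the counters are reset. A compact C11 model of that one-shot behaviour using stdatomic; the field names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct acct {
	atomic_uint_fast64_t bytes;
	atomic_bool overquota;	/* models the NFACCT_F_OVERQUOTA bit */
	uint64_t quota;
};

/* Charge traffic; return true only the first time quota is crossed. */
static bool acct_charge(struct acct *a, uint64_t len)
{
	uint64_t now = atomic_fetch_add(&a->bytes, len) + len;

	if (now < a->quota)
		return false;
	/* like test_and_set_bit(): only one caller wins the transition */
	return !atomic_exchange(&a->overquota, true);
}

int main(void)
{
	struct acct a;

	atomic_init(&a.bytes, 0);
	atomic_init(&a.overquota, false);
	a.quota = 100;
	printf("%d ", acct_charge(&a, 60));	/* 0: still under quota */
	printf("%d ", acct_charge(&a, 60));	/* 1: first crossing */
	printf("%d\n", acct_charge(&a, 60));	/* 0: already reported */
	return 0;
}

The printed sequence is 0 1 0: the first charge stays under quota, the second crosses it and wins the flag transition, the third sees the flag already set.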
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bd0d41e69341..cc5603016242 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -215,22 +215,14 @@ static void nft_ct_l3proto_module_put(uint8_t family)
215 nf_ct_l3proto_module_put(family); 215 nf_ct_l3proto_module_put(family);
216} 216}
217 217
218static int nft_ct_init_validate_get(const struct nft_expr *expr, 218static int nft_ct_get_init(const struct nft_ctx *ctx,
219 const struct nlattr * const tb[]) 219 const struct nft_expr *expr,
220 const struct nlattr * const tb[])
220{ 221{
221 struct nft_ct *priv = nft_expr_priv(expr); 222 struct nft_ct *priv = nft_expr_priv(expr);
223 int err;
222 224
223 if (tb[NFTA_CT_DIRECTION] != NULL) { 225 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
224 priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
225 switch (priv->dir) {
226 case IP_CT_DIR_ORIGINAL:
227 case IP_CT_DIR_REPLY:
228 break;
229 default:
230 return -EINVAL;
231 }
232 }
233
234 switch (priv->key) { 226 switch (priv->key) {
235 case NFT_CT_STATE: 227 case NFT_CT_STATE:
236 case NFT_CT_DIRECTION: 228 case NFT_CT_DIRECTION:
@@ -262,55 +254,55 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
262 return -EOPNOTSUPP; 254 return -EOPNOTSUPP;
263 } 255 }
264 256
265 return 0; 257 if (tb[NFTA_CT_DIRECTION] != NULL) {
266} 258 priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
267 259 switch (priv->dir) {
268static int nft_ct_init_validate_set(uint32_t key) 260 case IP_CT_DIR_ORIGINAL:
269{ 261 case IP_CT_DIR_REPLY:
270 switch (key) { 262 break;
271 case NFT_CT_MARK: 263 default:
272 break; 264 return -EINVAL;
273 default: 265 }
274 return -EOPNOTSUPP;
275 } 266 }
276 267
268 priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
269 err = nft_validate_output_register(priv->dreg);
270 if (err < 0)
271 return err;
272
273 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
274 if (err < 0)
275 return err;
276
277 err = nft_ct_l3proto_try_module_get(ctx->afi->family);
278 if (err < 0)
279 return err;
280
277 return 0; 281 return 0;
278} 282}
279 283
280static int nft_ct_init(const struct nft_ctx *ctx, 284static int nft_ct_set_init(const struct nft_ctx *ctx,
281 const struct nft_expr *expr, 285 const struct nft_expr *expr,
282 const struct nlattr * const tb[]) 286 const struct nlattr * const tb[])
283{ 287{
284 struct nft_ct *priv = nft_expr_priv(expr); 288 struct nft_ct *priv = nft_expr_priv(expr);
285 int err; 289 int err;
286 290
287 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); 291 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
288 292 switch (priv->key) {
289 if (tb[NFTA_CT_DREG]) { 293#ifdef CONFIG_NF_CONNTRACK_MARK
290 err = nft_ct_init_validate_get(expr, tb); 294 case NFT_CT_MARK:
291 if (err < 0) 295 break;
292 return err; 296#endif
293 297 default:
294 priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG])); 298 return -EOPNOTSUPP;
295 err = nft_validate_output_register(priv->dreg);
296 if (err < 0)
297 return err;
298
299 err = nft_validate_data_load(ctx, priv->dreg, NULL,
300 NFT_DATA_VALUE);
301 if (err < 0)
302 return err;
303 } else {
304 err = nft_ct_init_validate_set(priv->key);
305 if (err < 0)
306 return err;
307
308 priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
309 err = nft_validate_input_register(priv->sreg);
310 if (err < 0)
311 return err;
312 } 299 }
313 300
301 priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
302 err = nft_validate_input_register(priv->sreg);
303 if (err < 0)
304 return err;
305
314 err = nft_ct_l3proto_try_module_get(ctx->afi->family); 306 err = nft_ct_l3proto_try_module_get(ctx->afi->family);
315 if (err < 0) 307 if (err < 0)
316 return err; 308 return err;
@@ -370,7 +362,7 @@ static const struct nft_expr_ops nft_ct_get_ops = {
370 .type = &nft_ct_type, 362 .type = &nft_ct_type,
371 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), 363 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
372 .eval = nft_ct_get_eval, 364 .eval = nft_ct_get_eval,
373 .init = nft_ct_init, 365 .init = nft_ct_get_init,
374 .destroy = nft_ct_destroy, 366 .destroy = nft_ct_destroy,
375 .dump = nft_ct_get_dump, 367 .dump = nft_ct_get_dump,
376}; 368};
@@ -379,7 +371,7 @@ static const struct nft_expr_ops nft_ct_set_ops = {
379 .type = &nft_ct_type, 371 .type = &nft_ct_type,
380 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), 372 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
381 .eval = nft_ct_set_eval, 373 .eval = nft_ct_set_eval,
382 .init = nft_ct_init, 374 .init = nft_ct_set_init,
383 .destroy = nft_ct_destroy, 375 .destroy = nft_ct_destroy,
384 .dump = nft_ct_set_dump, 376 .dump = nft_ct_set_dump,
385}; 377};
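The nft_ct rework above splits the shared init into per-ops routines: the get path validates the key and a destination register, while the set path accepts only writable keys (just NFT_CT_MARK, and only with CONFIG_NF_CONNTRACK_MARK) plus a source register. A small sketch of the underlying select-by-attribute idea, where the presence of a DREG or SREG attribute picks the ops; the enum value for the mark key is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct expr_ops {
	const char *name;
	int (*init)(unsigned int key);
};

static int ct_get_init(unsigned int key)
{
	(void)key;	/* get side: every readable key is fine here */
	return 0;
}

static int ct_set_init(unsigned int key)
{	/* set side: only the mark is writable in this model */
	return key == 1 /* CT_MARK, illustrative */ ? 0 : -1;
}

static const struct expr_ops ct_get_ops = { "ct get", ct_get_init };
static const struct expr_ops ct_set_ops = { "ct set", ct_set_init };

/* Mirror of the attribute check: DREG selects get, SREG selects set. */
static const struct expr_ops *ct_select_ops(bool has_dreg, bool has_sreg)
{
	if (has_dreg == has_sreg)
		return NULL;	/* exactly one register must be given */
	return has_dreg ? &ct_get_ops : &ct_set_ops;
}

int main(void)
{
	const struct expr_ops *ops = ct_select_ops(false, true);

	printf("%s -> %d\n", ops->name, ops->init(1));
	return 0;
}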
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 3b1ad876d6b0..4080ed6a072b 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/log2.h>
15#include <linux/jhash.h> 16#include <linux/jhash.h>
16#include <linux/netlink.h> 17#include <linux/netlink.h>
17#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
@@ -19,7 +20,7 @@
19#include <linux/netfilter/nf_tables.h> 20#include <linux/netfilter/nf_tables.h>
20#include <net/netfilter/nf_tables.h> 21#include <net/netfilter/nf_tables.h>
21 22
22#define NFT_HASH_MIN_SIZE 4 23#define NFT_HASH_MIN_SIZE 4UL
23 24
24struct nft_hash { 25struct nft_hash {
25 struct nft_hash_table __rcu *tbl; 26 struct nft_hash_table __rcu *tbl;
@@ -27,7 +28,6 @@ struct nft_hash {
27 28
28struct nft_hash_table { 29struct nft_hash_table {
29 unsigned int size; 30 unsigned int size;
30 unsigned int elements;
31 struct nft_hash_elem __rcu *buckets[]; 31 struct nft_hash_elem __rcu *buckets[];
32}; 32};
33 33
@@ -76,10 +76,12 @@ static bool nft_hash_lookup(const struct nft_set *set,
76 76
77static void nft_hash_tbl_free(const struct nft_hash_table *tbl) 77static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
78{ 78{
79 if (is_vmalloc_addr(tbl)) 79 kvfree(tbl);
80 vfree(tbl); 80}
81 else 81
82 kfree(tbl); 82static unsigned int nft_hash_tbl_size(unsigned int nelem)
83{
84 return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
83} 85}
84 86
85static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets) 87static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
@@ -161,7 +163,6 @@ static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
161 break; 163 break;
162 } 164 }
163 } 165 }
164 ntbl->elements = tbl->elements;
165 166
166 /* Publish new table */ 167 /* Publish new table */
167 rcu_assign_pointer(priv->tbl, ntbl); 168 rcu_assign_pointer(priv->tbl, ntbl);
@@ -201,7 +202,6 @@ static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
201 ; 202 ;
202 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); 203 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
203 } 204 }
204 ntbl->elements = tbl->elements;
205 205
206 /* Publish new table */ 206 /* Publish new table */
207 rcu_assign_pointer(priv->tbl, ntbl); 207 rcu_assign_pointer(priv->tbl, ntbl);
@@ -237,10 +237,9 @@ static int nft_hash_insert(const struct nft_set *set,
237 h = nft_hash_data(&he->key, tbl->size, set->klen); 237 h = nft_hash_data(&he->key, tbl->size, set->klen);
238 RCU_INIT_POINTER(he->next, tbl->buckets[h]); 238 RCU_INIT_POINTER(he->next, tbl->buckets[h]);
239 rcu_assign_pointer(tbl->buckets[h], he); 239 rcu_assign_pointer(tbl->buckets[h], he);
240 tbl->elements++;
241 240
242 /* Expand table when exceeding 75% load */ 241 /* Expand table when exceeding 75% load */
243 if (tbl->elements > tbl->size / 4 * 3) 242 if (set->nelems + 1 > tbl->size / 4 * 3)
244 nft_hash_tbl_expand(set, priv); 243 nft_hash_tbl_expand(set, priv);
245 244
246 return 0; 245 return 0;
@@ -268,10 +267,9 @@ static void nft_hash_remove(const struct nft_set *set,
268 RCU_INIT_POINTER(*pprev, he->next); 267 RCU_INIT_POINTER(*pprev, he->next);
269 synchronize_rcu(); 268 synchronize_rcu();
270 kfree(he); 269 kfree(he);
271 tbl->elements--;
272 270
273 /* Shrink table beneath 30% load */ 271 /* Shrink table beneath 30% load */
274 if (tbl->elements < tbl->size * 3 / 10 && 272 if (set->nelems - 1 < tbl->size * 3 / 10 &&
275 tbl->size > NFT_HASH_MIN_SIZE) 273 tbl->size > NFT_HASH_MIN_SIZE)
276 nft_hash_tbl_shrink(set, priv); 274 nft_hash_tbl_shrink(set, priv);
277} 275}
@@ -335,17 +333,23 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
335} 333}
336 334
337static int nft_hash_init(const struct nft_set *set, 335static int nft_hash_init(const struct nft_set *set,
336 const struct nft_set_desc *desc,
338 const struct nlattr * const tb[]) 337 const struct nlattr * const tb[])
339{ 338{
340 struct nft_hash *priv = nft_set_priv(set); 339 struct nft_hash *priv = nft_set_priv(set);
341 struct nft_hash_table *tbl; 340 struct nft_hash_table *tbl;
341 unsigned int size;
342 342
343 if (unlikely(!nft_hash_rnd_initted)) { 343 if (unlikely(!nft_hash_rnd_initted)) {
344 get_random_bytes(&nft_hash_rnd, 4); 344 get_random_bytes(&nft_hash_rnd, 4);
345 nft_hash_rnd_initted = true; 345 nft_hash_rnd_initted = true;
346 } 346 }
347 347
348 tbl = nft_hash_tbl_alloc(NFT_HASH_MIN_SIZE); 348 size = NFT_HASH_MIN_SIZE;
349 if (desc->size)
350 size = nft_hash_tbl_size(desc->size);
351
352 tbl = nft_hash_tbl_alloc(size);
349 if (tbl == NULL) 353 if (tbl == NULL)
350 return -ENOMEM; 354 return -ENOMEM;
351 RCU_INIT_POINTER(priv->tbl, tbl); 355 RCU_INIT_POINTER(priv->tbl, tbl);
@@ -369,8 +373,37 @@ static void nft_hash_destroy(const struct nft_set *set)
369 kfree(tbl); 373 kfree(tbl);
370} 374}
371 375
376static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
377 struct nft_set_estimate *est)
378{
379 unsigned int esize;
380
381 esize = sizeof(struct nft_hash_elem);
382 if (features & NFT_SET_MAP)
383 esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
384
385 if (desc->size) {
386 est->size = sizeof(struct nft_hash) +
387 nft_hash_tbl_size(desc->size) *
388 sizeof(struct nft_hash_elem *) +
389 desc->size * esize;
390 } else {
391 /* Resizing happens when the load drops below 30% or goes
392 * above 75%. The average of 52.5% load (approximated by 50%)
393 * is used for the size estimation of the hash buckets,
394 * meaning we calculate two buckets per element.
395 */
396 est->size = esize + 2 * sizeof(struct nft_hash_elem *);
397 }
398
399 est->class = NFT_SET_CLASS_O_1;
400
401 return true;
402}
403
372static struct nft_set_ops nft_hash_ops __read_mostly = { 404static struct nft_set_ops nft_hash_ops __read_mostly = {
373 .privsize = nft_hash_privsize, 405 .privsize = nft_hash_privsize,
406 .estimate = nft_hash_estimate,
374 .init = nft_hash_init, 407 .init = nft_hash_init,
375 .destroy = nft_hash_destroy, 408 .destroy = nft_hash_destroy,
376 .get = nft_hash_get, 409 .get = nft_hash_get,
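nft_hash_tbl_size() above sizes the bucket array so a set filled to its declared size still sits at or below the 75% expansion trigger: nelem * 4 / 3 buckets, rounded up to a power of two, with a floor of NFT_HASH_MIN_SIZE. nft_hash_estimate() then charges the bucket pointers plus one element per entry. A standalone check of that arithmetic; the per-element byte count is made up for the example:

#include <stdio.h>

#define HASH_MIN_SIZE 4UL

/* Smallest power of two >= x (x > 0). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* Buckets needed so nelem entries stay at or below 75% load. */
static unsigned long tbl_size(unsigned long nelem)
{
	unsigned long want = roundup_pow_of_two(nelem * 4 / 3);

	return want > HASH_MIN_SIZE ? want : HASH_MIN_SIZE;
}

int main(void)
{
	const unsigned long elem_size = 32;	/* made-up footprint */
	unsigned long n;

	for (n = 1; n <= 1024; n *= 8) {
		unsigned long buckets = tbl_size(n);

		printf("%4lu elems -> %5lu buckets, ~%lu bytes\n",
		       n, buckets,
		       buckets * sizeof(void *) + n * elem_size);
	}
	return 0;
}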
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 7fd2bea8aa23..6404a726d17b 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -56,8 +56,14 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
56 return -EINVAL; 56 return -EINVAL;
57 57
58 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]); 58 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
59 if (IS_ERR(set)) 59 if (IS_ERR(set)) {
60 return PTR_ERR(set); 60 if (tb[NFTA_LOOKUP_SET_ID]) {
61 set = nf_tables_set_lookup_byid(ctx->net,
62 tb[NFTA_LOOKUP_SET_ID]);
63 }
64 if (IS_ERR(set))
65 return PTR_ERR(set);
66 }
61 67
62 priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG])); 68 priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
63 err = nft_validate_input_register(priv->sreg); 69 err = nft_validate_input_register(priv->sreg);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 425cf39af890..852b178c6ae7 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -18,18 +18,11 @@
18#include <net/sock.h> 18#include <net/sock.h>
19#include <net/tcp_states.h> /* for TCP_TIME_WAIT */ 19#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
20#include <net/netfilter/nf_tables.h> 20#include <net/netfilter/nf_tables.h>
21#include <net/netfilter/nft_meta.h>
21 22
22struct nft_meta { 23void nft_meta_get_eval(const struct nft_expr *expr,
23 enum nft_meta_keys key:8; 24 struct nft_data data[NFT_REG_MAX + 1],
24 union { 25 const struct nft_pktinfo *pkt)
25 enum nft_registers dreg:8;
26 enum nft_registers sreg:8;
27 };
28};
29
30static void nft_meta_get_eval(const struct nft_expr *expr,
31 struct nft_data data[NFT_REG_MAX + 1],
32 const struct nft_pktinfo *pkt)
33{ 26{
34 const struct nft_meta *priv = nft_expr_priv(expr); 27 const struct nft_meta *priv = nft_expr_priv(expr);
35 const struct sk_buff *skb = pkt->skb; 28 const struct sk_buff *skb = pkt->skb;
@@ -140,10 +133,11 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
140err: 133err:
141 data[NFT_REG_VERDICT].verdict = NFT_BREAK; 134 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
142} 135}
136EXPORT_SYMBOL_GPL(nft_meta_get_eval);
143 137
144static void nft_meta_set_eval(const struct nft_expr *expr, 138void nft_meta_set_eval(const struct nft_expr *expr,
145 struct nft_data data[NFT_REG_MAX + 1], 139 struct nft_data data[NFT_REG_MAX + 1],
146 const struct nft_pktinfo *pkt) 140 const struct nft_pktinfo *pkt)
147{ 141{
148 const struct nft_meta *meta = nft_expr_priv(expr); 142 const struct nft_meta *meta = nft_expr_priv(expr);
149 struct sk_buff *skb = pkt->skb; 143 struct sk_buff *skb = pkt->skb;
@@ -163,28 +157,24 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
163 WARN_ON(1); 157 WARN_ON(1);
164 } 158 }
165} 159}
160EXPORT_SYMBOL_GPL(nft_meta_set_eval);
166 161
167static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = { 162const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
168 [NFTA_META_DREG] = { .type = NLA_U32 }, 163 [NFTA_META_DREG] = { .type = NLA_U32 },
169 [NFTA_META_KEY] = { .type = NLA_U32 }, 164 [NFTA_META_KEY] = { .type = NLA_U32 },
170 [NFTA_META_SREG] = { .type = NLA_U32 }, 165 [NFTA_META_SREG] = { .type = NLA_U32 },
171}; 166};
167EXPORT_SYMBOL_GPL(nft_meta_policy);
172 168
173static int nft_meta_init_validate_set(uint32_t key) 169int nft_meta_get_init(const struct nft_ctx *ctx,
170 const struct nft_expr *expr,
171 const struct nlattr * const tb[])
174{ 172{
175 switch (key) { 173 struct nft_meta *priv = nft_expr_priv(expr);
176 case NFT_META_MARK: 174 int err;
177 case NFT_META_PRIORITY:
178 case NFT_META_NFTRACE:
179 return 0;
180 default:
181 return -EOPNOTSUPP;
182 }
183}
184 175
185static int nft_meta_init_validate_get(uint32_t key) 176 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
186{ 177 switch (priv->key) {
187 switch (key) {
188 case NFT_META_LEN: 178 case NFT_META_LEN:
189 case NFT_META_PROTOCOL: 179 case NFT_META_PROTOCOL:
190 case NFT_META_NFPROTO: 180 case NFT_META_NFPROTO:
@@ -205,39 +195,41 @@ static int nft_meta_init_validate_get(uint32_t key)
205#ifdef CONFIG_NETWORK_SECMARK 195#ifdef CONFIG_NETWORK_SECMARK
206 case NFT_META_SECMARK: 196 case NFT_META_SECMARK:
207#endif 197#endif
208 return 0; 198 break;
209 default: 199 default:
210 return -EOPNOTSUPP; 200 return -EOPNOTSUPP;
211 } 201 }
212 202
203 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
204 err = nft_validate_output_register(priv->dreg);
205 if (err < 0)
206 return err;
207
208 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
209 if (err < 0)
210 return err;
211
212 return 0;
213} 213}
214EXPORT_SYMBOL_GPL(nft_meta_get_init);
214 215
215static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 216int nft_meta_set_init(const struct nft_ctx *ctx,
216 const struct nlattr * const tb[]) 217 const struct nft_expr *expr,
218 const struct nlattr * const tb[])
217{ 219{
218 struct nft_meta *priv = nft_expr_priv(expr); 220 struct nft_meta *priv = nft_expr_priv(expr);
219 int err; 221 int err;
220 222
221 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); 223 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
222 224 switch (priv->key) {
223 if (tb[NFTA_META_DREG]) { 225 case NFT_META_MARK:
224 err = nft_meta_init_validate_get(priv->key); 226 case NFT_META_PRIORITY:
225 if (err < 0) 227 case NFT_META_NFTRACE:
226 return err; 228 break;
227 229 default:
228 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG])); 230 return -EOPNOTSUPP;
229 err = nft_validate_output_register(priv->dreg);
230 if (err < 0)
231 return err;
232
233 return nft_validate_data_load(ctx, priv->dreg, NULL,
234 NFT_DATA_VALUE);
235 } 231 }
236 232
237 err = nft_meta_init_validate_set(priv->key);
238 if (err < 0)
239 return err;
240
241 priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG])); 233 priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
242 err = nft_validate_input_register(priv->sreg); 234 err = nft_validate_input_register(priv->sreg);
243 if (err < 0) 235 if (err < 0)
@@ -245,9 +237,10 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
245 237
246 return 0; 238 return 0;
247} 239}
240EXPORT_SYMBOL_GPL(nft_meta_set_init);
248 241
249static int nft_meta_get_dump(struct sk_buff *skb, 242int nft_meta_get_dump(struct sk_buff *skb,
250 const struct nft_expr *expr) 243 const struct nft_expr *expr)
251{ 244{
252 const struct nft_meta *priv = nft_expr_priv(expr); 245 const struct nft_meta *priv = nft_expr_priv(expr);
253 246
@@ -260,9 +253,10 @@ static int nft_meta_get_dump(struct sk_buff *skb,
260nla_put_failure: 253nla_put_failure:
261 return -1; 254 return -1;
262} 255}
256EXPORT_SYMBOL_GPL(nft_meta_get_dump);
263 257
264static int nft_meta_set_dump(struct sk_buff *skb, 258int nft_meta_set_dump(struct sk_buff *skb,
265 const struct nft_expr *expr) 259 const struct nft_expr *expr)
266{ 260{
267 const struct nft_meta *priv = nft_expr_priv(expr); 261 const struct nft_meta *priv = nft_expr_priv(expr);
268 262
@@ -276,13 +270,14 @@ static int nft_meta_set_dump(struct sk_buff *skb,
276nla_put_failure: 270nla_put_failure:
277 return -1; 271 return -1;
278} 272}
273EXPORT_SYMBOL_GPL(nft_meta_set_dump);
279 274
280static struct nft_expr_type nft_meta_type; 275static struct nft_expr_type nft_meta_type;
281static const struct nft_expr_ops nft_meta_get_ops = { 276static const struct nft_expr_ops nft_meta_get_ops = {
282 .type = &nft_meta_type, 277 .type = &nft_meta_type,
283 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), 278 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
284 .eval = nft_meta_get_eval, 279 .eval = nft_meta_get_eval,
285 .init = nft_meta_init, 280 .init = nft_meta_get_init,
286 .dump = nft_meta_get_dump, 281 .dump = nft_meta_get_dump,
287}; 282};
288 283
@@ -290,7 +285,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
290 .type = &nft_meta_type, 285 .type = &nft_meta_type,
291 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), 286 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
292 .eval = nft_meta_set_eval, 287 .eval = nft_meta_set_eval,
293 .init = nft_meta_init, 288 .init = nft_meta_set_init,
294 .dump = nft_meta_set_dump, 289 .dump = nft_meta_set_dump,
295}; 290};
296 291
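
Un-static-ing the helpers and splitting nft_meta_init() into get/set variants lets each ops table point at a purpose-built init routine, and lets other modules reuse the exported symbols. A toy analogue of the ops-table split; the names and the selection rule here are illustrative, not the kernel API:

    #include <stdio.h>

    /* Two ops tables with dedicated init routines replace one init
     * that branched on which netlink attribute was present. */
    struct expr_ops {
        const char *name;
        void (*init)(void);
    };

    static void meta_get_init(void) { puts("get: whitelist keys, validate DREG"); }
    static void meta_set_init(void) { puts("set: whitelist keys, validate SREG"); }

    static const struct expr_ops meta_get_ops = { "meta get", meta_get_init };
    static const struct expr_ops meta_set_ops = { "meta set", meta_set_init };

    /* Hypothetical selector: pick the ops table from the attributes
     * the user supplied. */
    static const struct expr_ops *select_ops(int has_dreg)
    {
        return has_dreg ? &meta_get_ops : &meta_set_ops;
    }

    int main(void)
    {
        select_ops(1)->init();
        select_ops(0)->init();
        return 0;
    }
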
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index e21d69d13506..e1836ff88199 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -18,6 +18,8 @@
18#include <linux/netfilter/nf_tables.h> 18#include <linux/netfilter/nf_tables.h>
19#include <net/netfilter/nf_tables.h> 19#include <net/netfilter/nf_tables.h>
20 20
21static DEFINE_SPINLOCK(nft_rbtree_lock);
22
21struct nft_rbtree { 23struct nft_rbtree {
22 struct rb_root root; 24 struct rb_root root;
23}; 25};
@@ -38,6 +40,7 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
38 const struct rb_node *parent = priv->root.rb_node; 40 const struct rb_node *parent = priv->root.rb_node;
39 int d; 41 int d;
40 42
43 spin_lock_bh(&nft_rbtree_lock);
41 while (parent != NULL) { 44 while (parent != NULL) {
42 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 45 rbe = rb_entry(parent, struct nft_rbtree_elem, node);
43 46
@@ -53,6 +56,8 @@ found:
53 goto out; 56 goto out;
54 if (set->flags & NFT_SET_MAP) 57 if (set->flags & NFT_SET_MAP)
55 nft_data_copy(data, rbe->data); 58 nft_data_copy(data, rbe->data);
59
60 spin_unlock_bh(&nft_rbtree_lock);
56 return true; 61 return true;
57 } 62 }
58 } 63 }
@@ -62,6 +67,7 @@ found:
62 goto found; 67 goto found;
63 } 68 }
64out: 69out:
70 spin_unlock_bh(&nft_rbtree_lock);
65 return false; 71 return false;
66} 72}
67 73
@@ -124,9 +130,12 @@ static int nft_rbtree_insert(const struct nft_set *set,
124 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) 130 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
125 nft_data_copy(rbe->data, &elem->data); 131 nft_data_copy(rbe->data, &elem->data);
126 132
133 spin_lock_bh(&nft_rbtree_lock);
127 err = __nft_rbtree_insert(set, rbe); 134 err = __nft_rbtree_insert(set, rbe);
128 if (err < 0) 135 if (err < 0)
129 kfree(rbe); 136 kfree(rbe);
137
138 spin_unlock_bh(&nft_rbtree_lock);
130 return err; 139 return err;
131} 140}
132 141
@@ -136,7 +145,9 @@ static void nft_rbtree_remove(const struct nft_set *set,
136 struct nft_rbtree *priv = nft_set_priv(set); 145 struct nft_rbtree *priv = nft_set_priv(set);
137 struct nft_rbtree_elem *rbe = elem->cookie; 146 struct nft_rbtree_elem *rbe = elem->cookie;
138 147
148 spin_lock_bh(&nft_rbtree_lock);
139 rb_erase(&rbe->node, &priv->root); 149 rb_erase(&rbe->node, &priv->root);
150 spin_unlock_bh(&nft_rbtree_lock);
140 kfree(rbe); 151 kfree(rbe);
141} 152}
142 153
@@ -147,6 +158,7 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
147 struct nft_rbtree_elem *rbe; 158 struct nft_rbtree_elem *rbe;
148 int d; 159 int d;
149 160
161 spin_lock_bh(&nft_rbtree_lock);
150 while (parent != NULL) { 162 while (parent != NULL) {
151 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 163 rbe = rb_entry(parent, struct nft_rbtree_elem, node);
152 164
@@ -161,9 +173,11 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
161 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) 173 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
162 nft_data_copy(&elem->data, rbe->data); 174 nft_data_copy(&elem->data, rbe->data);
163 elem->flags = rbe->flags; 175 elem->flags = rbe->flags;
176 spin_unlock_bh(&nft_rbtree_lock);
164 return 0; 177 return 0;
165 } 178 }
166 } 179 }
180 spin_unlock_bh(&nft_rbtree_lock);
167 return -ENOENT; 181 return -ENOENT;
168} 182}
169 183
@@ -176,6 +190,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
176 struct nft_set_elem elem; 190 struct nft_set_elem elem;
177 struct rb_node *node; 191 struct rb_node *node;
178 192
193 spin_lock_bh(&nft_rbtree_lock);
179 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { 194 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
180 if (iter->count < iter->skip) 195 if (iter->count < iter->skip)
181 goto cont; 196 goto cont;
@@ -188,11 +203,14 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
188 elem.flags = rbe->flags; 203 elem.flags = rbe->flags;
189 204
190 iter->err = iter->fn(ctx, set, iter, &elem); 205 iter->err = iter->fn(ctx, set, iter, &elem);
191 if (iter->err < 0) 206 if (iter->err < 0) {
207 spin_unlock_bh(&nft_rbtree_lock);
192 return; 208 return;
209 }
193cont: 210cont:
194 iter->count++; 211 iter->count++;
195 } 212 }
213 spin_unlock_bh(&nft_rbtree_lock);
196} 214}
197 215
198static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) 216static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
@@ -201,6 +219,7 @@ static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
201} 219}
202 220
203static int nft_rbtree_init(const struct nft_set *set, 221static int nft_rbtree_init(const struct nft_set *set,
222 const struct nft_set_desc *desc,
204 const struct nlattr * const nla[]) 223 const struct nlattr * const nla[])
205{ 224{
206 struct nft_rbtree *priv = nft_set_priv(set); 225 struct nft_rbtree *priv = nft_set_priv(set);
@@ -215,15 +234,37 @@ static void nft_rbtree_destroy(const struct nft_set *set)
215 struct nft_rbtree_elem *rbe; 234 struct nft_rbtree_elem *rbe;
216 struct rb_node *node; 235 struct rb_node *node;
217 236
237 spin_lock_bh(&nft_rbtree_lock);
218 while ((node = priv->root.rb_node) != NULL) { 238 while ((node = priv->root.rb_node) != NULL) {
219 rb_erase(node, &priv->root); 239 rb_erase(node, &priv->root);
220 rbe = rb_entry(node, struct nft_rbtree_elem, node); 240 rbe = rb_entry(node, struct nft_rbtree_elem, node);
221 nft_rbtree_elem_destroy(set, rbe); 241 nft_rbtree_elem_destroy(set, rbe);
222 } 242 }
243 spin_unlock_bh(&nft_rbtree_lock);
244}
245
246static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
247 struct nft_set_estimate *est)
248{
249 unsigned int nsize;
250
251 nsize = sizeof(struct nft_rbtree_elem);
252 if (features & NFT_SET_MAP)
253 nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
254
255 if (desc->size)
256 est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
257 else
258 est->size = nsize;
259
260 est->class = NFT_SET_CLASS_O_LOG_N;
261
262 return true;
223} 263}
224 264
225static struct nft_set_ops nft_rbtree_ops __read_mostly = { 265static struct nft_set_ops nft_rbtree_ops __read_mostly = {
226 .privsize = nft_rbtree_privsize, 266 .privsize = nft_rbtree_privsize,
267 .estimate = nft_rbtree_estimate,
227 .init = nft_rbtree_init, 268 .init = nft_rbtree_init,
228 .destroy = nft_rbtree_destroy, 269 .destroy = nft_rbtree_destroy,
229 .insert = nft_rbtree_insert, 270 .insert = nft_rbtree_insert,
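
With both set backends now reporting an estimate, the core can weigh memory against lookup class: hash is O(1) but pays for bucket pointers, rbtree is O(log n) with only per-node overhead. A toy comparison under assumed 64-bit element sizes (the real figures come from sizeof() on the respective elem structs):

    #include <stdio.h>

    #define HASH_ELEM 48u    /* assumed sizeof(struct nft_hash_elem) */
    #define RB_ELEM   56u    /* assumed sizeof(struct nft_rbtree_elem) */

    int main(void)
    {
        unsigned int n = 1000;

        /* hash: element plus two bucket pointers (~50% load) */
        printf("hash:   ~%u bytes, class O(1)\n", n * (HASH_ELEM + 16u));
        /* rbtree: node only, lookups cost O(log n) */
        printf("rbtree: ~%u bytes, class O(log n)\n", n * RB_ELEM);
        return 0;
    }
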
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 12d4da8e6c77..bbffdbdaf603 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf");
23static int bpf_mt_check(const struct xt_mtchk_param *par) 23static int bpf_mt_check(const struct xt_mtchk_param *par)
24{ 24{
25 struct xt_bpf_info *info = par->matchinfo; 25 struct xt_bpf_info *info = par->matchinfo;
26 struct sock_fprog program; 26 struct sock_fprog_kern program;
27 27
28 program.len = info->bpf_program_num_elem; 28 program.len = info->bpf_program_num_elem;
29 program.filter = (struct sock_filter __user *) info->bpf_program; 29 program.filter = info->bpf_program;
30
30 if (sk_unattached_filter_create(&info->filter, &program)) { 31 if (sk_unattached_filter_create(&info->filter, &program)) {
31 pr_info("bpf: check failed: parse error\n"); 32 pr_info("bpf: check failed: parse error\n");
32 return -EINVAL; 33 return -EINVAL;
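
struct sock_fprog_kern carries a kernel-space instruction pointer, so the bogus __user cast goes away while the program/length pairing stays the same. For contrast, a minimal userspace program attaching the equivalent accept-all classic BPF filter through the long-standing SO_ATTACH_FILTER socket option (the in-kernel path above uses sk_unattached_filter_create() instead):

    #include <linux/filter.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        /* One-instruction program: return 0xffff, i.e. accept up to
         * 64k of every packet. */
        struct sock_filter insns[] = {
            BPF_STMT(BPF_RET | BPF_K, 0xffff),
        };
        struct sock_fprog prog = {
            .len = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                 &prog, sizeof(prog)) < 0) {
            perror("attach");
            return 1;
        }
        puts("filter attached");
        return 0;
    }
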
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index b3be0ef21f19..8c646ed9c921 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -21,11 +21,14 @@ MODULE_ALIAS("ip6t_nfacct");
21 21
22static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par) 22static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
23{ 23{
24 int overquota;
24 const struct xt_nfacct_match_info *info = par->targinfo; 25 const struct xt_nfacct_match_info *info = par->targinfo;
25 26
26 nfnl_acct_update(skb, info->nfacct); 27 nfnl_acct_update(skb, info->nfacct);
27 28
28 return true; 29 overquota = nfnl_acct_overquota(skb, info->nfacct);
30
31 return overquota == NFACCT_UNDERQUOTA ? false : true;
29} 32}
30 33
31static int 34static int
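
The match verdict now reflects quota state instead of being unconditionally true. A toy model of the new behaviour, with illustrative names in place of nfnl_acct_update()/nfnl_acct_overquota():

    #include <stdio.h>

    enum { UNDERQUOTA, OVERQUOTA };

    struct acct { unsigned long long pkts, quota; };

    /* The counter is still updated on every packet; the return value
     * now says whether the configured quota has been exceeded. */
    static int acct_update_and_check(struct acct *a)
    {
        a->pkts++;
        return a->pkts > a->quota ? OVERQUOTA : UNDERQUOTA;
    }

    int main(void)
    {
        struct acct a = { .pkts = 0, .quota = 2 };

        for (int i = 0; i < 4; i++)
            printf("pkt %d: match=%s\n", i,
                   acct_update_and_check(&a) == UNDERQUOTA ?
                   "false" : "true");
        return 0;
    }
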
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 1e657cf715c4..a9faae89f955 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -313,10 +313,7 @@ out:
313 313
314static void recent_table_free(void *addr) 314static void recent_table_free(void *addr)
315{ 315{
316 if (is_vmalloc_addr(addr)) 316 kvfree(addr);
317 vfree(addr);
318 else
319 kfree(addr);
320} 317}
321 318
322static int recent_mt_check(const struct xt_mtchk_param *par, 319static int recent_mt_check(const struct xt_mtchk_param *par,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f22757a29cd0..15c731f03fa6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1206,7 +1206,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
1206 struct module *module = NULL; 1206 struct module *module = NULL;
1207 struct mutex *cb_mutex; 1207 struct mutex *cb_mutex;
1208 struct netlink_sock *nlk; 1208 struct netlink_sock *nlk;
1209 void (*bind)(int group); 1209 int (*bind)(int group);
1210 void (*unbind)(int group);
1210 int err = 0; 1211 int err = 0;
1211 1212
1212 sock->state = SS_UNCONNECTED; 1213 sock->state = SS_UNCONNECTED;
@@ -1232,6 +1233,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
1232 err = -EPROTONOSUPPORT; 1233 err = -EPROTONOSUPPORT;
1233 cb_mutex = nl_table[protocol].cb_mutex; 1234 cb_mutex = nl_table[protocol].cb_mutex;
1234 bind = nl_table[protocol].bind; 1235 bind = nl_table[protocol].bind;
1236 unbind = nl_table[protocol].unbind;
1235 netlink_unlock_table(); 1237 netlink_unlock_table();
1236 1238
1237 if (err < 0) 1239 if (err < 0)
@@ -1248,6 +1250,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
1248 nlk = nlk_sk(sock->sk); 1250 nlk = nlk_sk(sock->sk);
1249 nlk->module = module; 1251 nlk->module = module;
1250 nlk->netlink_bind = bind; 1252 nlk->netlink_bind = bind;
1253 nlk->netlink_unbind = unbind;
1251out: 1254out:
1252 return err; 1255 return err;
1253 1256
@@ -1301,6 +1304,7 @@ static int netlink_release(struct socket *sock)
1301 kfree_rcu(old, rcu); 1304 kfree_rcu(old, rcu);
1302 nl_table[sk->sk_protocol].module = NULL; 1305 nl_table[sk->sk_protocol].module = NULL;
1303 nl_table[sk->sk_protocol].bind = NULL; 1306 nl_table[sk->sk_protocol].bind = NULL;
1307 nl_table[sk->sk_protocol].unbind = NULL;
1304 nl_table[sk->sk_protocol].flags = 0; 1308 nl_table[sk->sk_protocol].flags = 0;
1305 nl_table[sk->sk_protocol].registered = 0; 1309 nl_table[sk->sk_protocol].registered = 0;
1306 } 1310 }
@@ -1478,6 +1482,19 @@ static int netlink_realloc_groups(struct sock *sk)
1478 return err; 1482 return err;
1479} 1483}
1480 1484
1485static void netlink_unbind(int group, long unsigned int groups,
1486 struct netlink_sock *nlk)
1487{
1488 int undo;
1489
1490 if (!nlk->netlink_unbind)
1491 return;
1492
1493 for (undo = 0; undo < group; undo++)
1494 if (test_bit(undo, &groups))
1495 nlk->netlink_unbind(undo);
1496}
1497
1481static int netlink_bind(struct socket *sock, struct sockaddr *addr, 1498static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1482 int addr_len) 1499 int addr_len)
1483{ 1500{
@@ -1486,6 +1503,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1486 struct netlink_sock *nlk = nlk_sk(sk); 1503 struct netlink_sock *nlk = nlk_sk(sk);
1487 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1504 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1488 int err; 1505 int err;
1506 long unsigned int groups = nladdr->nl_groups;
1489 1507
1490 if (addr_len < sizeof(struct sockaddr_nl)) 1508 if (addr_len < sizeof(struct sockaddr_nl))
1491 return -EINVAL; 1509 return -EINVAL;
@@ -1494,7 +1512,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1494 return -EINVAL; 1512 return -EINVAL;
1495 1513
1496 /* Only superuser is allowed to listen multicasts */ 1514 /* Only superuser is allowed to listen multicasts */
1497 if (nladdr->nl_groups) { 1515 if (groups) {
1498 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 1516 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1499 return -EPERM; 1517 return -EPERM;
1500 err = netlink_realloc_groups(sk); 1518 err = netlink_realloc_groups(sk);
@@ -1502,37 +1520,45 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1502 return err; 1520 return err;
1503 } 1521 }
1504 1522
1505 if (nlk->portid) { 1523 if (nlk->portid)
1506 if (nladdr->nl_pid != nlk->portid) 1524 if (nladdr->nl_pid != nlk->portid)
1507 return -EINVAL; 1525 return -EINVAL;
1508 } else { 1526
1527 if (nlk->netlink_bind && groups) {
1528 int group;
1529
1530 for (group = 0; group < nlk->ngroups; group++) {
1531 if (!test_bit(group, &groups))
1532 continue;
1533 err = nlk->netlink_bind(group);
1534 if (!err)
1535 continue;
1536 netlink_unbind(group, groups, nlk);
1537 return err;
1538 }
1539 }
1540
1541 if (!nlk->portid) {
1509 err = nladdr->nl_pid ? 1542 err = nladdr->nl_pid ?
1510 netlink_insert(sk, net, nladdr->nl_pid) : 1543 netlink_insert(sk, net, nladdr->nl_pid) :
1511 netlink_autobind(sock); 1544 netlink_autobind(sock);
1512 if (err) 1545 if (err) {
1546 netlink_unbind(nlk->ngroups, groups, nlk);
1513 return err; 1547 return err;
1548 }
1514 } 1549 }
1515 1550
1516 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) 1551 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1517 return 0; 1552 return 0;
1518 1553
1519 netlink_table_grab(); 1554 netlink_table_grab();
1520 netlink_update_subscriptions(sk, nlk->subscriptions + 1555 netlink_update_subscriptions(sk, nlk->subscriptions +
1521 hweight32(nladdr->nl_groups) - 1556 hweight32(groups) -
1522 hweight32(nlk->groups[0])); 1557 hweight32(nlk->groups[0]));
1523 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; 1558 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
1524 netlink_update_listeners(sk); 1559 netlink_update_listeners(sk);
1525 netlink_table_ungrab(); 1560 netlink_table_ungrab();
1526 1561
1527 if (nlk->netlink_bind && nlk->groups[0]) {
1528 int i;
1529
1530 for (i = 0; i < nlk->ngroups; i++) {
1531 if (test_bit(i, nlk->groups))
1532 nlk->netlink_bind(i);
1533 }
1534 }
1535
1536 return 0; 1562 return 0;
1537} 1563}
1538 1564
@@ -2170,13 +2196,17 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2170 return err; 2196 return err;
2171 if (!val || val - 1 >= nlk->ngroups) 2197 if (!val || val - 1 >= nlk->ngroups)
2172 return -EINVAL; 2198 return -EINVAL;
2199 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
2200 err = nlk->netlink_bind(val);
2201 if (err)
2202 return err;
2203 }
2173 netlink_table_grab(); 2204 netlink_table_grab();
2174 netlink_update_socket_mc(nlk, val, 2205 netlink_update_socket_mc(nlk, val,
2175 optname == NETLINK_ADD_MEMBERSHIP); 2206 optname == NETLINK_ADD_MEMBERSHIP);
2176 netlink_table_ungrab(); 2207 netlink_table_ungrab();
2177 2208 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2178 if (nlk->netlink_bind) 2209 nlk->netlink_unbind(val);
2179 nlk->netlink_bind(val);
2180 2210
2181 err = 0; 2211 err = 0;
2182 break; 2212 break;
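
Since a per-protocol bind can now fail, a partially applied group mask has to be unwound before the error is returned. A userspace miniature of the bind/undo sequence, using a plain bitmask in place of test_bit(); group 3 is made to fail for demonstration:

    #include <stdio.h>

    static int bind_group(int g)    { printf("bind   %d\n", g); return g == 3 ? -1 : 0; }
    static void unbind_group(int g) { printf("unbind %d\n", g); }

    /* Same shape as netlink_unbind() above: on failure at 'group',
     * every earlier bound bit is unwound. */
    static void undo_bind(int group, unsigned long groups)
    {
        for (int undo = 0; undo < group; undo++)
            if (groups & (1UL << undo))
                unbind_group(undo);
    }

    int main(void)
    {
        unsigned long groups = 0x0b;    /* groups 0, 1 and 3 */

        for (int g = 0; g < 8; g++) {
            if (!(groups & (1UL << g)))
                continue;
            if (bind_group(g) < 0) {
                undo_bind(g, groups);
                return 1;
            }
        }
        return 0;
    }
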
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index ed13a790b00e..0b59d441f5b6 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -38,7 +38,8 @@ struct netlink_sock {
38 struct mutex *cb_mutex; 38 struct mutex *cb_mutex;
39 struct mutex cb_def_mutex; 39 struct mutex cb_def_mutex;
40 void (*netlink_rcv)(struct sk_buff *skb); 40 void (*netlink_rcv)(struct sk_buff *skb);
41 void (*netlink_bind)(int group); 41 int (*netlink_bind)(int group);
42 void (*netlink_unbind)(int group);
42 struct module *module; 43 struct module *module;
43#ifdef CONFIG_NETLINK_MMAP 44#ifdef CONFIG_NETLINK_MMAP
44 struct mutex pg_vec_lock; 45 struct mutex pg_vec_lock;
@@ -74,7 +75,8 @@ struct netlink_table {
74 unsigned int groups; 75 unsigned int groups;
75 struct mutex *cb_mutex; 76 struct mutex *cb_mutex;
76 struct module *module; 77 struct module *module;
77 void (*bind)(int group); 78 int (*bind)(int group);
79 void (*unbind)(int group);
78 bool (*compare)(struct net *net, struct sock *sock); 80 bool (*compare)(struct net *net, struct sock *sock);
79 int registered; 81 int registered;
80}; 82};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a3ba3ca0ff92..76393f2f4b22 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -317,7 +317,7 @@ static void genl_unregister_mc_groups(struct genl_family *family)
317 } 317 }
318} 318}
319 319
320static int genl_validate_ops(struct genl_family *family) 320static int genl_validate_ops(const struct genl_family *family)
321{ 321{
322 const struct genl_ops *ops = family->ops; 322 const struct genl_ops *ops = family->ops;
323 unsigned int n_ops = family->n_ops; 323 unsigned int n_ops = family->n_ops;
@@ -337,10 +337,6 @@ static int genl_validate_ops(struct genl_family *family)
337 return -EINVAL; 337 return -EINVAL;
338 } 338 }
339 339
340 /* family is not registered yet, so no locking needed */
341 family->ops = ops;
342 family->n_ops = n_ops;
343
344 return 0; 340 return 0;
345} 341}
346 342
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
index 3759add68b1b..71ad7eefddd4 100644
--- a/net/nfc/digital.h
+++ b/net/nfc/digital.h
@@ -71,6 +71,7 @@ static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
71void digital_poll_next_tech(struct nfc_digital_dev *ddev); 71void digital_poll_next_tech(struct nfc_digital_dev *ddev);
72 72
73int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech); 73int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
74int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech);
74int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech); 75int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
75int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech); 76int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech);
76 77
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index e01e15dbf1ab..a6ce3c627e4e 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -22,6 +22,8 @@
22#define DIGITAL_PROTO_NFCA_RF_TECH \ 22#define DIGITAL_PROTO_NFCA_RF_TECH \
23 (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK) 23 (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
24 24
25#define DIGITAL_PROTO_NFCB_RF_TECH NFC_PROTO_ISO14443_B_MASK
26
25#define DIGITAL_PROTO_NFCF_RF_TECH \ 27#define DIGITAL_PROTO_NFCF_RF_TECH \
26 (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK) 28 (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
27 29
@@ -345,6 +347,12 @@ int digital_target_found(struct nfc_digital_dev *ddev,
345 add_crc = digital_skb_add_crc_a; 347 add_crc = digital_skb_add_crc_a;
346 break; 348 break;
347 349
350 case NFC_PROTO_ISO14443_B:
351 framing = NFC_DIGITAL_FRAMING_NFCB_T4T;
352 check_crc = digital_skb_check_crc_b;
353 add_crc = digital_skb_add_crc_b;
354 break;
355
348 default: 356 default:
349 pr_err("Invalid protocol %d\n", protocol); 357 pr_err("Invalid protocol %d\n", protocol);
350 return -EINVAL; 358 return -EINVAL;
@@ -378,6 +386,8 @@ int digital_target_found(struct nfc_digital_dev *ddev,
378 386
379void digital_poll_next_tech(struct nfc_digital_dev *ddev) 387void digital_poll_next_tech(struct nfc_digital_dev *ddev)
380{ 388{
389 u8 rand_mod;
390
381 digital_switch_rf(ddev, 0); 391 digital_switch_rf(ddev, 0);
382 392
383 mutex_lock(&ddev->poll_lock); 393 mutex_lock(&ddev->poll_lock);
@@ -387,8 +397,8 @@ void digital_poll_next_tech(struct nfc_digital_dev *ddev)
387 return; 397 return;
388 } 398 }
389 399
390 ddev->poll_tech_index = (ddev->poll_tech_index + 1) % 400 get_random_bytes(&rand_mod, sizeof(rand_mod));
391 ddev->poll_tech_count; 401 ddev->poll_tech_index = rand_mod % ddev->poll_tech_count;
392 402
393 mutex_unlock(&ddev->poll_lock); 403 mutex_unlock(&ddev->poll_lock);
394 404
@@ -475,6 +485,10 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
475 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, 485 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
476 digital_in_send_sens_req); 486 digital_in_send_sens_req);
477 487
488 if (matching_im_protocols & DIGITAL_PROTO_NFCB_RF_TECH)
489 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106B,
490 digital_in_send_sensb_req);
491
478 if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) { 492 if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
479 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F, 493 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
480 digital_in_send_sensf_req); 494 digital_in_send_sensf_req);
@@ -635,7 +649,8 @@ static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
635 goto done; 649 goto done;
636 } 650 }
637 651
638 if (ddev->curr_protocol == NFC_PROTO_ISO14443) { 652 if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
653 (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
639 rc = digital_in_iso_dep_pull_sod(ddev, resp); 654 rc = digital_in_iso_dep_pull_sod(ddev, resp);
640 if (rc) 655 if (rc)
641 goto done; 656 goto done;
@@ -676,7 +691,8 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
676 goto exit; 691 goto exit;
677 } 692 }
678 693
679 if (ddev->curr_protocol == NFC_PROTO_ISO14443) { 694 if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
695 (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
680 rc = digital_in_iso_dep_push_sod(ddev, skb); 696 rc = digital_in_iso_dep_push_sod(ddev, skb);
681 if (rc) 697 if (rc)
682 goto exit; 698 goto exit;
@@ -747,6 +763,8 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
747 ddev->protocols |= NFC_PROTO_ISO15693_MASK; 763 ddev->protocols |= NFC_PROTO_ISO15693_MASK;
748 if (supported_protocols & NFC_PROTO_ISO14443_MASK) 764 if (supported_protocols & NFC_PROTO_ISO14443_MASK)
749 ddev->protocols |= NFC_PROTO_ISO14443_MASK; 765 ddev->protocols |= NFC_PROTO_ISO14443_MASK;
766 if (supported_protocols & NFC_PROTO_ISO14443_B_MASK)
767 ddev->protocols |= NFC_PROTO_ISO14443_B_MASK;
750 768
751 ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN; 769 ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
752 ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN; 770 ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
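
digital_poll_next_tech() now jumps to a random technology instead of cycling in order. A miniature of the selection, with rand() standing in for get_random_bytes() and an invented technology list:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        const char *techs[] = { "106A", "106B", "212F", "424F" };
        int count = sizeof(techs) / sizeof(techs[0]);

        srand((unsigned)time(NULL));
        /* Each poll round picks any configured technology with equal
         * probability rather than stepping through them in sequence. */
        for (int i = 0; i < 8; i++)
            printf("poll %s\n", techs[rand() % count]);
        return 0;
    }
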
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index d4ed25ff723f..171cb9949ab5 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -224,9 +224,8 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
224 224
225 ddev->skb_add_crc(skb); 225 ddev->skb_add_crc(skb);
226 226
227 digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, target); 227 return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res,
228 228 target);
229 return 0;
230} 229}
231 230
232static int digital_in_send_rtox(struct nfc_digital_dev *ddev, 231static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index 278c3fed27e0..c2c1c0189b7c 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -41,6 +41,24 @@
41#define DIGITAL_MIFARE_READ_RES_LEN 16 41#define DIGITAL_MIFARE_READ_RES_LEN 16
42#define DIGITAL_MIFARE_ACK_RES 0x0A 42#define DIGITAL_MIFARE_ACK_RES 0x0A
43 43
44#define DIGITAL_CMD_SENSB_REQ 0x05
45#define DIGITAL_SENSB_ADVANCED BIT(5)
46#define DIGITAL_SENSB_EXTENDED BIT(4)
47#define DIGITAL_SENSB_ALLB_REQ BIT(3)
48#define DIGITAL_SENSB_N(n) ((n) & 0x7)
49
50#define DIGITAL_CMD_SENSB_RES 0x50
51
52#define DIGITAL_CMD_ATTRIB_REQ 0x1D
53#define DIGITAL_ATTRIB_P1_TR0_DEFAULT (0x0 << 6)
54#define DIGITAL_ATTRIB_P1_TR1_DEFAULT (0x0 << 4)
55#define DIGITAL_ATTRIB_P1_SUPRESS_EOS BIT(3)
56#define DIGITAL_ATTRIB_P1_SUPRESS_SOS BIT(2)
57#define DIGITAL_ATTRIB_P2_LISTEN_POLL_1 (0x0 << 6)
58#define DIGITAL_ATTRIB_P2_POLL_LISTEN_1 (0x0 << 4)
59#define DIGITAL_ATTRIB_P2_MAX_FRAME_256 0x8
60#define DIGITAL_ATTRIB_P4_DID(n) ((n) & 0xf)
61
44#define DIGITAL_CMD_SENSF_REQ 0x00 62#define DIGITAL_CMD_SENSF_REQ 0x00
45#define DIGITAL_CMD_SENSF_RES 0x01 63#define DIGITAL_CMD_SENSF_RES 0x01
46 64
@@ -75,6 +93,7 @@ static const u8 digital_ats_fsc[] = {
75}; 93};
76 94
77#define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F) 95#define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F)
96#define DIGITAL_SENSB_FSCI(pi2) (((pi2) & 0xF0) >> 4)
78#define DIGITAL_ATS_MAX_FSC 256 97#define DIGITAL_ATS_MAX_FSC 256
79 98
80#define DIGITAL_RATS_BYTE1 0xE0 99#define DIGITAL_RATS_BYTE1 0xE0
@@ -92,6 +111,32 @@ struct digital_sel_req {
92 u8 bcc; 111 u8 bcc;
93} __packed; 112} __packed;
94 113
114struct digital_sensb_req {
115 u8 cmd;
116 u8 afi;
117 u8 param;
118} __packed;
119
120struct digital_sensb_res {
121 u8 cmd;
122 u8 nfcid0[4];
123 u8 app_data[4];
124 u8 proto_info[3];
125} __packed;
126
127struct digital_attrib_req {
128 u8 cmd;
129 u8 nfcid0[4];
130 u8 param1;
131 u8 param2;
132 u8 param3;
133 u8 param4;
134} __packed;
135
136struct digital_attrib_res {
137 u8 mbli_did;
138} __packed;
139
95struct digital_sensf_req { 140struct digital_sensf_req {
96 u8 cmd; 141 u8 cmd;
97 u8 sc1; 142 u8 sc1;
@@ -531,6 +576,175 @@ int digital_in_recv_mifare_res(struct sk_buff *resp)
531 return -EIO; 576 return -EIO;
532} 577}
533 578
579static void digital_in_recv_attrib_res(struct nfc_digital_dev *ddev, void *arg,
580 struct sk_buff *resp)
581{
582 struct nfc_target *target = arg;
583 struct digital_attrib_res *attrib_res;
584 int rc;
585
586 if (IS_ERR(resp)) {
587 rc = PTR_ERR(resp);
588 resp = NULL;
589 goto exit;
590 }
591
592 if (resp->len < sizeof(*attrib_res)) {
593 PROTOCOL_ERR("12.6.2");
594 rc = -EIO;
595 goto exit;
596 }
597
598 attrib_res = (struct digital_attrib_res *)resp->data;
599
600 if (attrib_res->mbli_did & 0x0f) {
601 PROTOCOL_ERR("12.6.2.1");
602 rc = -EIO;
603 goto exit;
604 }
605
606 rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443_B);
607
608exit:
609 dev_kfree_skb(resp);
610 kfree(target);
611
612 if (rc)
613 digital_poll_next_tech(ddev);
614}
615
616static int digital_in_send_attrib_req(struct nfc_digital_dev *ddev,
617 struct nfc_target *target,
618 struct digital_sensb_res *sensb_res)
619{
620 struct digital_attrib_req *attrib_req;
621 struct sk_buff *skb;
622 int rc;
623
624 skb = digital_skb_alloc(ddev, sizeof(*attrib_req));
625 if (!skb)
626 return -ENOMEM;
627
628 attrib_req = (struct digital_attrib_req *)skb_put(skb,
629 sizeof(*attrib_req));
630
631 attrib_req->cmd = DIGITAL_CMD_ATTRIB_REQ;
632 memcpy(attrib_req->nfcid0, sensb_res->nfcid0,
633 sizeof(attrib_req->nfcid0));
634 attrib_req->param1 = DIGITAL_ATTRIB_P1_TR0_DEFAULT |
635 DIGITAL_ATTRIB_P1_TR1_DEFAULT;
636 attrib_req->param2 = DIGITAL_ATTRIB_P2_LISTEN_POLL_1 |
637 DIGITAL_ATTRIB_P2_POLL_LISTEN_1 |
638 DIGITAL_ATTRIB_P2_MAX_FRAME_256;
639 attrib_req->param3 = sensb_res->proto_info[1] & 0x07;
640 attrib_req->param4 = DIGITAL_ATTRIB_P4_DID(0);
641
642 rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_attrib_res,
643 target);
644 if (rc)
645 kfree_skb(skb);
646
647 return rc;
648}
649
650static void digital_in_recv_sensb_res(struct nfc_digital_dev *ddev, void *arg,
651 struct sk_buff *resp)
652{
653 struct nfc_target *target = NULL;
654 struct digital_sensb_res *sensb_res;
655 u8 fsci;
656 int rc;
657
658 if (IS_ERR(resp)) {
659 rc = PTR_ERR(resp);
660 resp = NULL;
661 goto exit;
662 }
663
664 if (resp->len != sizeof(*sensb_res)) {
665 PROTOCOL_ERR("5.6.2.1");
666 rc = -EIO;
667 goto exit;
668 }
669
670 sensb_res = (struct digital_sensb_res *)resp->data;
671
672 if (sensb_res->cmd != DIGITAL_CMD_SENSB_RES) {
673 PROTOCOL_ERR("5.6.2");
674 rc = -EIO;
675 goto exit;
676 }
677
678 if (!(sensb_res->proto_info[1] & BIT(0))) {
679 PROTOCOL_ERR("5.6.2.12");
680 rc = -EIO;
681 goto exit;
682 }
683
684 if (sensb_res->proto_info[1] & BIT(3)) {
685 PROTOCOL_ERR("5.6.2.16");
686 rc = -EIO;
687 goto exit;
688 }
689
690 fsci = DIGITAL_SENSB_FSCI(sensb_res->proto_info[1]);
691 if (fsci >= 8)
692 ddev->target_fsc = DIGITAL_ATS_MAX_FSC;
693 else
694 ddev->target_fsc = digital_ats_fsc[fsci];
695
696 target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
697 if (!target) {
698 rc = -ENOMEM;
699 goto exit;
700 }
701
702 rc = digital_in_send_attrib_req(ddev, target, sensb_res);
703
704exit:
705 dev_kfree_skb(resp);
706
707 if (rc) {
708 kfree(target);
709 digital_poll_next_tech(ddev);
710 }
711}
712
713int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech)
714{
715 struct digital_sensb_req *sensb_req;
716 struct sk_buff *skb;
717 int rc;
718
719 rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
720 NFC_DIGITAL_RF_TECH_106B);
721 if (rc)
722 return rc;
723
724 rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
725 NFC_DIGITAL_FRAMING_NFCB);
726 if (rc)
727 return rc;
728
729 skb = digital_skb_alloc(ddev, sizeof(*sensb_req));
730 if (!skb)
731 return -ENOMEM;
732
733 sensb_req = (struct digital_sensb_req *)skb_put(skb,
734 sizeof(*sensb_req));
735
736 sensb_req->cmd = DIGITAL_CMD_SENSB_REQ;
737 sensb_req->afi = 0x00; /* All families and sub-families */
738 sensb_req->param = DIGITAL_SENSB_N(0);
739
740 rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensb_res,
741 NULL);
742 if (rc)
743 kfree_skb(skb);
744
745 return rc;
746}
747
534static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg, 748static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
535 struct sk_buff *resp) 749 struct sk_buff *resp)
536{ 750{
@@ -877,6 +1091,18 @@ exit:
877 dev_kfree_skb(resp); 1091 dev_kfree_skb(resp);
878} 1092}
879 1093
1094static void digital_tg_recv_atr_or_sensf_req(struct nfc_digital_dev *ddev,
1095 void *arg, struct sk_buff *resp)
1096{
1097 if (!IS_ERR(resp) && (resp->len >= 2) &&
1098 (resp->data[1] == DIGITAL_CMD_SENSF_REQ))
1099 digital_tg_recv_sensf_req(ddev, arg, resp);
1100 else
1101 digital_tg_recv_atr_req(ddev, arg, resp);
1102
1103 return;
1104}
1105
880static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev, 1106static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
881 struct digital_sensf_req *sensf_req) 1107 struct digital_sensf_req *sensf_req)
882{ 1108{
@@ -887,7 +1113,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
887 1113
888 size = sizeof(struct digital_sensf_res); 1114 size = sizeof(struct digital_sensf_res);
889 1115
890 if (sensf_req->rc != DIGITAL_SENSF_REQ_RC_NONE) 1116 if (sensf_req->rc == DIGITAL_SENSF_REQ_RC_NONE)
891 size -= sizeof(sensf_res->rd); 1117 size -= sizeof(sensf_res->rd);
892 1118
893 skb = digital_skb_alloc(ddev, size); 1119 skb = digital_skb_alloc(ddev, size);
@@ -922,7 +1148,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
922 digital_skb_add_crc_f(skb); 1148 digital_skb_add_crc_f(skb);
923 1149
924 rc = digital_tg_send_cmd(ddev, skb, 300, 1150 rc = digital_tg_send_cmd(ddev, skb, 300,
925 digital_tg_recv_atr_req, NULL); 1151 digital_tg_recv_atr_or_sensf_req, NULL);
926 if (rc) 1152 if (rc)
927 kfree_skb(skb); 1153 kfree_skb(skb);
928 1154
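
For NFC-B the frame size is negotiated through the FSCI nibble of SENSB_RES proto_info[1], reusing the digital_ats_fsc table and clamping values of 8 and above to 256 bytes. A standalone sketch of the decode; the table contents are assumed here from ISO 14443's FSCI coding, since the table body is outside this hunk:

    #include <stdio.h>

    static const unsigned short fsc_table[] = {
        16, 24, 32, 40, 48, 64, 96, 128,
    };

    /* Same extraction as DIGITAL_SENSB_FSCI(): high nibble of the
     * second protocol-info byte, clamped to the 256-byte maximum. */
    static unsigned short sensb_fsc(unsigned char pi2)
    {
        unsigned char fsci = (pi2 & 0xF0) >> 4;

        return fsci >= 8 ? 256 : fsc_table[fsci];
    }

    int main(void)
    {
        printf("pi2=0x51 -> FSC %u\n", sensb_fsc(0x51));    /* 64 */
        printf("pi2=0xF1 -> FSC %u\n", sensb_fsc(0xF1));    /* 256 */
        return 0;
    }
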
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index a9f4d2e62d8d..677d24bb70f8 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -26,6 +26,8 @@
26 26
27#include "hci.h" 27#include "hci.h"
28 28
29#define MAX_FWI 4949
30
29static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, 31static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
30 const u8 *param, size_t param_len, 32 const u8 *param, size_t param_len,
31 data_exchange_cb_t cb, void *cb_context) 33 data_exchange_cb_t cb, void *cb_context)
@@ -37,7 +39,7 @@ static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
37 * for all commands? 39 * for all commands?
38 */ 40 */
39 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd, 41 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
40 param, param_len, cb, cb_context, 3000); 42 param, param_len, cb, cb_context, MAX_FWI);
41} 43}
42 44
43/* 45/*
@@ -82,7 +84,7 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
82 NFC_HCI_HCP_COMMAND, cmd, 84 NFC_HCI_HCP_COMMAND, cmd,
83 param, param_len, 85 param, param_len,
84 nfc_hci_execute_cb, &hcp_ew, 86 nfc_hci_execute_cb, &hcp_ew,
85 3000); 87 MAX_FWI);
86 if (hcp_ew.exec_result < 0) 88 if (hcp_ew.exec_result < 0)
87 return hcp_ew.exec_result; 89 return hcp_ew.exec_result;
88 90
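
The 4949 ms bound is the ISO 14443 maximum frame waiting time: FWT = 256 * 16 / fc * 2^FWI, with carrier frequency fc = 13.56 MHz and FWI capped at 14. A one-liner to verify the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        double fc = 13.56e6;    /* ISO 14443 carrier frequency */
        /* FWT = 256 * 16 / fc * 2^FWI with FWI = 14 */
        double fwt_ms = 256.0 * 16.0 / fc * (1 << 14) * 1000.0;

        printf("max FWT = %.0f ms\n", fwt_ms);    /* prints 4949 */
        return 0;
    }
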
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index d45b638e77c7..47403705197e 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -225,7 +225,7 @@ int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
225 goto exit; 225 goto exit;
226 } 226 }
227 227
228 targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data); 228 targets->sens_res = be16_to_cpu(*(__be16 *)atqa_skb->data);
229 targets->sel_res = sak_skb->data[0]; 229 targets->sel_res = sak_skb->data[0];
230 230
231 r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, 231 r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
@@ -380,34 +380,31 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
380 if (r < 0) 380 if (r < 0)
381 goto disconnect_all; 381 goto disconnect_all;
382 382
383 if (skb->len && skb->len == strlen(hdev->init_data.session_id)) 383 if (skb->len && skb->len == strlen(hdev->init_data.session_id) &&
384 if (memcmp(hdev->init_data.session_id, skb->data, 384 (memcmp(hdev->init_data.session_id, skb->data,
385 skb->len) == 0) { 385 skb->len) == 0) && hdev->ops->load_session) {
386 /* TODO ELa: restore gate<->pipe table from 386 /* Restore gate<->pipe table from some proprietary location. */
387 * some TBD location.
388 * note: it doesn't seem possible to get the chip
389 * currently open gate/pipe table.
390 * It is only possible to obtain the supported
391 * gate list.
392 */
393 387
394 /* goto exit 388 r = hdev->ops->load_session(hdev);
395 * For now, always do a full initialization */
396 }
397 389
398 r = nfc_hci_disconnect_all_gates(hdev); 390 if (r < 0)
399 if (r < 0) 391 goto disconnect_all;
400 goto exit; 392 } else {
401 393
402 r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count, 394 r = nfc_hci_disconnect_all_gates(hdev);
403 hdev->init_data.gates); 395 if (r < 0)
404 if (r < 0) 396 goto exit;
405 goto disconnect_all;
406 397
407 r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, 398 r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
408 NFC_HCI_ADMIN_SESSION_IDENTITY, 399 hdev->init_data.gates);
409 hdev->init_data.session_id, 400 if (r < 0)
410 strlen(hdev->init_data.session_id)); 401 goto disconnect_all;
402
403 r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
404 NFC_HCI_ADMIN_SESSION_IDENTITY,
405 hdev->init_data.session_id,
406 strlen(hdev->init_data.session_id));
407 }
411 if (r == 0) 408 if (r == 0)
412 goto exit; 409 goto exit;
413 410
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index bec6ed15f503..a3ad69a4c648 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -387,7 +387,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
387 387
388 __net_timestamp(skb); 388 __net_timestamp(skb);
389 389
390 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX); 390 nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
391 391
392 return nfc_data_exchange(dev, local->target_idx, skb, 392 return nfc_data_exchange(dev, local->target_idx, skb,
393 nfc_llcp_recv, local); 393 nfc_llcp_recv, local);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b4671958fcf9..51e788797317 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -680,16 +680,17 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
680 continue; 680 continue;
681 681
682 if (skb_copy == NULL) { 682 if (skb_copy == NULL) {
683 skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE, 683 skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
684 GFP_ATOMIC); 684 GFP_ATOMIC, true);
685 685
686 if (skb_copy == NULL) 686 if (skb_copy == NULL)
687 continue; 687 continue;
688 688
689 data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE); 689 data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
690 690
691 data[0] = local->dev ? local->dev->idx : 0xFF; 691 data[0] = local->dev ? local->dev->idx : 0xFF;
692 data[1] = direction; 692 data[1] = direction & 0x01;
693 data[1] |= (RAW_PAYLOAD_LLCP << 1);
693 } 694 }
694 695
695 nskb = skb_clone(skb_copy, GFP_ATOMIC); 696 nskb = skb_clone(skb_copy, GFP_ATOMIC);
@@ -747,7 +748,7 @@ static void nfc_llcp_tx_work(struct work_struct *work)
747 __net_timestamp(skb); 748 __net_timestamp(skb);
748 749
749 nfc_llcp_send_to_raw_sock(local, skb, 750 nfc_llcp_send_to_raw_sock(local, skb,
750 NFC_LLCP_DIRECTION_TX); 751 NFC_DIRECTION_TX);
751 752
752 ret = nfc_data_exchange(local->dev, local->target_idx, 753 ret = nfc_data_exchange(local->dev, local->target_idx,
753 skb, nfc_llcp_recv, local); 754 skb, nfc_llcp_recv, local);
@@ -1476,7 +1477,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1476 1477
1477 __net_timestamp(skb); 1478 __net_timestamp(skb);
1478 1479
1479 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX); 1480 nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_RX);
1480 1481
1481 nfc_llcp_rx_skb(local, skb); 1482 nfc_llcp_rx_skb(local, skb);
1482 1483
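
Byte 1 of the raw-socket header now packs direction and payload type together: bit 0 is RX/TX and the remaining bits carry RAW_PAYLOAD_* shifted left by one. A minimal encode/decode of that byte; the RAW_PAYLOAD_* values shown are illustrative stand-ins for the uapi constants:

    #include <stdio.h>

    #define DIR_RX 0
    #define DIR_TX 1
    #define RAW_PAYLOAD_LLCP 0    /* illustrative values */
    #define RAW_PAYLOAD_NCI  1

    static unsigned char mk_hdr1(unsigned char dir, unsigned char type)
    {
        return (dir & 0x01) | (type << 1);
    }

    int main(void)
    {
        unsigned char b = mk_hdr1(DIR_TX, RAW_PAYLOAD_NCI);

        printf("byte=0x%02x dir=%u type=%u\n", b, b & 1, b >> 1);
        return 0;
    }
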
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 6c34ac978501..2b400e1a8695 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -861,6 +861,10 @@ static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
861 /* Get rid of skb owner, prior to sending to the driver. */ 861 /* Get rid of skb owner, prior to sending to the driver. */
862 skb_orphan(skb); 862 skb_orphan(skb);
863 863
864 /* Send copy to sniffer */
865 nfc_send_to_raw_sock(ndev->nfc_dev, skb,
866 RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);
867
864 return ndev->ops->send(ndev, skb); 868 return ndev->ops->send(ndev, skb);
865} 869}
866 870
@@ -935,6 +939,11 @@ static void nci_rx_work(struct work_struct *work)
935 struct sk_buff *skb; 939 struct sk_buff *skb;
936 940
937 while ((skb = skb_dequeue(&ndev->rx_q))) { 941 while ((skb = skb_dequeue(&ndev->rx_q))) {
942
943 /* Send copy to sniffer */
944 nfc_send_to_raw_sock(ndev->nfc_dev, skb,
945 RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
946
938 /* Process frame */ 947 /* Process frame */
939 switch (nci_mt(skb->data)) { 948 switch (nci_mt(skb->data)) {
940 case NCI_MT_RSP_PKT: 949 case NCI_MT_RSP_PKT:
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 1e905097456b..f8f6af231381 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -366,7 +366,6 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
366 struct nci_rf_intf_activated_ntf *ntf, __u8 *data) 366 struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
367{ 367{
368 struct activation_params_poll_nfc_dep *poll; 368 struct activation_params_poll_nfc_dep *poll;
369 int i;
370 369
371 switch (ntf->activation_rf_tech_and_mode) { 370 switch (ntf->activation_rf_tech_and_mode) {
372 case NCI_NFC_A_PASSIVE_POLL_MODE: 371 case NCI_NFC_A_PASSIVE_POLL_MODE:
@@ -374,10 +373,8 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
374 poll = &ntf->activation_params.poll_nfc_dep; 373 poll = &ntf->activation_params.poll_nfc_dep;
375 poll->atr_res_len = min_t(__u8, *data++, 63); 374 poll->atr_res_len = min_t(__u8, *data++, 63);
376 pr_debug("atr_res_len %d\n", poll->atr_res_len); 375 pr_debug("atr_res_len %d\n", poll->atr_res_len);
377 if (poll->atr_res_len > 0) { 376 if (poll->atr_res_len > 0)
378 for (i = 0; i < poll->atr_res_len; i++) 377 memcpy(poll->atr_res, data, poll->atr_res_len);
379 poll->atr_res[poll->atr_res_len-1-i] = data[i];
380 }
381 break; 378 break;
382 379
383 default: 380 default:
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 9d6e74f7e6b3..88d60064890e 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -40,6 +40,12 @@ struct nfc_rawsock {
40 struct work_struct tx_work; 40 struct work_struct tx_work;
41 bool tx_work_scheduled; 41 bool tx_work_scheduled;
42}; 42};
43
44struct nfc_sock_list {
45 struct hlist_head head;
46 rwlock_t lock;
47};
48
43#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk) 49#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
44#define to_rawsock_sk(_tx_work) \ 50#define to_rawsock_sk(_tx_work) \
45 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work)) 51 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index c27a6e86cae4..11c3544ea546 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -27,6 +27,24 @@
27 27
28#include "nfc.h" 28#include "nfc.h"
29 29
30static struct nfc_sock_list raw_sk_list = {
31 .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock)
32};
33
34static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)
35{
36 write_lock(&l->lock);
37 sk_add_node(sk, &l->head);
38 write_unlock(&l->lock);
39}
40
41static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)
42{
43 write_lock(&l->lock);
44 sk_del_node_init(sk);
45 write_unlock(&l->lock);
46}
47
30static void rawsock_write_queue_purge(struct sock *sk) 48static void rawsock_write_queue_purge(struct sock *sk)
31{ 49{
32 pr_debug("sk=%p\n", sk); 50 pr_debug("sk=%p\n", sk);
@@ -57,6 +75,9 @@ static int rawsock_release(struct socket *sock)
57 if (!sk) 75 if (!sk)
58 return 0; 76 return 0;
59 77
78 if (sock->type == SOCK_RAW)
79 nfc_sock_unlink(&raw_sk_list, sk);
80
60 sock_orphan(sk); 81 sock_orphan(sk);
61 sock_put(sk); 82 sock_put(sk);
62 83
@@ -275,6 +296,26 @@ static const struct proto_ops rawsock_ops = {
275 .mmap = sock_no_mmap, 296 .mmap = sock_no_mmap,
276}; 297};
277 298
299static const struct proto_ops rawsock_raw_ops = {
300 .family = PF_NFC,
301 .owner = THIS_MODULE,
302 .release = rawsock_release,
303 .bind = sock_no_bind,
304 .connect = sock_no_connect,
305 .socketpair = sock_no_socketpair,
306 .accept = sock_no_accept,
307 .getname = sock_no_getname,
308 .poll = datagram_poll,
309 .ioctl = sock_no_ioctl,
310 .listen = sock_no_listen,
311 .shutdown = sock_no_shutdown,
312 .setsockopt = sock_no_setsockopt,
313 .getsockopt = sock_no_getsockopt,
314 .sendmsg = sock_no_sendmsg,
315 .recvmsg = rawsock_recvmsg,
316 .mmap = sock_no_mmap,
317};
318
278static void rawsock_destruct(struct sock *sk) 319static void rawsock_destruct(struct sock *sk)
279{ 320{
280 pr_debug("sk=%p\n", sk); 321 pr_debug("sk=%p\n", sk);
@@ -300,10 +341,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
300 341
301 pr_debug("sock=%p\n", sock); 342 pr_debug("sock=%p\n", sock);
302 343
303 if (sock->type != SOCK_SEQPACKET) 344 if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
304 return -ESOCKTNOSUPPORT; 345 return -ESOCKTNOSUPPORT;
305 346
306 sock->ops = &rawsock_ops; 347 if (sock->type == SOCK_RAW)
348 sock->ops = &rawsock_raw_ops;
349 else
350 sock->ops = &rawsock_ops;
307 351
308 sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto); 352 sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
309 if (!sk) 353 if (!sk)
@@ -313,13 +357,53 @@ static int rawsock_create(struct net *net, struct socket *sock,
313 sk->sk_protocol = nfc_proto->id; 357 sk->sk_protocol = nfc_proto->id;
314 sk->sk_destruct = rawsock_destruct; 358 sk->sk_destruct = rawsock_destruct;
315 sock->state = SS_UNCONNECTED; 359 sock->state = SS_UNCONNECTED;
316 360 if (sock->type == SOCK_RAW)
317 INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); 361 nfc_sock_link(&raw_sk_list, sk);
318 nfc_rawsock(sk)->tx_work_scheduled = false; 362 else {
363 INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
364 nfc_rawsock(sk)->tx_work_scheduled = false;
365 }
319 366
320 return 0; 367 return 0;
321} 368}
322 369
370void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
371 u8 payload_type, u8 direction)
372{
373 struct sk_buff *skb_copy = NULL, *nskb;
374 struct sock *sk;
375 u8 *data;
376
377 read_lock(&raw_sk_list.lock);
378
379 sk_for_each(sk, &raw_sk_list.head) {
380 if (!skb_copy) {
381 skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
382 GFP_ATOMIC, true);
383 if (!skb_copy)
384 continue;
385
386 data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
387
388 data[0] = dev ? dev->idx : 0xFF;
389 data[1] = direction & 0x01;
390 data[1] |= (payload_type << 1);
391 }
392
393 nskb = skb_clone(skb_copy, GFP_ATOMIC);
394 if (!nskb)
395 continue;
396
397 if (sock_queue_rcv_skb(sk, nskb))
398 kfree_skb(nskb);
399 }
400
401 read_unlock(&raw_sk_list.lock);
402
403 kfree_skb(skb_copy);
404}
405EXPORT_SYMBOL(nfc_send_to_raw_sock);
406
323static struct proto rawsock_proto = { 407static struct proto rawsock_proto = {
324 .name = "NFC_RAW", 408 .name = "NFC_RAW",
325 .owner = THIS_MODULE, 409 .owner = THIS_MODULE,
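
A hedged sketch of a userspace consumer for the new SOCK_RAW NFC socket: the protocol constant comes from <linux/nfc.h>, AF_NFC is 39 per linux/socket.h, and the two-byte header layout mirrors nfc_send_to_raw_sock() above. Device selection and error handling are omitted:

    #include <linux/nfc.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_NFC
    #define AF_NFC 39    /* PF_NFC, per linux/socket.h */
    #endif

    int main(void)
    {
        unsigned char buf[512];
        int fd = socket(AF_NFC, SOCK_RAW, NFC_SOCKPROTO_RAW);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        for (;;) {
            ssize_t n = read(fd, buf, sizeof(buf));

            if (n < 2)
                break;
            /* byte 0: device index; byte 1: direction and payload type */
            printf("dev=%d dir=%s type=%d len=%zd\n", buf[0],
                   (buf[1] & 1) ? "tx" : "rx", buf[1] >> 1, n - 2);
        }
        close(fd);
        return 0;
    }
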
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2c77e7b1a913..c36856a457ca 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -134,8 +134,8 @@ static int set_eth_addr(struct sk_buff *skb,
134 134
135 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); 135 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
136 136
137 memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN); 137 ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
138 memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN); 138 ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
139 139
140 ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); 140 ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
141 141
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a3276e3c4feb..0d407bca81e3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -44,11 +44,11 @@
44#include <linux/netfilter_ipv4.h> 44#include <linux/netfilter_ipv4.h>
45#include <linux/inetdevice.h> 45#include <linux/inetdevice.h>
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/lockdep.h>
48#include <linux/openvswitch.h> 47#include <linux/openvswitch.h>
49#include <linux/rculist.h> 48#include <linux/rculist.h>
50#include <linux/dmi.h> 49#include <linux/dmi.h>
51#include <linux/workqueue.h> 50#include <linux/genetlink.h>
51#include <net/genetlink.h>
52#include <net/genetlink.h> 52#include <net/genetlink.h>
53#include <net/net_namespace.h> 53#include <net/net_namespace.h>
54#include <net/netns/generic.h> 54#include <net/netns/generic.h>
@@ -62,6 +62,31 @@
62 62
63int ovs_net_id __read_mostly; 63int ovs_net_id __read_mostly;
64 64
65static struct genl_family dp_packet_genl_family;
66static struct genl_family dp_flow_genl_family;
67static struct genl_family dp_datapath_genl_family;
68
69static struct genl_multicast_group ovs_dp_flow_multicast_group = {
70 .name = OVS_FLOW_MCGROUP
71};
72
73static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
74 .name = OVS_DATAPATH_MCGROUP
75};
76
77struct genl_multicast_group ovs_dp_vport_multicast_group = {
78 .name = OVS_VPORT_MCGROUP
79};
80
81/* Check if we need to build a reply message.
82 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
83static bool ovs_must_notify(struct genl_info *info,
84 const struct genl_multicast_group *grp)
85{
86 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
87 netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
88}
89
65static void ovs_notify(struct genl_family *family, 90static void ovs_notify(struct genl_family *family,
66 struct sk_buff *skb, struct genl_info *info) 91 struct sk_buff *skb, struct genl_info *info)
67{ 92{
@@ -173,6 +198,7 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
173 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)]; 198 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
174} 199}
175 200
201/* Called with ovs_mutex or RCU read lock. */
176struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) 202struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
177{ 203{
178 struct vport *vport; 204 struct vport *vport;
@@ -262,16 +288,6 @@ out:
262 u64_stats_update_end(&stats->syncp); 288 u64_stats_update_end(&stats->syncp);
263} 289}
264 290
265static struct genl_family dp_packet_genl_family = {
266 .id = GENL_ID_GENERATE,
267 .hdrsize = sizeof(struct ovs_header),
268 .name = OVS_PACKET_FAMILY,
269 .version = OVS_PACKET_VERSION,
270 .maxattr = OVS_PACKET_ATTR_MAX,
271 .netnsok = true,
272 .parallel_ops = true,
273};
274
275int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, 291int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
276 const struct dp_upcall_info *upcall_info) 292 const struct dp_upcall_info *upcall_info)
277{ 293{
@@ -524,7 +540,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
524 packet->protocol = htons(ETH_P_802_2); 540 packet->protocol = htons(ETH_P_802_2);
525 541
526 /* Build an sw_flow for sending this packet. */ 542 /* Build an sw_flow for sending this packet. */
527 flow = ovs_flow_alloc(false); 543 flow = ovs_flow_alloc();
528 err = PTR_ERR(flow); 544 err = PTR_ERR(flow);
529 if (IS_ERR(flow)) 545 if (IS_ERR(flow))
530 goto err_kfree_skb; 546 goto err_kfree_skb;
@@ -590,6 +606,18 @@ static const struct genl_ops dp_packet_genl_ops[] = {
590 } 606 }
591}; 607};
592 608
609static struct genl_family dp_packet_genl_family = {
610 .id = GENL_ID_GENERATE,
611 .hdrsize = sizeof(struct ovs_header),
612 .name = OVS_PACKET_FAMILY,
613 .version = OVS_PACKET_VERSION,
614 .maxattr = OVS_PACKET_ATTR_MAX,
615 .netnsok = true,
616 .parallel_ops = true,
617 .ops = dp_packet_genl_ops,
618 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
619};
620
593static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, 621static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
594 struct ovs_dp_megaflow_stats *mega_stats) 622 struct ovs_dp_megaflow_stats *mega_stats)
595{ 623{
@@ -621,26 +649,6 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
621 } 649 }
622} 650}
623 651
624static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
625 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
626 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
627 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
628};
629
630static struct genl_family dp_flow_genl_family = {
631 .id = GENL_ID_GENERATE,
632 .hdrsize = sizeof(struct ovs_header),
633 .name = OVS_FLOW_FAMILY,
634 .version = OVS_FLOW_VERSION,
635 .maxattr = OVS_FLOW_ATTR_MAX,
636 .netnsok = true,
637 .parallel_ops = true,
638};
639
640static struct genl_multicast_group ovs_dp_flow_multicast_group = {
641 .name = OVS_FLOW_MCGROUP
642};
643
644static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) 652static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
645{ 653{
646 return NLMSG_ALIGN(sizeof(struct ovs_header)) 654 return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -652,8 +660,8 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
652 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ 660 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
653} 661}
654 662
655/* Called with ovs_mutex. */ 663/* Called with ovs_mutex or RCU read lock. */
656static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, 664static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
657 struct sk_buff *skb, u32 portid, 665 struct sk_buff *skb, u32 portid,
658 u32 seq, u32 flags, u8 cmd) 666 u32 seq, u32 flags, u8 cmd)
659{ 667{
@@ -670,7 +678,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
670 if (!ovs_header) 678 if (!ovs_header)
671 return -EMSGSIZE; 679 return -EMSGSIZE;
672 680
673 ovs_header->dp_ifindex = get_dpifindex(dp); 681 ovs_header->dp_ifindex = dp_ifindex;
674 682
675 /* Fill flow key. */ 683 /* Fill flow key. */
676 nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); 684 nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
@@ -693,6 +701,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
693 nla_nest_end(skb, nla); 701 nla_nest_end(skb, nla);
694 702
695 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); 703 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
704
696 if (used && 705 if (used &&
697 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) 706 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
698 goto nla_put_failure; 707 goto nla_put_failure;
@@ -720,9 +729,9 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
720 const struct sw_flow_actions *sf_acts; 729 const struct sw_flow_actions *sf_acts;
721 730
722 sf_acts = rcu_dereference_ovsl(flow->sf_acts); 731 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
723
724 err = ovs_nla_put_actions(sf_acts->actions, 732 err = ovs_nla_put_actions(sf_acts->actions,
725 sf_acts->actions_len, skb); 733 sf_acts->actions_len, skb);
734
726 if (!err) 735 if (!err)
727 nla_nest_end(skb, start); 736 nla_nest_end(skb, start);
728 else { 737 else {
@@ -743,113 +752,128 @@ error:
743 return err; 752 return err;
744} 753}
745 754
746static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow, 755/* May not be called with RCU read lock. */
747 struct genl_info *info) 756static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
757 struct genl_info *info,
758 bool always)
748{ 759{
749 size_t len; 760 struct sk_buff *skb;
761
762 if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
763 return NULL;
750 764
751 len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts)); 765 skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
766 if (!skb)
767 return ERR_PTR(-ENOMEM);
752 768
753 return genlmsg_new_unicast(len, info, GFP_KERNEL); 769 return skb;
754} 770}
755 771
756static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, 772/* Called with ovs_mutex. */
757 struct datapath *dp, 773static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
758 struct genl_info *info, 774 int dp_ifindex,
759 u8 cmd) 775 struct genl_info *info, u8 cmd,
776 bool always)
760{ 777{
761 struct sk_buff *skb; 778 struct sk_buff *skb;
762 int retval; 779 int retval;
763 780
764 skb = ovs_flow_cmd_alloc_info(flow, info); 781 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
765 if (!skb) 782 always);
766 return ERR_PTR(-ENOMEM); 783 if (!skb || IS_ERR(skb))
784 return skb;
767 785
768 retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid, 786 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
769 info->snd_seq, 0, cmd); 787 info->snd_portid, info->snd_seq, 0,
788 cmd);
770 BUG_ON(retval < 0); 789 BUG_ON(retval < 0);
771 return skb; 790 return skb;
772} 791}
773 792
774static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) 793static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
775{ 794{
776 struct nlattr **a = info->attrs; 795 struct nlattr **a = info->attrs;
777 struct ovs_header *ovs_header = info->userhdr; 796 struct ovs_header *ovs_header = info->userhdr;
778 struct sw_flow_key key, masked_key; 797 struct sw_flow *flow, *new_flow;
779 struct sw_flow *flow = NULL;
780 struct sw_flow_mask mask; 798 struct sw_flow_mask mask;
781 struct sk_buff *reply; 799 struct sk_buff *reply;
782 struct datapath *dp; 800 struct datapath *dp;
783 struct sw_flow_actions *acts = NULL; 801 struct sw_flow_actions *acts;
784 struct sw_flow_match match; 802 struct sw_flow_match match;
785 bool exact_5tuple;
786 int error; 803 int error;
787 804
788 /* Extract key. */ 805 /* Must have key and actions. */
789 error = -EINVAL; 806 error = -EINVAL;
790 if (!a[OVS_FLOW_ATTR_KEY]) 807 if (!a[OVS_FLOW_ATTR_KEY])
791 goto error; 808 goto error;
809 if (!a[OVS_FLOW_ATTR_ACTIONS])
810 goto error;
792 811
793 ovs_match_init(&match, &key, &mask); 812 /* Most of the time we need to allocate a new flow, so do it before
794 error = ovs_nla_get_match(&match, &exact_5tuple, 813 * locking.
814 */
815 new_flow = ovs_flow_alloc();
816 if (IS_ERR(new_flow)) {
817 error = PTR_ERR(new_flow);
818 goto error;
819 }
820
821 /* Extract key. */
822 ovs_match_init(&match, &new_flow->unmasked_key, &mask);
823 error = ovs_nla_get_match(&match,
795 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); 824 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
796 if (error) 825 if (error)
797 goto error; 826 goto err_kfree_flow;
827
828 ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
798 829
799 /* Validate actions. */ 830 /* Validate actions. */
800 if (a[OVS_FLOW_ATTR_ACTIONS]) { 831 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
801 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); 832 error = PTR_ERR(acts);
802 error = PTR_ERR(acts); 833 if (IS_ERR(acts))
803 if (IS_ERR(acts)) 834 goto err_kfree_flow;
804 goto error;
805 835
806 ovs_flow_mask_key(&masked_key, &key, &mask); 836 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
807 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], 837 0, &acts);
808 &masked_key, 0, &acts); 838 if (error) {
809 if (error) { 839 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
810 OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); 840 goto err_kfree_acts;
811 goto err_kfree; 841 }
812 } 842
813 } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { 843 reply = ovs_flow_cmd_alloc_info(acts, info, false);
814 error = -EINVAL; 844 if (IS_ERR(reply)) {
815 goto error; 845 error = PTR_ERR(reply);
846 goto err_kfree_acts;
816 } 847 }
817 848
818 ovs_lock(); 849 ovs_lock();
819 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 850 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
820 error = -ENODEV; 851 if (unlikely(!dp)) {
821 if (!dp) 852 error = -ENODEV;
822 goto err_unlock_ovs; 853 goto err_unlock_ovs;
823 854 }
824 /* Check if this is a duplicate flow */ 855 /* Check if this is a duplicate flow */
825 flow = ovs_flow_tbl_lookup(&dp->table, &key); 856 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
826 if (!flow) { 857 if (likely(!flow)) {
827 /* Bail out if we're not allowed to create a new flow. */ 858 rcu_assign_pointer(new_flow->sf_acts, acts);
828 error = -ENOENT;
829 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
830 goto err_unlock_ovs;
831
832 /* Allocate flow. */
833 flow = ovs_flow_alloc(!exact_5tuple);
834 if (IS_ERR(flow)) {
835 error = PTR_ERR(flow);
836 goto err_unlock_ovs;
837 }
838
839 flow->key = masked_key;
840 flow->unmasked_key = key;
841 rcu_assign_pointer(flow->sf_acts, acts);
842 859
843 /* Put flow in bucket. */ 860 /* Put flow in bucket. */
844 error = ovs_flow_tbl_insert(&dp->table, flow, &mask); 861 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
845 if (error) { 862 if (unlikely(error)) {
846 acts = NULL; 863 acts = NULL;
847 goto err_flow_free; 864 goto err_unlock_ovs;
848 } 865 }
849 866
850 reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW); 867 if (unlikely(reply)) {
868 error = ovs_flow_cmd_fill_info(new_flow,
869 ovs_header->dp_ifindex,
870 reply, info->snd_portid,
871 info->snd_seq, 0,
872 OVS_FLOW_CMD_NEW);
873 BUG_ON(error < 0);
874 }
875 ovs_unlock();
851 } else { 876 } else {
852 /* We found a matching flow. */
853 struct sw_flow_actions *old_acts; 877 struct sw_flow_actions *old_acts;
854 878
855 /* Bail out if we're not allowed to modify an existing flow. 879 /* Bail out if we're not allowed to modify an existing flow.
@@ -858,40 +882,154 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
858 * request. We also accept NLM_F_EXCL in case that bug ever 882 * request. We also accept NLM_F_EXCL in case that bug ever
859 * gets fixed. 883 * gets fixed.
860 */ 884 */
861 error = -EEXIST; 885 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
862 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && 886 | NLM_F_EXCL))) {
863 info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) 887 error = -EEXIST;
864 goto err_unlock_ovs; 888 goto err_unlock_ovs;
865 889 }
866 /* The unmasked key has to be the same for flow updates. */ 890 /* The unmasked key has to be the same for flow updates. */
867 if (!ovs_flow_cmp_unmasked_key(flow, &match)) 891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST;
868 goto err_unlock_ovs; 893 goto err_unlock_ovs;
869 894 }
870 /* Update actions. */ 895 /* Update actions. */
871 old_acts = ovsl_dereference(flow->sf_acts); 896 old_acts = ovsl_dereference(flow->sf_acts);
872 rcu_assign_pointer(flow->sf_acts, acts); 897 rcu_assign_pointer(flow->sf_acts, acts);
898
899 if (unlikely(reply)) {
900 error = ovs_flow_cmd_fill_info(flow,
901 ovs_header->dp_ifindex,
902 reply, info->snd_portid,
903 info->snd_seq, 0,
904 OVS_FLOW_CMD_NEW);
905 BUG_ON(error < 0);
906 }
907 ovs_unlock();
908
873 ovs_nla_free_flow_actions(old_acts); 909 ovs_nla_free_flow_actions(old_acts);
910 ovs_flow_free(new_flow, false);
911 }
912
913 if (reply)
914 ovs_notify(&dp_flow_genl_family, reply, info);
915 return 0;
916
917err_unlock_ovs:
918 ovs_unlock();
919 kfree_skb(reply);
920err_kfree_acts:
921 kfree(acts);
922err_kfree_flow:
923 ovs_flow_free(new_flow, false);
924error:
925 return error;
926}
927
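Note how the handler above front-loads every sleeping allocation (flow, actions, reply skb) so that ovs_lock() guards little more than the table insert. A generic sketch of that pattern, assuming linking is cheap and allocation may sleep:

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_list);

struct demo_entry {
	struct list_head node;
	int payload;
};

static int demo_add(int payload)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL); /* may sleep */

	if (!e)
		return -ENOMEM;
	e->payload = payload;

	mutex_lock(&demo_lock);		/* critical section: list update only */
	list_add_tail(&e->node, &demo_list);
	mutex_unlock(&demo_lock);
	return 0;
}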
928static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
929{
930 struct nlattr **a = info->attrs;
931 struct ovs_header *ovs_header = info->userhdr;
932 struct sw_flow_key key, masked_key;
933 struct sw_flow *flow;
934 struct sw_flow_mask mask;
935 struct sk_buff *reply = NULL;
936 struct datapath *dp;
937 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
938 struct sw_flow_match match;
939 int error;
940
941 /* Extract key. */
942 error = -EINVAL;
943 if (!a[OVS_FLOW_ATTR_KEY])
944 goto error;
945
946 ovs_match_init(&match, &key, &mask);
947 error = ovs_nla_get_match(&match,
948 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
949 if (error)
950 goto error;
951
952 /* Validate actions. */
953 if (a[OVS_FLOW_ATTR_ACTIONS]) {
954 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
955 error = PTR_ERR(acts);
956 if (IS_ERR(acts))
957 goto error;
958
959 ovs_flow_mask_key(&masked_key, &key, &mask);
960 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
961 &masked_key, 0, &acts);
962 if (error) {
963 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
964 goto err_kfree_acts;
965 }
966 }
967
968 /* With acts in hand, the reply can be allocated before locking. */
969 if (acts) {
970 reply = ovs_flow_cmd_alloc_info(acts, info, false);
971 if (IS_ERR(reply)) {
972 error = PTR_ERR(reply);
973 goto err_kfree_acts;
974 }
975 }
874 976
875 reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW); 977 ovs_lock();
978 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
979 if (unlikely(!dp)) {
980 error = -ENODEV;
981 goto err_unlock_ovs;
982 }
983 /* Check that the flow exists. */
984 flow = ovs_flow_tbl_lookup(&dp->table, &key);
985 if (unlikely(!flow)) {
986 error = -ENOENT;
987 goto err_unlock_ovs;
988 }
989 /* The unmasked key has to be the same for flow updates. */
990 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
991 error = -EEXIST;
992 goto err_unlock_ovs;
993 }
994 /* Update actions, if present. */
995 if (likely(acts)) {
996 old_acts = ovsl_dereference(flow->sf_acts);
997 rcu_assign_pointer(flow->sf_acts, acts);
876 998
877 /* Clear stats. */ 999 if (unlikely(reply)) {
878 if (a[OVS_FLOW_ATTR_CLEAR]) 1000 error = ovs_flow_cmd_fill_info(flow,
879 ovs_flow_stats_clear(flow); 1001 ovs_header->dp_ifindex,
1002 reply, info->snd_portid,
1003 info->snd_seq, 0,
1004 OVS_FLOW_CMD_NEW);
1005 BUG_ON(error < 0);
1006 }
1007 } else {
1008 /* Without new acts the reply could not be sized before locking; build it now. */
1009 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1010 info, OVS_FLOW_CMD_NEW, false);
1011 if (unlikely(IS_ERR(reply))) {
1012 error = PTR_ERR(reply);
1013 goto err_unlock_ovs;
1014 }
880 } 1015 }
1016
1017 /* Clear stats. */
1018 if (a[OVS_FLOW_ATTR_CLEAR])
1019 ovs_flow_stats_clear(flow);
881 ovs_unlock(); 1020 ovs_unlock();
882 1021
883 if (!IS_ERR(reply)) 1022 if (reply)
884 ovs_notify(&dp_flow_genl_family, reply, info); 1023 ovs_notify(&dp_flow_genl_family, reply, info);
885 else 1024 if (old_acts)
886 genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0, 1025 ovs_nla_free_flow_actions(old_acts);
887 0, PTR_ERR(reply)); 1026
888 return 0; 1027 return 0;
889 1028
890err_flow_free:
891 ovs_flow_free(flow, false);
892err_unlock_ovs: 1029err_unlock_ovs:
893 ovs_unlock(); 1030 ovs_unlock();
894err_kfree: 1031 kfree_skb(reply);
1032err_kfree_acts:
895 kfree(acts); 1033 kfree(acts);
896error: 1034error:
897 return error; 1035 return error;
@@ -914,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
914 } 1052 }
915 1053
916 ovs_match_init(&match, &key, NULL); 1054 ovs_match_init(&match, &key, NULL);
917 err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL); 1055 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
918 if (err) 1056 if (err)
919 return err; 1057 return err;
920 1058
@@ -931,7 +1069,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
931 goto unlock; 1069 goto unlock;
932 } 1070 }
933 1071
934 reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW); 1072 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1073 OVS_FLOW_CMD_NEW, true);
935 if (IS_ERR(reply)) { 1074 if (IS_ERR(reply)) {
936 err = PTR_ERR(reply); 1075 err = PTR_ERR(reply);
937 goto unlock; 1076 goto unlock;
@@ -955,45 +1094,53 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
955 struct sw_flow_match match; 1094 struct sw_flow_match match;
956 int err; 1095 int err;
957 1096
1097 if (likely(a[OVS_FLOW_ATTR_KEY])) {
1098 ovs_match_init(&match, &key, NULL);
1099 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1100 if (unlikely(err))
1101 return err;
1102 }
1103
958 ovs_lock(); 1104 ovs_lock();
959 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1105 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
960 if (!dp) { 1106 if (unlikely(!dp)) {
961 err = -ENODEV; 1107 err = -ENODEV;
962 goto unlock; 1108 goto unlock;
963 } 1109 }
964 1110
965 if (!a[OVS_FLOW_ATTR_KEY]) { 1111 if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
966 err = ovs_flow_tbl_flush(&dp->table); 1112 err = ovs_flow_tbl_flush(&dp->table);
967 goto unlock; 1113 goto unlock;
968 } 1114 }
969 1115
970 ovs_match_init(&match, &key, NULL);
971 err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
972 if (err)
973 goto unlock;
974
975 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1116 flow = ovs_flow_tbl_lookup(&dp->table, &key);
976 if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { 1117 if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
977 err = -ENOENT; 1118 err = -ENOENT;
978 goto unlock; 1119 goto unlock;
979 } 1120 }
980 1121
981 reply = ovs_flow_cmd_alloc_info(flow, info);
982 if (!reply) {
983 err = -ENOMEM;
984 goto unlock;
985 }
986
987 ovs_flow_tbl_remove(&dp->table, flow); 1122 ovs_flow_tbl_remove(&dp->table, flow);
1123 ovs_unlock();
988 1124
989 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, 1125 reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
990 info->snd_seq, 0, OVS_FLOW_CMD_DEL); 1126 info, false);
991 BUG_ON(err < 0); 1127 if (likely(reply)) {
1128 if (likely(!IS_ERR(reply))) {
1129 rcu_read_lock(); /* To keep RCU checker happy. */
1130 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1131 reply, info->snd_portid,
1132 info->snd_seq, 0,
1133 OVS_FLOW_CMD_DEL);
1134 rcu_read_unlock();
1135 BUG_ON(err < 0);
1136
1137 ovs_notify(&dp_flow_genl_family, reply, info);
1138 } else {
1139 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1140 }
1141 }
992 1142
993 ovs_flow_free(flow, true); 1143 ovs_flow_free(flow, true);
994 ovs_unlock();
995
996 ovs_notify(&dp_flow_genl_family, reply, info);
997 return 0; 1144 return 0;
998unlock: 1145unlock:
999 ovs_unlock(); 1146 ovs_unlock();
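The delete path above may drop ovs_mutex before filling the reply because ovs_flow_free(flow, true) requests deferred destruction: the flow stays valid for concurrent RCU readers until a grace period elapses. The lifetime rule, sketched with a hypothetical item type:

struct demo_item {
	struct rcu_head rcu;
	int payload;
};

static void demo_item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_item, rcu));
}

static void demo_item_remove(struct demo_item *item)
{
	/* Unlink 'item' under the writer lock first; readers inside
	 * rcu_read_lock() sections may still dereference it until the
	 * grace period ends, so free via call_rcu(). */
	call_rcu(&item->rcu, demo_item_free_rcu);
}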
@@ -1024,7 +1171,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1024 if (!flow) 1171 if (!flow)
1025 break; 1172 break;
1026 1173
1027 if (ovs_flow_cmd_fill_info(flow, dp, skb, 1174 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1028 NETLINK_CB(cb->skb).portid, 1175 NETLINK_CB(cb->skb).portid,
1029 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1176 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1030 OVS_FLOW_CMD_NEW) < 0) 1177 OVS_FLOW_CMD_NEW) < 0)
@@ -1037,11 +1184,17 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1037 return skb->len; 1184 return skb->len;
1038} 1185}
1039 1186
1040static const struct genl_ops dp_flow_genl_ops[] = { 1187static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1188 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1189 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1190 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1191};
1192
1193static struct genl_ops dp_flow_genl_ops[] = {
1041 { .cmd = OVS_FLOW_CMD_NEW, 1194 { .cmd = OVS_FLOW_CMD_NEW,
1042 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1195 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1043 .policy = flow_policy, 1196 .policy = flow_policy,
1044 .doit = ovs_flow_cmd_new_or_set 1197 .doit = ovs_flow_cmd_new
1045 }, 1198 },
1046 { .cmd = OVS_FLOW_CMD_DEL, 1199 { .cmd = OVS_FLOW_CMD_DEL,
1047 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1200 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1057,28 +1210,22 @@ static const struct genl_ops dp_flow_genl_ops[] = {
1057 { .cmd = OVS_FLOW_CMD_SET, 1210 { .cmd = OVS_FLOW_CMD_SET,
1058 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1211 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1059 .policy = flow_policy, 1212 .policy = flow_policy,
1060 .doit = ovs_flow_cmd_new_or_set, 1213 .doit = ovs_flow_cmd_set,
1061 }, 1214 },
1062}; 1215};
1063 1216
1064static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { 1217static struct genl_family dp_flow_genl_family = {
1065 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1066 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1067 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1068};
1069
1070static struct genl_family dp_datapath_genl_family = {
1071 .id = GENL_ID_GENERATE, 1218 .id = GENL_ID_GENERATE,
1072 .hdrsize = sizeof(struct ovs_header), 1219 .hdrsize = sizeof(struct ovs_header),
1073 .name = OVS_DATAPATH_FAMILY, 1220 .name = OVS_FLOW_FAMILY,
1074 .version = OVS_DATAPATH_VERSION, 1221 .version = OVS_FLOW_VERSION,
1075 .maxattr = OVS_DP_ATTR_MAX, 1222 .maxattr = OVS_FLOW_ATTR_MAX,
1076 .netnsok = true, 1223 .netnsok = true,
1077 .parallel_ops = true, 1224 .parallel_ops = true,
1078}; 1225 .ops = dp_flow_genl_ops,
1079 1226 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1080static struct genl_multicast_group ovs_dp_datapath_multicast_group = { 1227 .mcgrps = &ovs_dp_flow_multicast_group,
1081 .name = OVS_DATAPATH_MCGROUP 1228 .n_mcgrps = 1,
1082}; 1229};
1083 1230
1084static size_t ovs_dp_cmd_msg_size(void) 1231static size_t ovs_dp_cmd_msg_size(void)
@@ -1093,6 +1240,7 @@ static size_t ovs_dp_cmd_msg_size(void)
1093 return msgsize; 1240 return msgsize;
1094} 1241}
1095 1242
1243/* Called with ovs_mutex or RCU read lock. */
1096static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, 1244static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1097 u32 portid, u32 seq, u32 flags, u8 cmd) 1245 u32 portid, u32 seq, u32 flags, u8 cmd)
1098{ 1246{
@@ -1108,9 +1256,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1108 1256
1109 ovs_header->dp_ifindex = get_dpifindex(dp); 1257 ovs_header->dp_ifindex = get_dpifindex(dp);
1110 1258
1111 rcu_read_lock();
1112 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); 1259 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1113 rcu_read_unlock();
1114 if (err) 1260 if (err)
1115 goto nla_put_failure; 1261 goto nla_put_failure;
1116 1262
@@ -1135,25 +1281,12 @@ error:
1135 return -EMSGSIZE; 1281 return -EMSGSIZE;
1136} 1282}
1137 1283
1138static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, 1284static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1139 struct genl_info *info, u8 cmd)
1140{ 1285{
1141 struct sk_buff *skb; 1286 return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1142 int retval;
1143
1144 skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1145 if (!skb)
1146 return ERR_PTR(-ENOMEM);
1147
1148 retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
1149 if (retval < 0) {
1150 kfree_skb(skb);
1151 return ERR_PTR(retval);
1152 }
1153 return skb;
1154} 1287}
1155 1288
1156/* Called with ovs_mutex. */ 1289/* Called with rcu_read_lock or ovs_mutex. */
1157static struct datapath *lookup_datapath(struct net *net, 1290static struct datapath *lookup_datapath(struct net *net,
1158 struct ovs_header *ovs_header, 1291 struct ovs_header *ovs_header,
1159 struct nlattr *a[OVS_DP_ATTR_MAX + 1]) 1292 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
@@ -1165,10 +1298,8 @@ static struct datapath *lookup_datapath(struct net *net,
1165 else { 1298 else {
1166 struct vport *vport; 1299 struct vport *vport;
1167 1300
1168 rcu_read_lock();
1169 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME])); 1301 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1170 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; 1302 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1171 rcu_read_unlock();
1172 } 1303 }
1173 return dp ? dp : ERR_PTR(-ENODEV); 1304 return dp ? dp : ERR_PTR(-ENODEV);
1174} 1305}
@@ -1205,12 +1336,14 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1205 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) 1336 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1206 goto err; 1337 goto err;
1207 1338
1208 ovs_lock(); 1339 reply = ovs_dp_cmd_alloc_info(info);
1340 if (!reply)
1341 return -ENOMEM;
1209 1342
1210 err = -ENOMEM; 1343 err = -ENOMEM;
1211 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 1344 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1212 if (dp == NULL) 1345 if (dp == NULL)
1213 goto err_unlock_ovs; 1346 goto err_free_reply;
1214 1347
1215 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); 1348 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1216 1349
@@ -1245,6 +1378,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1245 1378
1246 ovs_dp_change(dp, a); 1379 ovs_dp_change(dp, a);
1247 1380
1381 /* So far only local changes have been made; now we need the lock. */
1382 ovs_lock();
1383
1248 vport = new_vport(&parms); 1384 vport = new_vport(&parms);
1249 if (IS_ERR(vport)) { 1385 if (IS_ERR(vport)) {
1250 err = PTR_ERR(vport); 1386 err = PTR_ERR(vport);
@@ -1263,10 +1399,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1263 goto err_destroy_ports_array; 1399 goto err_destroy_ports_array;
1264 } 1400 }
1265 1401
1266 reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW); 1402 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1267 err = PTR_ERR(reply); 1403 info->snd_seq, 0, OVS_DP_CMD_NEW);
1268 if (IS_ERR(reply)) 1404 BUG_ON(err < 0);
1269 goto err_destroy_local_port;
1270 1405
1271 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); 1406 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1272 list_add_tail_rcu(&dp->list_node, &ovs_net->dps); 1407 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
@@ -1276,9 +1411,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1276 ovs_notify(&dp_datapath_genl_family, reply, info); 1411 ovs_notify(&dp_datapath_genl_family, reply, info);
1277 return 0; 1412 return 0;
1278 1413
1279err_destroy_local_port:
1280 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1281err_destroy_ports_array: 1414err_destroy_ports_array:
1415 ovs_unlock();
1282 kfree(dp->ports); 1416 kfree(dp->ports);
1283err_destroy_percpu: 1417err_destroy_percpu:
1284 free_percpu(dp->stats_percpu); 1418 free_percpu(dp->stats_percpu);
@@ -1287,8 +1421,8 @@ err_destroy_table:
1287err_free_dp: 1421err_free_dp:
1288 release_net(ovs_dp_get_net(dp)); 1422 release_net(ovs_dp_get_net(dp));
1289 kfree(dp); 1423 kfree(dp);
1290err_unlock_ovs: 1424err_free_reply:
1291 ovs_unlock(); 1425 kfree_skb(reply);
1292err: 1426err:
1293 return err; 1427 return err;
1294} 1428}
@@ -1326,16 +1460,19 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1326 struct datapath *dp; 1460 struct datapath *dp;
1327 int err; 1461 int err;
1328 1462
1463 reply = ovs_dp_cmd_alloc_info(info);
1464 if (!reply)
1465 return -ENOMEM;
1466
1329 ovs_lock(); 1467 ovs_lock();
1330 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1468 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1331 err = PTR_ERR(dp); 1469 err = PTR_ERR(dp);
1332 if (IS_ERR(dp)) 1470 if (IS_ERR(dp))
1333 goto unlock; 1471 goto err_unlock_free;
1334 1472
1335 reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL); 1473 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1336 err = PTR_ERR(reply); 1474 info->snd_seq, 0, OVS_DP_CMD_DEL);
1337 if (IS_ERR(reply)) 1475 BUG_ON(err < 0);
1338 goto unlock;
1339 1476
1340 __dp_destroy(dp); 1477 __dp_destroy(dp);
1341 ovs_unlock(); 1478 ovs_unlock();
@@ -1343,8 +1480,10 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1343 ovs_notify(&dp_datapath_genl_family, reply, info); 1480 ovs_notify(&dp_datapath_genl_family, reply, info);
1344 1481
1345 return 0; 1482 return 0;
1346unlock: 1483
1484err_unlock_free:
1347 ovs_unlock(); 1485 ovs_unlock();
1486 kfree_skb(reply);
1348 return err; 1487 return err;
1349} 1488}
1350 1489
@@ -1354,29 +1493,30 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1354 struct datapath *dp; 1493 struct datapath *dp;
1355 int err; 1494 int err;
1356 1495
1496 reply = ovs_dp_cmd_alloc_info(info);
1497 if (!reply)
1498 return -ENOMEM;
1499
1357 ovs_lock(); 1500 ovs_lock();
1358 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1501 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1359 err = PTR_ERR(dp); 1502 err = PTR_ERR(dp);
1360 if (IS_ERR(dp)) 1503 if (IS_ERR(dp))
1361 goto unlock; 1504 goto err_unlock_free;
1362 1505
1363 ovs_dp_change(dp, info->attrs); 1506 ovs_dp_change(dp, info->attrs);
1364 1507
1365 reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW); 1508 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1366 if (IS_ERR(reply)) { 1509 info->snd_seq, 0, OVS_DP_CMD_NEW);
1367 err = PTR_ERR(reply); 1510 BUG_ON(err < 0);
1368 genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
1369 0, err);
1370 err = 0;
1371 goto unlock;
1372 }
1373 1511
1374 ovs_unlock(); 1512 ovs_unlock();
1375 ovs_notify(&dp_datapath_genl_family, reply, info); 1513 ovs_notify(&dp_datapath_genl_family, reply, info);
1376 1514
1377 return 0; 1515 return 0;
1378unlock: 1516
1517err_unlock_free:
1379 ovs_unlock(); 1518 ovs_unlock();
1519 kfree_skb(reply);
1380 return err; 1520 return err;
1381} 1521}
1382 1522
@@ -1386,24 +1526,26 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1386 struct datapath *dp; 1526 struct datapath *dp;
1387 int err; 1527 int err;
1388 1528
1389 ovs_lock(); 1529 reply = ovs_dp_cmd_alloc_info(info);
1530 if (!reply)
1531 return -ENOMEM;
1532
1533 rcu_read_lock();
1390 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1534 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1391 if (IS_ERR(dp)) { 1535 if (IS_ERR(dp)) {
1392 err = PTR_ERR(dp); 1536 err = PTR_ERR(dp);
1393 goto unlock; 1537 goto err_unlock_free;
1394 }
1395
1396 reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
1397 if (IS_ERR(reply)) {
1398 err = PTR_ERR(reply);
1399 goto unlock;
1400 } 1538 }
1539 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1540 info->snd_seq, 0, OVS_DP_CMD_NEW);
1541 BUG_ON(err < 0);
1542 rcu_read_unlock();
1401 1543
1402 ovs_unlock();
1403 return genlmsg_reply(reply, info); 1544 return genlmsg_reply(reply, info);
1404 1545
1405unlock: 1546err_unlock_free:
1406 ovs_unlock(); 1547 rcu_read_unlock();
1548 kfree_skb(reply);
1407 return err; 1549 return err;
1408} 1550}
1409 1551
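This reordering is forced by allocation context: the reply is allocated with GFP_KERNEL, which may sleep and is therefore illegal inside rcu_read_lock(), so the skb must exist before the RCU section begins. In outline (hypothetical handler, real OVS helpers elided):

static int demo_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);	/* may sleep */
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();		/* no sleeping from here on */
	/* ... RCU lookup plus nla_put() calls filling 'reply' ... */
	rcu_read_unlock();

	return genlmsg_reply(reply, info);
}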
@@ -1430,7 +1572,13 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1430 return skb->len; 1572 return skb->len;
1431} 1573}
1432 1574
1433static const struct genl_ops dp_datapath_genl_ops[] = { 1575static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1576 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1577 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1578 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1579};
1580
1581static struct genl_ops dp_datapath_genl_ops[] = {
1434 { .cmd = OVS_DP_CMD_NEW, 1582 { .cmd = OVS_DP_CMD_NEW,
1435 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1583 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1436 .policy = datapath_policy, 1584 .policy = datapath_policy,
@@ -1454,27 +1602,18 @@ static const struct genl_ops dp_datapath_genl_ops[] = {
1454 }, 1602 },
1455}; 1603};
1456 1604
1457static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { 1605static struct genl_family dp_datapath_genl_family = {
1458 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1459 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1460 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1461 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1462 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1463 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1464};
1465
1466struct genl_family dp_vport_genl_family = {
1467 .id = GENL_ID_GENERATE, 1606 .id = GENL_ID_GENERATE,
1468 .hdrsize = sizeof(struct ovs_header), 1607 .hdrsize = sizeof(struct ovs_header),
1469 .name = OVS_VPORT_FAMILY, 1608 .name = OVS_DATAPATH_FAMILY,
1470 .version = OVS_VPORT_VERSION, 1609 .version = OVS_DATAPATH_VERSION,
1471 .maxattr = OVS_VPORT_ATTR_MAX, 1610 .maxattr = OVS_DP_ATTR_MAX,
1472 .netnsok = true, 1611 .netnsok = true,
1473 .parallel_ops = true, 1612 .parallel_ops = true,
1474}; 1613 .ops = dp_datapath_genl_ops,
1475 1614 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1476static struct genl_multicast_group ovs_dp_vport_multicast_group = { 1615 .mcgrps = &ovs_dp_datapath_multicast_group,
1477 .name = OVS_VPORT_MCGROUP 1616 .n_mcgrps = 1,
1478}; 1617};
1479 1618
1480/* Called with ovs_mutex or RCU read lock. */ 1619/* Called with ovs_mutex or RCU read lock. */
@@ -1516,7 +1655,12 @@ error:
1516 return err; 1655 return err;
1517} 1656}
1518 1657
1519/* Called with ovs_mutex or RCU read lock. */ 1658static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1659{
1660 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1661}
1662
1663/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1520struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, 1664struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1521 u32 seq, u8 cmd) 1665 u32 seq, u8 cmd)
1522{ 1666{
@@ -1578,33 +1722,35 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1578 u32 port_no; 1722 u32 port_no;
1579 int err; 1723 int err;
1580 1724
1581 err = -EINVAL;
1582 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || 1725 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1583 !a[OVS_VPORT_ATTR_UPCALL_PID]) 1726 !a[OVS_VPORT_ATTR_UPCALL_PID])
1584 goto exit; 1727 return -EINVAL;
1728
1729 port_no = a[OVS_VPORT_ATTR_PORT_NO]
1730 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1731 if (port_no >= DP_MAX_PORTS)
1732 return -EFBIG;
1733
1734 reply = ovs_vport_cmd_alloc_info();
1735 if (!reply)
1736 return -ENOMEM;
1585 1737
1586 ovs_lock(); 1738 ovs_lock();
1587 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1739 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1588 err = -ENODEV; 1740 err = -ENODEV;
1589 if (!dp) 1741 if (!dp)
1590 goto exit_unlock; 1742 goto exit_unlock_free;
1591
1592 if (a[OVS_VPORT_ATTR_PORT_NO]) {
1593 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1594
1595 err = -EFBIG;
1596 if (port_no >= DP_MAX_PORTS)
1597 goto exit_unlock;
1598 1743
1744 if (port_no) {
1599 vport = ovs_vport_ovsl(dp, port_no); 1745 vport = ovs_vport_ovsl(dp, port_no);
1600 err = -EBUSY; 1746 err = -EBUSY;
1601 if (vport) 1747 if (vport)
1602 goto exit_unlock; 1748 goto exit_unlock_free;
1603 } else { 1749 } else {
1604 for (port_no = 1; ; port_no++) { 1750 for (port_no = 1; ; port_no++) {
1605 if (port_no >= DP_MAX_PORTS) { 1751 if (port_no >= DP_MAX_PORTS) {
1606 err = -EFBIG; 1752 err = -EFBIG;
1607 goto exit_unlock; 1753 goto exit_unlock_free;
1608 } 1754 }
1609 vport = ovs_vport_ovsl(dp, port_no); 1755 vport = ovs_vport_ovsl(dp, port_no);
1610 if (!vport) 1756 if (!vport)
@@ -1622,22 +1768,19 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1622 vport = new_vport(&parms); 1768 vport = new_vport(&parms);
1623 err = PTR_ERR(vport); 1769 err = PTR_ERR(vport);
1624 if (IS_ERR(vport)) 1770 if (IS_ERR(vport))
1625 goto exit_unlock; 1771 goto exit_unlock_free;
1626 1772
1627 err = 0; 1773 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1628 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, 1774 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1629 OVS_VPORT_CMD_NEW); 1775 BUG_ON(err < 0);
1630 if (IS_ERR(reply)) { 1776 ovs_unlock();
1631 err = PTR_ERR(reply);
1632 ovs_dp_detach_port(vport);
1633 goto exit_unlock;
1634 }
1635 1777
1636 ovs_notify(&dp_vport_genl_family, reply, info); 1778 ovs_notify(&dp_vport_genl_family, reply, info);
1779 return 0;
1637 1780
1638exit_unlock: 1781exit_unlock_free:
1639 ovs_unlock(); 1782 ovs_unlock();
1640exit: 1783 kfree_skb(reply);
1641 return err; 1784 return err;
1642} 1785}
1643 1786
@@ -1648,28 +1791,26 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1648 struct vport *vport; 1791 struct vport *vport;
1649 int err; 1792 int err;
1650 1793
1794 reply = ovs_vport_cmd_alloc_info();
1795 if (!reply)
1796 return -ENOMEM;
1797
1651 ovs_lock(); 1798 ovs_lock();
1652 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); 1799 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1653 err = PTR_ERR(vport); 1800 err = PTR_ERR(vport);
1654 if (IS_ERR(vport)) 1801 if (IS_ERR(vport))
1655 goto exit_unlock; 1802 goto exit_unlock_free;
1656 1803
1657 if (a[OVS_VPORT_ATTR_TYPE] && 1804 if (a[OVS_VPORT_ATTR_TYPE] &&
1658 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) { 1805 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1659 err = -EINVAL; 1806 err = -EINVAL;
1660 goto exit_unlock; 1807 goto exit_unlock_free;
1661 }
1662
1663 reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1664 if (!reply) {
1665 err = -ENOMEM;
1666 goto exit_unlock;
1667 } 1808 }
1668 1809
1669 if (a[OVS_VPORT_ATTR_OPTIONS]) { 1810 if (a[OVS_VPORT_ATTR_OPTIONS]) {
1670 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); 1811 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1671 if (err) 1812 if (err)
1672 goto exit_free; 1813 goto exit_unlock_free;
1673 } 1814 }
1674 1815
1675 if (a[OVS_VPORT_ATTR_UPCALL_PID]) 1816 if (a[OVS_VPORT_ATTR_UPCALL_PID])
@@ -1683,10 +1824,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1683 ovs_notify(&dp_vport_genl_family, reply, info); 1824 ovs_notify(&dp_vport_genl_family, reply, info);
1684 return 0; 1825 return 0;
1685 1826
1686exit_free: 1827exit_unlock_free:
1687 kfree_skb(reply);
1688exit_unlock:
1689 ovs_unlock(); 1828 ovs_unlock();
1829 kfree_skb(reply);
1690 return err; 1830 return err;
1691} 1831}
1692 1832
@@ -1697,30 +1837,33 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1697 struct vport *vport; 1837 struct vport *vport;
1698 int err; 1838 int err;
1699 1839
1840 reply = ovs_vport_cmd_alloc_info();
1841 if (!reply)
1842 return -ENOMEM;
1843
1700 ovs_lock(); 1844 ovs_lock();
1701 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); 1845 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1702 err = PTR_ERR(vport); 1846 err = PTR_ERR(vport);
1703 if (IS_ERR(vport)) 1847 if (IS_ERR(vport))
1704 goto exit_unlock; 1848 goto exit_unlock_free;
1705 1849
1706 if (vport->port_no == OVSP_LOCAL) { 1850 if (vport->port_no == OVSP_LOCAL) {
1707 err = -EINVAL; 1851 err = -EINVAL;
1708 goto exit_unlock; 1852 goto exit_unlock_free;
1709 } 1853 }
1710 1854
1711 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, 1855 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1712 info->snd_seq, OVS_VPORT_CMD_DEL); 1856 info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1713 err = PTR_ERR(reply); 1857 BUG_ON(err < 0);
1714 if (IS_ERR(reply))
1715 goto exit_unlock;
1716
1717 err = 0;
1718 ovs_dp_detach_port(vport); 1858 ovs_dp_detach_port(vport);
1859 ovs_unlock();
1719 1860
1720 ovs_notify(&dp_vport_genl_family, reply, info); 1861 ovs_notify(&dp_vport_genl_family, reply, info);
1862 return 0;
1721 1863
1722exit_unlock: 1864exit_unlock_free:
1723 ovs_unlock(); 1865 ovs_unlock();
1866 kfree_skb(reply);
1724 return err; 1867 return err;
1725} 1868}
1726 1869
@@ -1732,24 +1875,25 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1732 struct vport *vport; 1875 struct vport *vport;
1733 int err; 1876 int err;
1734 1877
1878 reply = ovs_vport_cmd_alloc_info();
1879 if (!reply)
1880 return -ENOMEM;
1881
1735 rcu_read_lock(); 1882 rcu_read_lock();
1736 vport = lookup_vport(sock_net(skb->sk), ovs_header, a); 1883 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1737 err = PTR_ERR(vport); 1884 err = PTR_ERR(vport);
1738 if (IS_ERR(vport)) 1885 if (IS_ERR(vport))
1739 goto exit_unlock; 1886 goto exit_unlock_free;
1740 1887 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1741 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, 1888 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1742 info->snd_seq, OVS_VPORT_CMD_NEW); 1889 BUG_ON(err < 0);
1743 err = PTR_ERR(reply);
1744 if (IS_ERR(reply))
1745 goto exit_unlock;
1746
1747 rcu_read_unlock(); 1890 rcu_read_unlock();
1748 1891
1749 return genlmsg_reply(reply, info); 1892 return genlmsg_reply(reply, info);
1750 1893
1751exit_unlock: 1894exit_unlock_free:
1752 rcu_read_unlock(); 1895 rcu_read_unlock();
1896 kfree_skb(reply);
1753 return err; 1897 return err;
1754} 1898}
1755 1899
@@ -1792,7 +1936,16 @@ out:
1792 return skb->len; 1936 return skb->len;
1793} 1937}
1794 1938
1795static const struct genl_ops dp_vport_genl_ops[] = { 1939static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1940 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1941 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1942 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1943 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1944 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1945 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1946};
1947
1948static struct genl_ops dp_vport_genl_ops[] = {
1796 { .cmd = OVS_VPORT_CMD_NEW, 1949 { .cmd = OVS_VPORT_CMD_NEW,
1797 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1950 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1798 .policy = vport_policy, 1951 .policy = vport_policy,
@@ -1816,26 +1969,25 @@ static const struct genl_ops dp_vport_genl_ops[] = {
1816 }, 1969 },
1817}; 1970};
1818 1971
1819struct genl_family_and_ops { 1972struct genl_family dp_vport_genl_family = {
1820 struct genl_family *family; 1973 .id = GENL_ID_GENERATE,
1821 const struct genl_ops *ops; 1974 .hdrsize = sizeof(struct ovs_header),
1822 int n_ops; 1975 .name = OVS_VPORT_FAMILY,
1823 const struct genl_multicast_group *group; 1976 .version = OVS_VPORT_VERSION,
1977 .maxattr = OVS_VPORT_ATTR_MAX,
1978 .netnsok = true,
1979 .parallel_ops = true,
1980 .ops = dp_vport_genl_ops,
1981 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
1982 .mcgrps = &ovs_dp_vport_multicast_group,
1983 .n_mcgrps = 1,
1824}; 1984};
1825 1985
1826static const struct genl_family_and_ops dp_genl_families[] = { 1986static struct genl_family * const dp_genl_families[] = {
1827 { &dp_datapath_genl_family, 1987 &dp_datapath_genl_family,
1828 dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), 1988 &dp_vport_genl_family,
1829 &ovs_dp_datapath_multicast_group }, 1989 &dp_flow_genl_family,
1830 { &dp_vport_genl_family, 1990 &dp_packet_genl_family,
1831 dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1832 &ovs_dp_vport_multicast_group },
1833 { &dp_flow_genl_family,
1834 dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1835 &ovs_dp_flow_multicast_group },
1836 { &dp_packet_genl_family,
1837 dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1838 NULL },
1839}; 1991};
1840 1992
1841static void dp_unregister_genl(int n_families) 1993static void dp_unregister_genl(int n_families)
@@ -1843,33 +1995,25 @@ static void dp_unregister_genl(int n_families)
1843 int i; 1995 int i;
1844 1996
1845 for (i = 0; i < n_families; i++) 1997 for (i = 0; i < n_families; i++)
1846 genl_unregister_family(dp_genl_families[i].family); 1998 genl_unregister_family(dp_genl_families[i]);
1847} 1999}
1848 2000
1849static int dp_register_genl(void) 2001static int dp_register_genl(void)
1850{ 2002{
1851 int n_registered;
1852 int err; 2003 int err;
1853 int i; 2004 int i;
1854 2005
1855 n_registered = 0;
1856 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { 2006 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1857 const struct genl_family_and_ops *f = &dp_genl_families[i];
1858 2007
1859 f->family->ops = f->ops; 2008 err = genl_register_family(dp_genl_families[i]);
1860 f->family->n_ops = f->n_ops;
1861 f->family->mcgrps = f->group;
1862 f->family->n_mcgrps = f->group ? 1 : 0;
1863 err = genl_register_family(f->family);
1864 if (err) 2009 if (err)
1865 goto error; 2010 goto error;
1866 n_registered++;
1867 } 2011 }
1868 2012
1869 return 0; 2013 return 0;
1870 2014
1871error: 2015error:
1872 dp_unregister_genl(n_registered); 2016 dp_unregister_genl(i);
1873 return err; 2017 return err;
1874} 2018}
1875 2019
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 05317380fc03..7ede507500d7 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -194,7 +194,9 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
194int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); 194int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
195void ovs_dp_notify_wq(struct work_struct *work); 195void ovs_dp_notify_wq(struct work_struct *work);
196 196
197#define OVS_NLERR(fmt, ...) \ 197#define OVS_NLERR(fmt, ...) \
198 pr_info_once("netlink: " fmt, ##__VA_ARGS__) 198do { \
199 199 if (net_ratelimit()) \
200 pr_info("netlink: " fmt, ##__VA_ARGS__); \
201} while (0)
200#endif /* datapath.h */ 202#endif /* datapath.h */
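Two details of the reworked macro deserve a note: net_ratelimit() keeps a hostile or buggy netlink peer from flooding the log (pr_info_once was the opposite extreme, reporting only the first offence ever), and the do { } while (0) wrapper keeps the embedded if from interacting with a caller's else. A userspace toy illustrating the hygiene point; LOG_BARE is a deliberately broken variant, not the kernel macro:

#include <stdio.h>

#define LOG_BARE(msg)	if (1) printf("%s\n", msg)
#define LOG_SAFE(msg)	do { printf("%s\n", msg); } while (0)

int main(void)
{
	int err = 0;

	if (err)
		LOG_SAFE("failed");	/* pairs correctly with the else */
	else
		printf("ok\n");

	/* Substituting LOG_BARE above breaks: with the trailing ';' the
	 * 'else' no longer parses, and without it the 'else' silently
	 * binds to the macro's internal 'if (1)'. */
	return 0;
}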
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2998989e76db..334751cb1528 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -64,88 +64,110 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) 64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
65{ 65{
66 struct flow_stats *stats; 66 struct flow_stats *stats;
67 __be16 tcp_flags = 0; 67 __be16 tcp_flags = flow->key.tp.flags;
68 68 int node = numa_node_id();
69 if (!flow->stats.is_percpu) 69
70 stats = flow->stats.stat; 70 stats = rcu_dereference(flow->stats[node]);
71 else 71
72 stats = this_cpu_ptr(flow->stats.cpu_stats); 72 /* Check if we already have node-specific stats. */
73 73 if (likely(stats)) {
74 if ((flow->key.eth.type == htons(ETH_P_IP) || 74 spin_lock(&stats->lock);
75 flow->key.eth.type == htons(ETH_P_IPV6)) && 75 /* Note when we write to the pre-allocated stats. */
76 flow->key.ip.frag != OVS_FRAG_TYPE_LATER && 76 if (node == 0 && unlikely(flow->stats_last_writer != node))
77 flow->key.ip.proto == IPPROTO_TCP && 77 flow->stats_last_writer = node;
78 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { 78 } else {
79 tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); 79 stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
80 spin_lock(&stats->lock);
81
82 /* If the current NUMA-node is the only writer on the
83 * pre-allocated stats, keep using them.
84 */
85 if (unlikely(flow->stats_last_writer != node)) {
86 /* A previous locker may have already allocated the
87 * stats, so we need to check again. If node-specific
88 * stats were already allocated, we update the pre-
89 * allocated stats as we have already locked them.
90 */
91 if (likely(flow->stats_last_writer != NUMA_NO_NODE)
92 && likely(!rcu_dereference(flow->stats[node]))) {
93 /* Try to allocate node-specific stats. */
94 struct flow_stats *new_stats;
95
96 new_stats =
97 kmem_cache_alloc_node(flow_stats_cache,
98 GFP_THISNODE |
99 __GFP_NOMEMALLOC,
100 node);
101 if (likely(new_stats)) {
102 new_stats->used = jiffies;
103 new_stats->packet_count = 1;
104 new_stats->byte_count = skb->len;
105 new_stats->tcp_flags = tcp_flags;
106 spin_lock_init(&new_stats->lock);
107
108 rcu_assign_pointer(flow->stats[node],
109 new_stats);
110 goto unlock;
111 }
112 }
113 flow->stats_last_writer = node;
114 }
80 } 115 }
81 116
82 spin_lock(&stats->lock);
83 stats->used = jiffies; 117 stats->used = jiffies;
84 stats->packet_count++; 118 stats->packet_count++;
85 stats->byte_count += skb->len; 119 stats->byte_count += skb->len;
86 stats->tcp_flags |= tcp_flags; 120 stats->tcp_flags |= tcp_flags;
121unlock:
87 spin_unlock(&stats->lock); 122 spin_unlock(&stats->lock);
88} 123}
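The update path above is a double-checked publish: re-check under the shared pre-allocated stats' lock, allocate on the local node, then rcu_assign_pointer() the result. A condensed sketch of the same idea that serialises the one-time allocation with cmpxchg() instead of the outer lock, and simply drops the sample on allocation failure rather than falling back to stats[0] as the patch does (types are illustrative):

struct demo_stats {
	spinlock_t lock;
	u64 packets;
};

struct demo_flow {
	struct demo_stats *stats[MAX_NUMNODES];
};

static void demo_stats_update(struct demo_flow *f)
{
	int node = numa_node_id();
	struct demo_stats *s = READ_ONCE(f->stats[node]);

	if (unlikely(!s)) {
		struct demo_stats *n = kzalloc_node(sizeof(*n), GFP_ATOMIC, node);

		if (!n)
			return;
		spin_lock_init(&n->lock);
		/* Publish once; the loser of the race frees its copy. */
		if (cmpxchg(&f->stats[node], NULL, n) != NULL)
			kfree(n);
		s = READ_ONCE(f->stats[node]);
	}

	spin_lock(&s->lock);
	s->packets++;
	spin_unlock(&s->lock);
}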
89 124
90static void stats_read(struct flow_stats *stats, 125/* Must be called with rcu_read_lock or ovs_mutex. */
91 struct ovs_flow_stats *ovs_stats, 126void ovs_flow_stats_get(const struct sw_flow *flow,
92 unsigned long *used, __be16 *tcp_flags) 127 struct ovs_flow_stats *ovs_stats,
93{
94 spin_lock(&stats->lock);
95 if (!*used || time_after(stats->used, *used))
96 *used = stats->used;
97 *tcp_flags |= stats->tcp_flags;
98 ovs_stats->n_packets += stats->packet_count;
99 ovs_stats->n_bytes += stats->byte_count;
100 spin_unlock(&stats->lock);
101}
102
103void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
104 unsigned long *used, __be16 *tcp_flags) 128 unsigned long *used, __be16 *tcp_flags)
105{ 129{
106 int cpu; 130 int node;
107 131
108 *used = 0; 132 *used = 0;
109 *tcp_flags = 0; 133 *tcp_flags = 0;
110 memset(ovs_stats, 0, sizeof(*ovs_stats)); 134 memset(ovs_stats, 0, sizeof(*ovs_stats));
111 135
112 local_bh_disable(); 136 for_each_node(node) {
113 if (!flow->stats.is_percpu) { 137 struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
114 stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
115 } else {
116 for_each_possible_cpu(cpu) {
117 struct flow_stats *stats;
118 138
119 stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); 139 if (stats) {
120 stats_read(stats, ovs_stats, used, tcp_flags); 140 /* Local CPU may write to non-local stats, so we must
141 * block bottom-halves here.
142 */
143 spin_lock_bh(&stats->lock);
144 if (!*used || time_after(stats->used, *used))
145 *used = stats->used;
146 *tcp_flags |= stats->tcp_flags;
147 ovs_stats->n_packets += stats->packet_count;
148 ovs_stats->n_bytes += stats->byte_count;
149 spin_unlock_bh(&stats->lock);
121 } 150 }
122 } 151 }
123 local_bh_enable();
124}
125
126static void stats_reset(struct flow_stats *stats)
127{
128 spin_lock(&stats->lock);
129 stats->used = 0;
130 stats->packet_count = 0;
131 stats->byte_count = 0;
132 stats->tcp_flags = 0;
133 spin_unlock(&stats->lock);
134} 152}
135 153
154/* Called with ovs_mutex. */
136void ovs_flow_stats_clear(struct sw_flow *flow) 155void ovs_flow_stats_clear(struct sw_flow *flow)
137{ 156{
138 int cpu; 157 int node;
139 158
140 local_bh_disable(); 159 for_each_node(node) {
141 if (!flow->stats.is_percpu) { 160 struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
142 stats_reset(flow->stats.stat); 161
143 } else { 162 if (stats) {
144 for_each_possible_cpu(cpu) { 163 spin_lock_bh(&stats->lock);
145 stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); 164 stats->used = 0;
165 stats->packet_count = 0;
166 stats->byte_count = 0;
167 stats->tcp_flags = 0;
168 spin_unlock_bh(&stats->lock);
146 } 169 }
147 } 170 }
148 local_bh_enable();
149} 171}
150 172
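The _bh variants in the get/clear paths above are load-bearing: the update side runs in softirq context on packet receive, so a process-context reader that held the same spinlock with bottom halves enabled could be preempted by that softirq on its own CPU and deadlock. Reader side, sketched against the demo_stats type from the previous example:

static u64 demo_stats_read(struct demo_stats *s)
{
	u64 packets;

	spin_lock_bh(&s->lock);		/* keep the softirq updater out */
	packets = s->packets;
	spin_unlock_bh(&s->lock);
	return packets;
}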
151static int check_header(struct sk_buff *skb, int len) 173static int check_header(struct sk_buff *skb, int len)
@@ -332,8 +354,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
332 /* The ICMPv6 type and code fields use the 16-bit transport port 354 /* The ICMPv6 type and code fields use the 16-bit transport port
333 * fields, so we need to store them in 16-bit network byte order. 355 * fields, so we need to store them in 16-bit network byte order.
334 */ 356 */
335 key->ipv6.tp.src = htons(icmp->icmp6_type); 357 key->tp.src = htons(icmp->icmp6_type);
336 key->ipv6.tp.dst = htons(icmp->icmp6_code); 358 key->tp.dst = htons(icmp->icmp6_code);
337 359
338 if (icmp->icmp6_code == 0 && 360 if (icmp->icmp6_code == 0 &&
339 (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || 361 (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -372,14 +394,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
372 && opt_len == 8) { 394 && opt_len == 8) {
373 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll))) 395 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
374 goto invalid; 396 goto invalid;
375 memcpy(key->ipv6.nd.sll, 397 ether_addr_copy(key->ipv6.nd.sll,
376 &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN); 398 &nd->opt[offset+sizeof(*nd_opt)]);
377 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR 399 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
378 && opt_len == 8) { 400 && opt_len == 8) {
379 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll))) 401 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
380 goto invalid; 402 goto invalid;
381 memcpy(key->ipv6.nd.tll, 403 ether_addr_copy(key->ipv6.nd.tll,
382 &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN); 404 &nd->opt[offset+sizeof(*nd_opt)]);
383 } 405 }
384 406
385 icmp_len -= opt_len; 407 icmp_len -= opt_len;
@@ -439,8 +461,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
439 * header in the linear data area. 461 * header in the linear data area.
440 */ 462 */
441 eth = eth_hdr(skb); 463 eth = eth_hdr(skb);
442 memcpy(key->eth.src, eth->h_source, ETH_ALEN); 464 ether_addr_copy(key->eth.src, eth->h_source);
443 memcpy(key->eth.dst, eth->h_dest, ETH_ALEN); 465 ether_addr_copy(key->eth.dst, eth->h_dest);
444 466
445 __skb_pull(skb, 2 * ETH_ALEN); 467 __skb_pull(skb, 2 * ETH_ALEN);
446 /* We are going to push all headers that we pull, so no need to 468 /* We are going to push all headers that we pull, so no need to
@@ -495,21 +517,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
495 if (key->ip.proto == IPPROTO_TCP) { 517 if (key->ip.proto == IPPROTO_TCP) {
496 if (tcphdr_ok(skb)) { 518 if (tcphdr_ok(skb)) {
497 struct tcphdr *tcp = tcp_hdr(skb); 519 struct tcphdr *tcp = tcp_hdr(skb);
498 key->ipv4.tp.src = tcp->source; 520 key->tp.src = tcp->source;
499 key->ipv4.tp.dst = tcp->dest; 521 key->tp.dst = tcp->dest;
500 key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp); 522 key->tp.flags = TCP_FLAGS_BE16(tcp);
501 } 523 }
502 } else if (key->ip.proto == IPPROTO_UDP) { 524 } else if (key->ip.proto == IPPROTO_UDP) {
503 if (udphdr_ok(skb)) { 525 if (udphdr_ok(skb)) {
504 struct udphdr *udp = udp_hdr(skb); 526 struct udphdr *udp = udp_hdr(skb);
505 key->ipv4.tp.src = udp->source; 527 key->tp.src = udp->source;
506 key->ipv4.tp.dst = udp->dest; 528 key->tp.dst = udp->dest;
507 } 529 }
508 } else if (key->ip.proto == IPPROTO_SCTP) { 530 } else if (key->ip.proto == IPPROTO_SCTP) {
509 if (sctphdr_ok(skb)) { 531 if (sctphdr_ok(skb)) {
510 struct sctphdr *sctp = sctp_hdr(skb); 532 struct sctphdr *sctp = sctp_hdr(skb);
511 key->ipv4.tp.src = sctp->source; 533 key->tp.src = sctp->source;
512 key->ipv4.tp.dst = sctp->dest; 534 key->tp.dst = sctp->dest;
513 } 535 }
514 } else if (key->ip.proto == IPPROTO_ICMP) { 536 } else if (key->ip.proto == IPPROTO_ICMP) {
515 if (icmphdr_ok(skb)) { 537 if (icmphdr_ok(skb)) {
@@ -517,8 +539,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
517 /* The ICMP type and code fields use the 16-bit 539 /* The ICMP type and code fields use the 16-bit
518 * transport port fields, so we need to store 540 * transport port fields, so we need to store
519 * them in 16-bit network byte order. */ 541 * them in 16-bit network byte order. */
520 key->ipv4.tp.src = htons(icmp->type); 542 key->tp.src = htons(icmp->type);
521 key->ipv4.tp.dst = htons(icmp->code); 543 key->tp.dst = htons(icmp->code);
522 } 544 }
523 } 545 }
524 546
@@ -538,8 +560,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
538 key->ip.proto = ntohs(arp->ar_op); 560 key->ip.proto = ntohs(arp->ar_op);
539 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); 561 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
540 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); 562 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
541 memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); 563 ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
542 memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); 564 ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
543 } 565 }
544 } else if (key->eth.type == htons(ETH_P_IPV6)) { 566 } else if (key->eth.type == htons(ETH_P_IPV6)) {
545 int nh_len; /* IPv6 Header + Extensions */ 567 int nh_len; /* IPv6 Header + Extensions */
@@ -564,21 +586,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
564 if (key->ip.proto == NEXTHDR_TCP) { 586 if (key->ip.proto == NEXTHDR_TCP) {
565 if (tcphdr_ok(skb)) { 587 if (tcphdr_ok(skb)) {
566 struct tcphdr *tcp = tcp_hdr(skb); 588 struct tcphdr *tcp = tcp_hdr(skb);
567 key->ipv6.tp.src = tcp->source; 589 key->tp.src = tcp->source;
568 key->ipv6.tp.dst = tcp->dest; 590 key->tp.dst = tcp->dest;
569 key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp); 591 key->tp.flags = TCP_FLAGS_BE16(tcp);
570 } 592 }
571 } else if (key->ip.proto == NEXTHDR_UDP) { 593 } else if (key->ip.proto == NEXTHDR_UDP) {
572 if (udphdr_ok(skb)) { 594 if (udphdr_ok(skb)) {
573 struct udphdr *udp = udp_hdr(skb); 595 struct udphdr *udp = udp_hdr(skb);
574 key->ipv6.tp.src = udp->source; 596 key->tp.src = udp->source;
575 key->ipv6.tp.dst = udp->dest; 597 key->tp.dst = udp->dest;
576 } 598 }
577 } else if (key->ip.proto == NEXTHDR_SCTP) { 599 } else if (key->ip.proto == NEXTHDR_SCTP) {
578 if (sctphdr_ok(skb)) { 600 if (sctphdr_ok(skb)) {
579 struct sctphdr *sctp = sctp_hdr(skb); 601 struct sctphdr *sctp = sctp_hdr(skb);
580 key->ipv6.tp.src = sctp->source; 602 key->tp.src = sctp->source;
581 key->ipv6.tp.dst = sctp->dest; 603 key->tp.dst = sctp->dest;
582 } 604 }
583 } else if (key->ip.proto == NEXTHDR_ICMP) { 605 } else if (key->ip.proto == NEXTHDR_ICMP) {
584 if (icmp6hdr_ok(skb)) { 606 if (icmp6hdr_ok(skb)) {
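
The flow.c hunks above collapse the separate ipv4.tp and ipv6.tp stores into a single key->tp, so the TCP, UDP, SCTP and ICMP extraction paths all write the same member regardless of address family. A minimal sketch of that shape, with illustrative names (flow_key_tp and set_ports are not the kernel's):

#include <stdint.h>

struct flow_key_tp {
	uint16_t src;	/* TCP/UDP/SCTP source port, network byte order */
	uint16_t dst;	/* TCP/UDP/SCTP destination port */
	uint16_t flags;	/* TCP flags; zero for other protocols */
};

/* One store site serves both IPv4 and IPv6 once the ports live in a
 * shared member; before the patch each family filled its own copy. */
static void set_ports(struct flow_key_tp *tp, uint16_t src_be,
		      uint16_t dst_be, uint16_t flags_be)
{
	tp->src = src_be;
	tp->dst = dst_be;
	tp->flags = flags_be;
}
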
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2d770e28a3a3..ac395d2cd821 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -47,7 +47,7 @@ struct ovs_key_ipv4_tunnel {
47 __be16 tun_flags; 47 __be16 tun_flags;
48 u8 ipv4_tos; 48 u8 ipv4_tos;
49 u8 ipv4_ttl; 49 u8 ipv4_ttl;
50}; 50} __packed __aligned(4); /* Minimize padding. */
51 51
52static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key, 52static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
53 const struct iphdr *iph, __be64 tun_id, 53 const struct iphdr *iph, __be64 tun_id,
@@ -71,7 +71,7 @@ struct sw_flow_key {
71 u32 priority; /* Packet QoS priority. */ 71 u32 priority; /* Packet QoS priority. */
72 u32 skb_mark; /* SKB mark. */ 72 u32 skb_mark; /* SKB mark. */
73 u16 in_port; /* Input switch port (or DP_MAX_PORTS). */ 73 u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
74 } phy; 74 } __packed phy; /* Safe when right after 'tun_key'. */
75 struct { 75 struct {
76 u8 src[ETH_ALEN]; /* Ethernet source address. */ 76 u8 src[ETH_ALEN]; /* Ethernet source address. */
77 u8 dst[ETH_ALEN]; /* Ethernet destination address. */ 77 u8 dst[ETH_ALEN]; /* Ethernet destination address. */
@@ -84,23 +84,21 @@ struct sw_flow_key {
84 u8 ttl; /* IP TTL/hop limit. */ 84 u8 ttl; /* IP TTL/hop limit. */
85 u8 frag; /* One of OVS_FRAG_TYPE_*. */ 85 u8 frag; /* One of OVS_FRAG_TYPE_*. */
86 } ip; 86 } ip;
87 struct {
88 __be16 src; /* TCP/UDP/SCTP source port. */
89 __be16 dst; /* TCP/UDP/SCTP destination port. */
90 __be16 flags; /* TCP flags. */
91 } tp;
87 union { 92 union {
88 struct { 93 struct {
89 struct { 94 struct {
90 __be32 src; /* IP source address. */ 95 __be32 src; /* IP source address. */
91 __be32 dst; /* IP destination address. */ 96 __be32 dst; /* IP destination address. */
92 } addr; 97 } addr;
93 union { 98 struct {
94 struct { 99 u8 sha[ETH_ALEN]; /* ARP source hardware address. */
95 __be16 src; /* TCP/UDP/SCTP source port. */ 100 u8 tha[ETH_ALEN]; /* ARP target hardware address. */
96 __be16 dst; /* TCP/UDP/SCTP destination port. */ 101 } arp;
97 __be16 flags; /* TCP flags. */
98 } tp;
99 struct {
100 u8 sha[ETH_ALEN]; /* ARP source hardware address. */
101 u8 tha[ETH_ALEN]; /* ARP target hardware address. */
102 } arp;
103 };
104 } ipv4; 102 } ipv4;
105 struct { 103 struct {
106 struct { 104 struct {
@@ -109,11 +107,6 @@ struct sw_flow_key {
109 } addr; 107 } addr;
110 __be32 label; /* IPv6 flow label. */ 108 __be32 label; /* IPv6 flow label. */
111 struct { 109 struct {
112 __be16 src; /* TCP/UDP/SCTP source port. */
113 __be16 dst; /* TCP/UDP/SCTP destination port. */
114 __be16 flags; /* TCP flags. */
115 } tp;
116 struct {
117 struct in6_addr target; /* ND target address. */ 110 struct in6_addr target; /* ND target address. */
118 u8 sll[ETH_ALEN]; /* ND source link layer address. */ 111 u8 sll[ETH_ALEN]; /* ND source link layer address. */
119 u8 tll[ETH_ALEN]; /* ND target link layer address. */ 112 u8 tll[ETH_ALEN]; /* ND target link layer address. */
@@ -155,24 +148,22 @@ struct flow_stats {
155 __be16 tcp_flags; /* Union of seen TCP flags. */ 148 __be16 tcp_flags; /* Union of seen TCP flags. */
156}; 149};
157 150
158struct sw_flow_stats {
159 bool is_percpu;
160 union {
161 struct flow_stats *stat;
162 struct flow_stats __percpu *cpu_stats;
163 };
164};
165
166struct sw_flow { 151struct sw_flow {
167 struct rcu_head rcu; 152 struct rcu_head rcu;
168 struct hlist_node hash_node[2]; 153 struct hlist_node hash_node[2];
169 u32 hash; 154 u32 hash;
170 155 int stats_last_writer; /* NUMA-node id of the last writer on
156 * 'stats[0]'.
157 */
171 struct sw_flow_key key; 158 struct sw_flow_key key;
172 struct sw_flow_key unmasked_key; 159 struct sw_flow_key unmasked_key;
173 struct sw_flow_mask *mask; 160 struct sw_flow_mask *mask;
174 struct sw_flow_actions __rcu *sf_acts; 161 struct sw_flow_actions __rcu *sf_acts;
175 struct sw_flow_stats stats; 162 struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one
163 * is allocated at flow creation time,
164 * the rest are allocated on demand
165 * while holding the 'stats[0].lock'.
166 */
176}; 167};
177 168
178struct arp_eth_header { 169struct arp_eth_header {
@@ -189,10 +180,10 @@ struct arp_eth_header {
189 unsigned char ar_tip[4]; /* target IP address */ 180 unsigned char ar_tip[4]; /* target IP address */
190} __packed; 181} __packed;
191 182
192void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb); 183void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
193void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats, 184void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
194 unsigned long *used, __be16 *tcp_flags); 185 unsigned long *used, __be16 *tcp_flags);
195void ovs_flow_stats_clear(struct sw_flow *flow); 186void ovs_flow_stats_clear(struct sw_flow *);
196u64 ovs_flow_used_time(unsigned long flow_jiffies); 187u64 ovs_flow_used_time(unsigned long flow_jiffies);
197 188
198int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); 189int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
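
struct sw_flow now ends in a flexible array of per-NUMA-node stats pointers, sized when the slab cache is created. A hedged userspace sketch of the trailing flexible-array pattern (flow, flow_alloc and nr_nodes are illustrative, not kernel symbols):

#include <stdlib.h>
#include <string.h>

struct stats;			/* opaque here */

struct flow {
	int stats_last_writer;	/* node id of the last writer; -1 if none */
	struct stats *stats[];	/* one slot per NUMA node */
};

static struct flow *flow_alloc(int nr_nodes)
{
	struct flow *f = malloc(sizeof(*f) + nr_nodes * sizeof(f->stats[0]));

	if (!f)
		return NULL;
	f->stats_last_writer = -1;
	memset(f->stats, 0, nr_nodes * sizeof(f->stats[0]));
	return f;
}

The first slot is then populated at allocation time, mirroring how ovs_flow_alloc() above seeds stats[0] and leaves the other nodes NULL until a writer on that node needs one.
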
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 4d000acaed0d..d757848da89c 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -16,6 +16,8 @@
16 * 02110-1301, USA 16 * 02110-1301, USA
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include "flow.h" 21#include "flow.h"
20#include "datapath.h" 22#include "datapath.h"
21#include <linux/uaccess.h> 23#include <linux/uaccess.h>
@@ -202,11 +204,11 @@ static bool match_validate(const struct sw_flow_match *match,
202 if (match->mask && (match->mask->key.ip.proto == 0xff)) 204 if (match->mask && (match->mask->key.ip.proto == 0xff))
203 mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; 205 mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
204 206
205 if (match->key->ipv6.tp.src == 207 if (match->key->tp.src ==
206 htons(NDISC_NEIGHBOUR_SOLICITATION) || 208 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
207 match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { 209 match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
208 key_expected |= 1 << OVS_KEY_ATTR_ND; 210 key_expected |= 1 << OVS_KEY_ATTR_ND;
209 if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) 211 if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
210 mask_allowed |= 1 << OVS_KEY_ATTR_ND; 212 mask_allowed |= 1 << OVS_KEY_ATTR_ND;
211 } 213 }
212 } 214 }
@@ -216,14 +218,14 @@ static bool match_validate(const struct sw_flow_match *match,
216 if ((key_attrs & key_expected) != key_expected) { 218 if ((key_attrs & key_expected) != key_expected) {
217 /* Key attributes check failed. */ 219 /* Key attributes check failed. */
218 OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", 220 OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
219 key_attrs, key_expected); 221 (unsigned long long)key_attrs, (unsigned long long)key_expected);
220 return false; 222 return false;
221 } 223 }
222 224
223 if ((mask_attrs & mask_allowed) != mask_attrs) { 225 if ((mask_attrs & mask_allowed) != mask_attrs) {
224 /* Mask attributes check failed. */ 226 /* Mask attributes check failed. */
225 OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", 227 OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
226 mask_attrs, mask_allowed); 228 (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
227 return false; 229 return false;
228 } 230 }
229 231
@@ -266,20 +268,6 @@ static bool is_all_zero(const u8 *fp, size_t size)
266 return true; 268 return true;
267} 269}
268 270
269static bool is_all_set(const u8 *fp, size_t size)
270{
271 int i;
272
273 if (!fp)
274 return false;
275
276 for (i = 0; i < size; i++)
277 if (fp[i] != 0xff)
278 return false;
279
280 return true;
281}
282
283static int __parse_flow_nlattrs(const struct nlattr *attr, 271static int __parse_flow_nlattrs(const struct nlattr *attr,
284 const struct nlattr *a[], 272 const struct nlattr *a[],
285 u64 *attrsp, bool nz) 273 u64 *attrsp, bool nz)
@@ -501,9 +489,8 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
501 return 0; 489 return 0;
502} 490}
503 491
504static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple, 492static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
505 u64 attrs, const struct nlattr **a, 493 const struct nlattr **a, bool is_mask)
506 bool is_mask)
507{ 494{
508 int err; 495 int err;
509 u64 orig_attrs = attrs; 496 u64 orig_attrs = attrs;
@@ -560,11 +547,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
560 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask); 547 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
561 } 548 }
562 549
563 if (is_mask && exact_5tuple) {
564 if (match->mask->key.eth.type != htons(0xffff))
565 *exact_5tuple = false;
566 }
567
568 if (attrs & (1 << OVS_KEY_ATTR_IPV4)) { 550 if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
569 const struct ovs_key_ipv4 *ipv4_key; 551 const struct ovs_key_ipv4 *ipv4_key;
570 552
@@ -587,13 +569,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
587 SW_FLOW_KEY_PUT(match, ipv4.addr.dst, 569 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
588 ipv4_key->ipv4_dst, is_mask); 570 ipv4_key->ipv4_dst, is_mask);
589 attrs &= ~(1 << OVS_KEY_ATTR_IPV4); 571 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
590
591 if (is_mask && exact_5tuple && *exact_5tuple) {
592 if (ipv4_key->ipv4_proto != 0xff ||
593 ipv4_key->ipv4_src != htonl(0xffffffff) ||
594 ipv4_key->ipv4_dst != htonl(0xffffffff))
595 *exact_5tuple = false;
596 }
597 } 572 }
598 573
599 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) { 574 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -625,13 +600,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
625 is_mask); 600 is_mask);
626 601
627 attrs &= ~(1 << OVS_KEY_ATTR_IPV6); 602 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
628
629 if (is_mask && exact_5tuple && *exact_5tuple) {
630 if (ipv6_key->ipv6_proto != 0xff ||
631 !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
632 !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
633 *exact_5tuple = false;
634 }
635 } 603 }
636 604
637 if (attrs & (1 << OVS_KEY_ATTR_ARP)) { 605 if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -662,32 +630,18 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
662 const struct ovs_key_tcp *tcp_key; 630 const struct ovs_key_tcp *tcp_key;
663 631
664 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); 632 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
665 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 633 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
666 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 634 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
667 tcp_key->tcp_src, is_mask);
668 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
669 tcp_key->tcp_dst, is_mask);
670 } else {
671 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
672 tcp_key->tcp_src, is_mask);
673 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
674 tcp_key->tcp_dst, is_mask);
675 }
676 attrs &= ~(1 << OVS_KEY_ATTR_TCP); 635 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
677
678 if (is_mask && exact_5tuple && *exact_5tuple &&
679 (tcp_key->tcp_src != htons(0xffff) ||
680 tcp_key->tcp_dst != htons(0xffff)))
681 *exact_5tuple = false;
682 } 636 }
683 637
684 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { 638 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
685 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 639 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
686 SW_FLOW_KEY_PUT(match, ipv4.tp.flags, 640 SW_FLOW_KEY_PUT(match, tp.flags,
687 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), 641 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
688 is_mask); 642 is_mask);
689 } else { 643 } else {
690 SW_FLOW_KEY_PUT(match, ipv6.tp.flags, 644 SW_FLOW_KEY_PUT(match, tp.flags,
691 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), 645 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
692 is_mask); 646 is_mask);
693 } 647 }
@@ -698,40 +652,17 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
698 const struct ovs_key_udp *udp_key; 652 const struct ovs_key_udp *udp_key;
699 653
700 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); 654 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
701 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 655 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
702 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 656 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
703 udp_key->udp_src, is_mask);
704 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
705 udp_key->udp_dst, is_mask);
706 } else {
707 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
708 udp_key->udp_src, is_mask);
709 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
710 udp_key->udp_dst, is_mask);
711 }
712 attrs &= ~(1 << OVS_KEY_ATTR_UDP); 657 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
713
714 if (is_mask && exact_5tuple && *exact_5tuple &&
715 (udp_key->udp_src != htons(0xffff) ||
716 udp_key->udp_dst != htons(0xffff)))
717 *exact_5tuple = false;
718 } 658 }
719 659
720 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) { 660 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
721 const struct ovs_key_sctp *sctp_key; 661 const struct ovs_key_sctp *sctp_key;
722 662
723 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]); 663 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
724 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 664 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
725 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 665 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
726 sctp_key->sctp_src, is_mask);
727 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
728 sctp_key->sctp_dst, is_mask);
729 } else {
730 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
731 sctp_key->sctp_src, is_mask);
732 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
733 sctp_key->sctp_dst, is_mask);
734 }
735 attrs &= ~(1 << OVS_KEY_ATTR_SCTP); 666 attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
736 } 667 }
737 668
@@ -739,9 +670,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
739 const struct ovs_key_icmp *icmp_key; 670 const struct ovs_key_icmp *icmp_key;
740 671
741 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); 672 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
742 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 673 SW_FLOW_KEY_PUT(match, tp.src,
743 htons(icmp_key->icmp_type), is_mask); 674 htons(icmp_key->icmp_type), is_mask);
744 SW_FLOW_KEY_PUT(match, ipv4.tp.dst, 675 SW_FLOW_KEY_PUT(match, tp.dst,
745 htons(icmp_key->icmp_code), is_mask); 676 htons(icmp_key->icmp_code), is_mask);
746 attrs &= ~(1 << OVS_KEY_ATTR_ICMP); 677 attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
747 } 678 }
@@ -750,9 +681,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
750 const struct ovs_key_icmpv6 *icmpv6_key; 681 const struct ovs_key_icmpv6 *icmpv6_key;
751 682
752 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); 683 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
753 SW_FLOW_KEY_PUT(match, ipv6.tp.src, 684 SW_FLOW_KEY_PUT(match, tp.src,
754 htons(icmpv6_key->icmpv6_type), is_mask); 685 htons(icmpv6_key->icmpv6_type), is_mask);
755 SW_FLOW_KEY_PUT(match, ipv6.tp.dst, 686 SW_FLOW_KEY_PUT(match, tp.dst,
756 htons(icmpv6_key->icmpv6_code), is_mask); 687 htons(icmpv6_key->icmpv6_code), is_mask);
757 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); 688 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
758 } 689 }
@@ -800,7 +731,6 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
800 * attribute specifies the mask field of the wildcarded flow. 731 * attribute specifies the mask field of the wildcarded flow.
801 */ 732 */
802int ovs_nla_get_match(struct sw_flow_match *match, 733int ovs_nla_get_match(struct sw_flow_match *match,
803 bool *exact_5tuple,
804 const struct nlattr *key, 734 const struct nlattr *key,
805 const struct nlattr *mask) 735 const struct nlattr *mask)
806{ 736{
@@ -848,13 +778,10 @@ int ovs_nla_get_match(struct sw_flow_match *match,
848 } 778 }
849 } 779 }
850 780
851 err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false); 781 err = ovs_key_from_nlattrs(match, key_attrs, a, false);
852 if (err) 782 if (err)
853 return err; 783 return err;
854 784
855 if (exact_5tuple)
856 *exact_5tuple = true;
857
858 if (mask) { 785 if (mask) {
859 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); 786 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
860 if (err) 787 if (err)
@@ -892,7 +819,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
892 } 819 }
893 } 820 }
894 821
895 err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true); 822 err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
896 if (err) 823 if (err)
897 return err; 824 return err;
898 } else { 825 } else {
@@ -982,8 +909,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
982 goto nla_put_failure; 909 goto nla_put_failure;
983 910
984 eth_key = nla_data(nla); 911 eth_key = nla_data(nla);
985 memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); 912 ether_addr_copy(eth_key->eth_src, output->eth.src);
986 memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); 913 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
987 914
988 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { 915 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
989 __be16 eth_type; 916 __be16 eth_type;
@@ -1055,8 +982,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1055 arp_key->arp_sip = output->ipv4.addr.src; 982 arp_key->arp_sip = output->ipv4.addr.src;
1056 arp_key->arp_tip = output->ipv4.addr.dst; 983 arp_key->arp_tip = output->ipv4.addr.dst;
1057 arp_key->arp_op = htons(output->ip.proto); 984 arp_key->arp_op = htons(output->ip.proto);
1058 memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); 985 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
1059 memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); 986 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
1060 } 987 }
1061 988
1062 if ((swkey->eth.type == htons(ETH_P_IP) || 989 if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1070,19 +997,11 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1070 if (!nla) 997 if (!nla)
1071 goto nla_put_failure; 998 goto nla_put_failure;
1072 tcp_key = nla_data(nla); 999 tcp_key = nla_data(nla);
1073 if (swkey->eth.type == htons(ETH_P_IP)) { 1000 tcp_key->tcp_src = output->tp.src;
1074 tcp_key->tcp_src = output->ipv4.tp.src; 1001 tcp_key->tcp_dst = output->tp.dst;
1075 tcp_key->tcp_dst = output->ipv4.tp.dst; 1002 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1076 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS, 1003 output->tp.flags))
1077 output->ipv4.tp.flags)) 1004 goto nla_put_failure;
1078 goto nla_put_failure;
1079 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1080 tcp_key->tcp_src = output->ipv6.tp.src;
1081 tcp_key->tcp_dst = output->ipv6.tp.dst;
1082 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1083 output->ipv6.tp.flags))
1084 goto nla_put_failure;
1085 }
1086 } else if (swkey->ip.proto == IPPROTO_UDP) { 1005 } else if (swkey->ip.proto == IPPROTO_UDP) {
1087 struct ovs_key_udp *udp_key; 1006 struct ovs_key_udp *udp_key;
1088 1007
@@ -1090,13 +1009,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1090 if (!nla) 1009 if (!nla)
1091 goto nla_put_failure; 1010 goto nla_put_failure;
1092 udp_key = nla_data(nla); 1011 udp_key = nla_data(nla);
1093 if (swkey->eth.type == htons(ETH_P_IP)) { 1012 udp_key->udp_src = output->tp.src;
1094 udp_key->udp_src = output->ipv4.tp.src; 1013 udp_key->udp_dst = output->tp.dst;
1095 udp_key->udp_dst = output->ipv4.tp.dst;
1096 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1097 udp_key->udp_src = output->ipv6.tp.src;
1098 udp_key->udp_dst = output->ipv6.tp.dst;
1099 }
1100 } else if (swkey->ip.proto == IPPROTO_SCTP) { 1014 } else if (swkey->ip.proto == IPPROTO_SCTP) {
1101 struct ovs_key_sctp *sctp_key; 1015 struct ovs_key_sctp *sctp_key;
1102 1016
@@ -1104,13 +1018,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1104 if (!nla) 1018 if (!nla)
1105 goto nla_put_failure; 1019 goto nla_put_failure;
1106 sctp_key = nla_data(nla); 1020 sctp_key = nla_data(nla);
1107 if (swkey->eth.type == htons(ETH_P_IP)) { 1021 sctp_key->sctp_src = output->tp.src;
1108 sctp_key->sctp_src = swkey->ipv4.tp.src; 1022 sctp_key->sctp_dst = output->tp.dst;
1109 sctp_key->sctp_dst = swkey->ipv4.tp.dst;
1110 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1111 sctp_key->sctp_src = swkey->ipv6.tp.src;
1112 sctp_key->sctp_dst = swkey->ipv6.tp.dst;
1113 }
1114 } else if (swkey->eth.type == htons(ETH_P_IP) && 1023 } else if (swkey->eth.type == htons(ETH_P_IP) &&
1115 swkey->ip.proto == IPPROTO_ICMP) { 1024 swkey->ip.proto == IPPROTO_ICMP) {
1116 struct ovs_key_icmp *icmp_key; 1025 struct ovs_key_icmp *icmp_key;
@@ -1119,8 +1028,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1119 if (!nla) 1028 if (!nla)
1120 goto nla_put_failure; 1029 goto nla_put_failure;
1121 icmp_key = nla_data(nla); 1030 icmp_key = nla_data(nla);
1122 icmp_key->icmp_type = ntohs(output->ipv4.tp.src); 1031 icmp_key->icmp_type = ntohs(output->tp.src);
1123 icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); 1032 icmp_key->icmp_code = ntohs(output->tp.dst);
1124 } else if (swkey->eth.type == htons(ETH_P_IPV6) && 1033 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
1125 swkey->ip.proto == IPPROTO_ICMPV6) { 1034 swkey->ip.proto == IPPROTO_ICMPV6) {
1126 struct ovs_key_icmpv6 *icmpv6_key; 1035 struct ovs_key_icmpv6 *icmpv6_key;
@@ -1130,8 +1039,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1130 if (!nla) 1039 if (!nla)
1131 goto nla_put_failure; 1040 goto nla_put_failure;
1132 icmpv6_key = nla_data(nla); 1041 icmpv6_key = nla_data(nla);
1133 icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); 1042 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
1134 icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); 1043 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
1135 1044
1136 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || 1045 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
1137 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { 1046 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
@@ -1143,8 +1052,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1143 nd_key = nla_data(nla); 1052 nd_key = nla_data(nla);
1144 memcpy(nd_key->nd_target, &output->ipv6.nd.target, 1053 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
1145 sizeof(nd_key->nd_target)); 1054 sizeof(nd_key->nd_target));
1146 memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); 1055 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
1147 memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); 1056 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
1148 } 1057 }
1149 } 1058 }
1150 } 1059 }
@@ -1309,13 +1218,10 @@ static int validate_and_copy_sample(const struct nlattr *attr,
1309 1218
1310static int validate_tp_port(const struct sw_flow_key *flow_key) 1219static int validate_tp_port(const struct sw_flow_key *flow_key)
1311{ 1220{
1312 if (flow_key->eth.type == htons(ETH_P_IP)) { 1221 if ((flow_key->eth.type == htons(ETH_P_IP) ||
1313 if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) 1222 flow_key->eth.type == htons(ETH_P_IPV6)) &&
1314 return 0; 1223 (flow_key->tp.src || flow_key->tp.dst))
1315 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { 1224 return 0;
1316 if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
1317 return 0;
1318 }
1319 1225
1320 return -EINVAL; 1226 return -EINVAL;
1321} 1227}
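
The OVS_NLERR() hunks above add unsigned long long casts because u64 may be typedef'd as unsigned long on 64-bit architectures, which makes a bare "%llx" format warn. A small userspace illustration of the same fix (report is an illustrative name):

#include <stdint.h>
#include <stdio.h>

static void report(uint64_t key_attrs, uint64_t key_expected)
{
	/* The casts keep "%llx" correct whether uint64_t is
	 * unsigned long or unsigned long long on this platform. */
	printf("key_attrs=%llx expected=%llx\n",
	       (unsigned long long)key_attrs,
	       (unsigned long long)key_expected);
}
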
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index b31fbe28bc7a..440151045d39 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,7 +45,6 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
45int ovs_nla_get_flow_metadata(struct sw_flow *flow, 45int ovs_nla_get_flow_metadata(struct sw_flow *flow,
46 const struct nlattr *attr); 46 const struct nlattr *attr);
47int ovs_nla_get_match(struct sw_flow_match *match, 47int ovs_nla_get_match(struct sw_flow_match *match,
48 bool *exact_5tuple,
49 const struct nlattr *, 48 const struct nlattr *,
50 const struct nlattr *); 49 const struct nlattr *);
51 50
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 3c268b3d71c3..574c3abc9b30 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -48,6 +48,7 @@
48#define REHASH_INTERVAL (10 * 60 * HZ) 48#define REHASH_INTERVAL (10 * 60 * HZ)
49 49
50static struct kmem_cache *flow_cache; 50static struct kmem_cache *flow_cache;
51struct kmem_cache *flow_stats_cache __read_mostly;
51 52
52static u16 range_n_bytes(const struct sw_flow_key_range *range) 53static u16 range_n_bytes(const struct sw_flow_key_range *range)
53{ 54{
@@ -57,8 +58,10 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
57void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, 58void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
58 const struct sw_flow_mask *mask) 59 const struct sw_flow_mask *mask)
59{ 60{
60 const long *m = (long *)((u8 *)&mask->key + mask->range.start); 61 const long *m = (const long *)((const u8 *)&mask->key +
61 const long *s = (long *)((u8 *)src + mask->range.start); 62 mask->range.start);
63 const long *s = (const long *)((const u8 *)src +
64 mask->range.start);
62 long *d = (long *)((u8 *)dst + mask->range.start); 65 long *d = (long *)((u8 *)dst + mask->range.start);
63 int i; 66 int i;
64 67
@@ -70,10 +73,11 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
70 *d++ = *s++ & *m++; 73 *d++ = *s++ & *m++;
71} 74}
72 75
73struct sw_flow *ovs_flow_alloc(bool percpu_stats) 76struct sw_flow *ovs_flow_alloc(void)
74{ 77{
75 struct sw_flow *flow; 78 struct sw_flow *flow;
76 int cpu; 79 struct flow_stats *stats;
80 int node;
77 81
78 flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); 82 flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
79 if (!flow) 83 if (!flow)
@@ -81,27 +85,22 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
81 85
82 flow->sf_acts = NULL; 86 flow->sf_acts = NULL;
83 flow->mask = NULL; 87 flow->mask = NULL;
88 flow->stats_last_writer = NUMA_NO_NODE;
84 89
85 flow->stats.is_percpu = percpu_stats; 90 /* Initialize the default stat node. */
91 stats = kmem_cache_alloc_node(flow_stats_cache,
92 GFP_KERNEL | __GFP_ZERO, 0);
93 if (!stats)
94 goto err;
86 95
87 if (!percpu_stats) { 96 spin_lock_init(&stats->lock);
88 flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
89 if (!flow->stats.stat)
90 goto err;
91 97
92 spin_lock_init(&flow->stats.stat->lock); 98 RCU_INIT_POINTER(flow->stats[0], stats);
93 } else {
94 flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
95 if (!flow->stats.cpu_stats)
96 goto err;
97 99
98 for_each_possible_cpu(cpu) { 100 for_each_node(node)
99 struct flow_stats *cpu_stats; 101 if (node != 0)
102 RCU_INIT_POINTER(flow->stats[node], NULL);
100 103
101 cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
102 spin_lock_init(&cpu_stats->lock);
103 }
104 }
105 return flow; 104 return flow;
106err: 105err:
107 kmem_cache_free(flow_cache, flow); 106 kmem_cache_free(flow_cache, flow);
@@ -138,11 +137,13 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
138 137
139static void flow_free(struct sw_flow *flow) 138static void flow_free(struct sw_flow *flow)
140{ 139{
141 kfree((struct sf_flow_acts __force *)flow->sf_acts); 140 int node;
142 if (flow->stats.is_percpu) 141
143 free_percpu(flow->stats.cpu_stats); 142 kfree((struct sw_flow_actions __force *)flow->sf_acts);
144 else 143 for_each_node(node)
145 kfree(flow->stats.stat); 144 if (flow->stats[node])
145 kmem_cache_free(flow_stats_cache,
146 (struct flow_stats __force *)flow->stats[node]);
146 kmem_cache_free(flow_cache, flow); 147 kmem_cache_free(flow_cache, flow);
147} 148}
148 149
@@ -158,25 +159,6 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
158 if (!flow) 159 if (!flow)
159 return; 160 return;
160 161
161 if (flow->mask) {
162 struct sw_flow_mask *mask = flow->mask;
163
164 /* ovs-lock is required to protect mask-refcount and
165 * mask list.
166 */
167 ASSERT_OVSL();
168 BUG_ON(!mask->ref_count);
169 mask->ref_count--;
170
171 if (!mask->ref_count) {
172 list_del_rcu(&mask->list);
173 if (deferred)
174 kfree_rcu(mask, rcu);
175 else
176 kfree(mask);
177 }
178 }
179
180 if (deferred) 162 if (deferred)
181 call_rcu(&flow->rcu, rcu_free_flow_callback); 163 call_rcu(&flow->rcu, rcu_free_flow_callback);
182 else 164 else
@@ -375,7 +357,7 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
375static u32 flow_hash(const struct sw_flow_key *key, int key_start, 357static u32 flow_hash(const struct sw_flow_key *key, int key_start,
376 int key_end) 358 int key_end)
377{ 359{
378 u32 *hash_key = (u32 *)((u8 *)key + key_start); 360 const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
379 int hash_u32s = (key_end - key_start) >> 2; 361 int hash_u32s = (key_end - key_start) >> 2;
380 362
381 /* Make sure number of hash bytes are multiple of u32. */ 363 /* Make sure number of hash bytes are multiple of u32. */
@@ -397,8 +379,8 @@ static bool cmp_key(const struct sw_flow_key *key1,
397 const struct sw_flow_key *key2, 379 const struct sw_flow_key *key2,
398 int key_start, int key_end) 380 int key_start, int key_end)
399{ 381{
400 const long *cp1 = (long *)((u8 *)key1 + key_start); 382 const long *cp1 = (const long *)((const u8 *)key1 + key_start);
401 const long *cp2 = (long *)((u8 *)key2 + key_start); 383 const long *cp2 = (const long *)((const u8 *)key2 + key_start);
402 long diffs = 0; 384 long diffs = 0;
403 int i; 385 int i;
404 386
@@ -490,6 +472,25 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
490 return table_instance_rehash(ti, ti->n_buckets * 2); 472 return table_instance_rehash(ti, ti->n_buckets * 2);
491} 473}
492 474
475/* Remove 'mask' from the mask list, if it is not needed any more. */
476static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
477{
478 if (mask) {
479 /* ovs-lock is required to protect mask-refcount and
480 * mask list.
481 */
482 ASSERT_OVSL();
483 BUG_ON(!mask->ref_count);
484 mask->ref_count--;
485
486 if (!mask->ref_count) {
487 list_del_rcu(&mask->list);
488 kfree_rcu(mask, rcu);
489 }
490 }
491}
492
493/* Must be called with OVS mutex held. */
493void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) 494void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
494{ 495{
495 struct table_instance *ti = ovsl_dereference(table->ti); 496 struct table_instance *ti = ovsl_dereference(table->ti);
@@ -497,6 +498,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
497 BUG_ON(table->count == 0); 498 BUG_ON(table->count == 0);
498 hlist_del_rcu(&flow->hash_node[ti->node_ver]); 499 hlist_del_rcu(&flow->hash_node[ti->node_ver]);
499 table->count--; 500 table->count--;
501
502 /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
503 * accessible as long as the RCU read lock is held.
504 */
505 flow_mask_remove(table, flow->mask);
500} 506}
501 507
502static struct sw_flow_mask *mask_alloc(void) 508static struct sw_flow_mask *mask_alloc(void)
@@ -513,8 +519,8 @@ static struct sw_flow_mask *mask_alloc(void)
513static bool mask_equal(const struct sw_flow_mask *a, 519static bool mask_equal(const struct sw_flow_mask *a,
514 const struct sw_flow_mask *b) 520 const struct sw_flow_mask *b)
515{ 521{
516 u8 *a_ = (u8 *)&a->key + a->range.start; 522 const u8 *a_ = (const u8 *)&a->key + a->range.start;
517 u8 *b_ = (u8 *)&b->key + b->range.start; 523 const u8 *b_ = (const u8 *)&b->key + b->range.start;
518 524
519 return (a->range.end == b->range.end) 525 return (a->range.end == b->range.end)
520 && (a->range.start == b->range.start) 526 && (a->range.start == b->range.start)
@@ -559,6 +565,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
559 return 0; 565 return 0;
560} 566}
561 567
568/* Must be called with OVS mutex held. */
562int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, 569int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
563 struct sw_flow_mask *mask) 570 struct sw_flow_mask *mask)
564{ 571{
@@ -597,16 +604,28 @@ int ovs_flow_init(void)
597 BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); 604 BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
598 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 605 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
599 606
600 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, 607 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
601 0, NULL); 608 + (num_possible_nodes()
609 * sizeof(struct flow_stats *)),
610 0, 0, NULL);
602 if (flow_cache == NULL) 611 if (flow_cache == NULL)
603 return -ENOMEM; 612 return -ENOMEM;
604 613
614 flow_stats_cache
615 = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
616 0, SLAB_HWCACHE_ALIGN, NULL);
617 if (flow_stats_cache == NULL) {
618 kmem_cache_destroy(flow_cache);
619 flow_cache = NULL;
620 return -ENOMEM;
621 }
622
605 return 0; 623 return 0;
606} 624}
607 625
608/* Uninitializes the flow module. */ 626/* Uninitializes the flow module. */
609void ovs_flow_exit(void) 627void ovs_flow_exit(void)
610{ 628{
629 kmem_cache_destroy(flow_stats_cache);
611 kmem_cache_destroy(flow_cache); 630 kmem_cache_destroy(flow_cache);
612} 631}
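
ovs_flow_mask_key() masks the key one machine word at a time between range.start and range.end; the hunk above only constifies the source pointers. Reduced to its essentials (mask_key is an illustrative name; long alignment and a size that is a multiple of sizeof(long) are assumed, as the BUILD_BUG_ONs in ovs_flow_init() enforce):

#include <stddef.h>

static void mask_key(void *dst, const void *src, const void *mask,
		     size_t start, size_t end)
{
	const long *m = (const long *)((const char *)mask + start);
	const long *s = (const long *)((const char *)src + start);
	long *d = (long *)((char *)dst + start);
	size_t off;

	/* dst = src & mask over the bytes in [start, end). */
	for (off = start; off < end; off += sizeof(long))
		*d++ = *s++ & *m++;
}
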
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index baaeb101924d..ca8a5820f615 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -52,10 +52,12 @@ struct flow_table {
52 unsigned int count; 52 unsigned int count;
53}; 53};
54 54
55extern struct kmem_cache *flow_stats_cache;
56
55int ovs_flow_init(void); 57int ovs_flow_init(void);
56void ovs_flow_exit(void); 58void ovs_flow_exit(void);
57 59
58struct sw_flow *ovs_flow_alloc(bool percpu_stats); 60struct sw_flow *ovs_flow_alloc(void);
59void ovs_flow_free(struct sw_flow *, bool deferred); 61void ovs_flow_free(struct sw_flow *, bool deferred);
60 62
61int ovs_flow_tbl_init(struct flow_table *); 63int ovs_flow_tbl_init(struct flow_table *);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index ebb6e2442554..35ec4fed09e2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -172,7 +172,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
172 df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 172 df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
173 htons(IP_DF) : 0; 173 htons(IP_DF) : 0;
174 174
175 skb->local_df = 1; 175 skb->ignore_df = 1;
176 176
177 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, 177 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
178 OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, 178 OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
@@ -256,7 +256,7 @@ static void gre_tnl_destroy(struct vport *vport)
256 256
257 ovs_net = net_generic(net, ovs_net_id); 257 ovs_net = net_generic(net, ovs_net_id);
258 258
259 rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL); 259 RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
260 ovs_vport_deferred_free(vport); 260 ovs_vport_deferred_free(vport);
261 gre_exit(); 261 gre_exit();
262} 262}
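
gre_tnl_destroy() above switches rcu_assign_pointer(..., NULL) to RCU_INIT_POINTER(): storing NULL publishes no pointed-to data, so the release barrier is unnecessary. A C11-atomics analogue of the distinction (userspace sketch, not kernel code):

#include <stdatomic.h>
#include <stddef.h>

struct vport;
static _Atomic(struct vport *) gre_vport;

static void publish(struct vport *v)
{
	/* Readers must observe v's fields before the pointer itself,
	 * so publishing a real object needs a release store. */
	atomic_store_explicit(&gre_vport, v, memory_order_release);
}

static void unpublish(void)
{
	/* NULL carries no payload to order against; a relaxed store
	 * is enough, which is what RCU_INIT_POINTER() expresses. */
	atomic_store_explicit(&gre_vport, NULL, memory_order_relaxed);
}
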
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 729c68763fe7..789af9280e77 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -130,7 +130,7 @@ static void do_setup(struct net_device *netdev)
130 netdev->priv_flags &= ~IFF_TX_SKB_SHARING; 130 netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
131 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 131 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
132 netdev->destructor = internal_dev_destructor; 132 netdev->destructor = internal_dev_destructor;
133 SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops); 133 netdev->ethtool_ops = &internal_dev_ethtool_ops;
134 netdev->tx_queue_len = 0; 134 netdev->tx_queue_len = 0;
135 135
136 netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | 136 netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
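
The do_setup() change drops the SET_ETHTOOL_OPS() macro in favour of a plain assignment; the macro was only ever a thin wrapper around exactly that store. Sketch with illustrative stand-in types:

struct ethtool_ops;

struct net_device_sketch {
	const struct ethtool_ops *ethtool_ops;
};

static void setup(struct net_device_sketch *dev,
		  const struct ethtool_ops *ops)
{
	dev->ethtool_ops = ops;	/* what SET_ETHTOOL_OPS(dev, ops) expanded to */
}
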
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index e797a50ac2be..0edbd95c60e7 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -122,7 +122,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
122 vxlan_port = vxlan_vport(vport); 122 vxlan_port = vxlan_vport(vport);
123 strncpy(vxlan_port->name, parms->name, IFNAMSIZ); 123 strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
124 124
125 vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, false); 125 vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, 0);
126 if (IS_ERR(vs)) { 126 if (IS_ERR(vs)) {
127 ovs_vport_free(vport); 127 ovs_vport_free(vport);
128 return (void *)vs; 128 return (void *)vs;
@@ -170,7 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
170 df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 170 df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
171 htons(IP_DF) : 0; 171 htons(IP_DF) : 0;
172 172
173 skb->local_df = 1; 173 skb->ignore_df = 1;
174 174
175 inet_get_local_port_range(net, &port_min, &port_max); 175 inet_get_local_port_range(net, &port_min, &port_max);
176 src_port = vxlan_src_port(port_min, port_max, skb); 176 src_port = vxlan_src_port(port_min, port_max, skb);
@@ -180,7 +180,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
180 OVS_CB(skb)->tun_key->ipv4_tos, 180 OVS_CB(skb)->tun_key->ipv4_tos,
181 OVS_CB(skb)->tun_key->ipv4_ttl, df, 181 OVS_CB(skb)->tun_key->ipv4_ttl, df,
182 src_port, dst_port, 182 src_port, dst_port,
183 htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8)); 183 htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8),
184 false);
184 if (err < 0) 185 if (err < 0)
185 ip_rt_put(rt); 186 ip_rt_put(rt);
186error: 187error:
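
vxlan_tnl_send() keeps encoding the 24-bit VNI in the upper bits of the 32-bit VXLAN header word, hence the shift by 8 before the byte swap; the hunk itself only appends the new boolean argument. A userspace sketch of the encoding, assuming the tunnel id fits in 24 bits (vni_field is an illustrative name):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t vni_field(uint64_t tun_id)
{
	/* VXLAN carries the VNI in bits 31..8 of the header word. */
	return htonl((uint32_t)(tun_id << 8));
}

int main(void)
{
	printf("vni field for id 42: 0x%08x\n", ntohl(vni_field(42)));
	return 0;
}
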
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index d7e50a17396c..8d721e62f388 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -172,7 +172,7 @@ void ovs_vport_deferred_free(struct vport *vport);
172 */ 172 */
173static inline void *vport_priv(const struct vport *vport) 173static inline void *vport_priv(const struct vport *vport)
174{ 174{
175 return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN); 175 return (u8 *)(uintptr_t)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
176} 176}
177 177
178/** 178/**
@@ -185,9 +185,9 @@ static inline void *vport_priv(const struct vport *vport)
185 * the result of a hash table lookup. @priv must point to the start of the 185 * the result of a hash table lookup. @priv must point to the start of the
186 * private data area. 186 * private data area.
187 */ 187 */
188static inline struct vport *vport_from_priv(const void *priv) 188static inline struct vport *vport_from_priv(void *priv)
189{ 189{
190 return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN)); 190 return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
191} 191}
192 192
193void ovs_vport_receive(struct vport *, struct sk_buff *, 193void ovs_vport_receive(struct vport *, struct sk_buff *,
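
vport_priv() and vport_from_priv() above are inverse offset computations: the private area starts right after the vport header, rounded up to VPORT_ALIGN, and the patch makes the arithmetic standard C by going through u8 * rather than void *. The same layout in a standalone sketch (names and the VPORT_ALIGN value are illustrative):

#include <stdint.h>

#define VPORT_ALIGN	8
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct vport_hdr { int port_no; };

static void *priv_of(struct vport_hdr *v)
{
	return (uint8_t *)v + ALIGN_UP(sizeof(*v), VPORT_ALIGN);
}

static struct vport_hdr *vport_of(void *priv)
{
	return (struct vport_hdr *)((uint8_t *)priv -
			ALIGN_UP(sizeof(struct vport_hdr), VPORT_ALIGN));
}
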
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 37be6e226d1b..1dde91e3dc70 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -298,7 +298,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
298 rds_ib_stats_inc(s_ib_tx_cq_event); 298 rds_ib_stats_inc(s_ib_tx_cq_event);
299 299
300 if (wc.wr_id == RDS_IB_ACK_WR_ID) { 300 if (wc.wr_id == RDS_IB_ACK_WR_ID) {
301 if (ic->i_ack_queued + HZ/2 < jiffies) 301 if (time_after(jiffies, ic->i_ack_queued + HZ/2))
302 rds_ib_stats_inc(s_ib_tx_stalled); 302 rds_ib_stats_inc(s_ib_tx_stalled);
303 rds_ib_ack_send_complete(ic); 303 rds_ib_ack_send_complete(ic);
304 continue; 304 continue;
@@ -315,7 +315,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
315 315
316 rm = rds_ib_send_unmap_op(ic, send, wc.status); 316 rm = rds_ib_send_unmap_op(ic, send, wc.status);
317 317
318 if (send->s_queued + HZ/2 < jiffies) 318 if (time_after(jiffies, send->s_queued + HZ/2))
319 rds_ib_stats_inc(s_ib_tx_stalled); 319 rds_ib_stats_inc(s_ib_tx_stalled);
320 320
321 if (send->s_op) { 321 if (send->s_op) {
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index e40c3c5db2c4..9105ea03aec5 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -232,7 +232,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
232 } 232 }
233 233
234 if (wc.wr_id == RDS_IW_ACK_WR_ID) { 234 if (wc.wr_id == RDS_IW_ACK_WR_ID) {
235 if (ic->i_ack_queued + HZ/2 < jiffies) 235 if (time_after(jiffies, ic->i_ack_queued + HZ/2))
236 rds_iw_stats_inc(s_iw_tx_stalled); 236 rds_iw_stats_inc(s_iw_tx_stalled);
237 rds_iw_ack_send_complete(ic); 237 rds_iw_ack_send_complete(ic);
238 continue; 238 continue;
@@ -267,7 +267,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
267 267
268 send->s_wr.opcode = 0xdead; 268 send->s_wr.opcode = 0xdead;
269 send->s_wr.num_sge = 1; 269 send->s_wr.num_sge = 1;
270 if (send->s_queued + HZ/2 < jiffies) 270 if (time_after(jiffies, send->s_queued + HZ/2))
271 rds_iw_stats_inc(s_iw_tx_stalled); 271 rds_iw_stats_inc(s_iw_tx_stalled);
272 272
273 /* If a RDMA operation produced an error, signal this right 273 /* If a RDMA operation produced an error, signal this right
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 89c91515ed0c..139239d2cb22 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -111,8 +111,7 @@ static struct ctl_table rds_iw_sysctl_table[] = {
111 111
112void rds_iw_sysctl_exit(void) 112void rds_iw_sysctl_exit(void)
113{ 113{
114 if (rds_iw_sysctl_hdr) 114 unregister_net_sysctl_table(rds_iw_sysctl_hdr);
115 unregister_net_sysctl_table(rds_iw_sysctl_hdr);
116} 115}
117 116
118int rds_iw_sysctl_init(void) 117int rds_iw_sysctl_init(void)
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index c2be901d19ee..6cd9d1deafc3 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -168,7 +168,7 @@ static int rds_rdma_listen_init(void)
168 return ret; 168 return ret;
169 } 169 }
170 170
171 sin.sin_family = AF_INET, 171 sin.sin_family = AF_INET;
172 sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); 172 sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
173 sin.sin_port = (__force u16)htons(RDS_PORT); 173 sin.sin_port = (__force u16)htons(RDS_PORT);
174 174
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index b5cb2aa08f33..c3b0cd43eb56 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -94,8 +94,7 @@ static struct ctl_table rds_sysctl_rds_table[] = {
94 94
95void rds_sysctl_exit(void) 95void rds_sysctl_exit(void)
96{ 96{
97 if (rds_sysctl_reg_table) 97 unregister_net_sysctl_table(rds_sysctl_reg_table);
98 unregister_net_sysctl_table(rds_sysctl_reg_table);
99} 98}
100 99
101int rds_sysctl_init(void) 100int rds_sysctl_init(void)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 4e638f851185..23ab4dcd1d9f 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -153,7 +153,7 @@ int rds_tcp_listen_init(void)
153 sock->sk->sk_data_ready = rds_tcp_listen_data_ready; 153 sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
154 write_unlock_bh(&sock->sk->sk_callback_lock); 154 write_unlock_bh(&sock->sk->sk_callback_lock);
155 155
156 sin.sin_family = PF_INET, 156 sin.sin_family = PF_INET;
157 sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); 157 sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
158 sin.sin_port = (__force u16)htons(RDS_TCP_PORT); 158 sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
159 159
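
The RDS hunks above replace open-coded jiffies comparisons with time_after(), which stays correct when the tick counter wraps. A self-contained rendition of the macro's trick:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is later than b", as in include/linux/jiffies.h. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	jiffies_t queued = (jiffies_t)-10;	/* just before the counter wraps */
	jiffies_t now = 5;			/* shortly after the wrap */

	/* The naive comparison misfires across the wrap; the signed
	 * subtraction form does not. */
	printf("naive: %d, time_after: %d\n",
	       now > queued + 2, (int)time_after(now, queued + 2));
	return 0;
}
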
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index bd2a5b90400c..14c98e48f261 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -36,8 +36,6 @@ struct rfkill_gpio_data {
36 struct gpio_desc *shutdown_gpio; 36 struct gpio_desc *shutdown_gpio;
37 37
38 struct rfkill *rfkill_dev; 38 struct rfkill *rfkill_dev;
39 char *reset_name;
40 char *shutdown_name;
41 struct clk *clk; 39 struct clk *clk;
42 40
43 bool clk_enabled; 41 bool clk_enabled;
@@ -47,17 +45,14 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
47{ 45{
48 struct rfkill_gpio_data *rfkill = data; 46 struct rfkill_gpio_data *rfkill = data;
49 47
50 if (blocked) { 48 if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
51 gpiod_set_value(rfkill->shutdown_gpio, 0); 49 clk_enable(rfkill->clk);
52 gpiod_set_value(rfkill->reset_gpio, 0); 50
53 if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled) 51 gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
54 clk_disable(rfkill->clk); 52 gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
55 } else { 53
56 if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled) 54 if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
57 clk_enable(rfkill->clk); 55 clk_disable(rfkill->clk);
58 gpiod_set_value(rfkill->reset_gpio, 1);
59 gpiod_set_value(rfkill->shutdown_gpio, 1);
60 }
61 56
62 rfkill->clk_enabled = blocked; 57 rfkill->clk_enabled = blocked;
63 58
@@ -87,10 +82,8 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
87{ 82{
88 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; 83 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
89 struct rfkill_gpio_data *rfkill; 84 struct rfkill_gpio_data *rfkill;
90 const char *clk_name = NULL;
91 struct gpio_desc *gpio; 85 struct gpio_desc *gpio;
92 int ret; 86 int ret;
93 int len;
94 87
95 rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL); 88 rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
96 if (!rfkill) 89 if (!rfkill)
@@ -101,28 +94,15 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
101 if (ret) 94 if (ret)
102 return ret; 95 return ret;
103 } else if (pdata) { 96 } else if (pdata) {
104 clk_name = pdata->power_clk_name;
105 rfkill->name = pdata->name; 97 rfkill->name = pdata->name;
106 rfkill->type = pdata->type; 98 rfkill->type = pdata->type;
107 } else { 99 } else {
108 return -ENODEV; 100 return -ENODEV;
109 } 101 }
110 102
111 len = strlen(rfkill->name); 103 rfkill->clk = devm_clk_get(&pdev->dev, NULL);
112 rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL);
113 if (!rfkill->reset_name)
114 return -ENOMEM;
115
116 rfkill->shutdown_name = devm_kzalloc(&pdev->dev, len + 10, GFP_KERNEL);
117 if (!rfkill->shutdown_name)
118 return -ENOMEM;
119 104
120 snprintf(rfkill->reset_name, len + 6 , "%s_reset", rfkill->name); 105 gpio = devm_gpiod_get_index(&pdev->dev, "reset", 0);
121 snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", rfkill->name);
122
123 rfkill->clk = devm_clk_get(&pdev->dev, clk_name);
124
125 gpio = devm_gpiod_get_index(&pdev->dev, rfkill->reset_name, 0);
126 if (!IS_ERR(gpio)) { 106 if (!IS_ERR(gpio)) {
127 ret = gpiod_direction_output(gpio, 0); 107 ret = gpiod_direction_output(gpio, 0);
128 if (ret) 108 if (ret)
@@ -130,7 +110,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
130 rfkill->reset_gpio = gpio; 110 rfkill->reset_gpio = gpio;
131 } 111 }
132 112
133 gpio = devm_gpiod_get_index(&pdev->dev, rfkill->shutdown_name, 1); 113 gpio = devm_gpiod_get_index(&pdev->dev, "shutdown", 1);
134 if (!IS_ERR(gpio)) { 114 if (!IS_ERR(gpio)) {
135 ret = gpiod_direction_output(gpio, 0); 115 ret = gpiod_direction_output(gpio, 0);
136 if (ret) 116 if (ret)
@@ -146,14 +126,6 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
146 return -EINVAL; 126 return -EINVAL;
147 } 127 }
148 128
149 if (pdata && pdata->gpio_runtime_setup) {
150 ret = pdata->gpio_runtime_setup(pdev);
151 if (ret) {
152 dev_err(&pdev->dev, "can't set up gpio\n");
153 return ret;
154 }
155 }
156
157 rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev, 129 rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
158 rfkill->type, &rfkill_gpio_ops, 130 rfkill->type, &rfkill_gpio_ops,
159 rfkill); 131 rfkill);
@@ -174,20 +146,23 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
174static int rfkill_gpio_remove(struct platform_device *pdev) 146static int rfkill_gpio_remove(struct platform_device *pdev)
175{ 147{
176 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); 148 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
177 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
178 149
179 if (pdata && pdata->gpio_runtime_close)
180 pdata->gpio_runtime_close(pdev);
181 rfkill_unregister(rfkill->rfkill_dev); 150 rfkill_unregister(rfkill->rfkill_dev);
182 rfkill_destroy(rfkill->rfkill_dev); 151 rfkill_destroy(rfkill->rfkill_dev);
183 152
184 return 0; 153 return 0;
185} 154}
186 155
156#ifdef CONFIG_ACPI
187static const struct acpi_device_id rfkill_acpi_match[] = { 157static const struct acpi_device_id rfkill_acpi_match[] = {
158 { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
159 { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
160 { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
188 { "BCM4752", RFKILL_TYPE_GPS }, 161 { "BCM4752", RFKILL_TYPE_GPS },
162 { "LNV4752", RFKILL_TYPE_GPS },
189 { }, 163 { },
190}; 164};
165#endif
191 166
192static struct platform_driver rfkill_gpio_driver = { 167static struct platform_driver rfkill_gpio_driver = {
193 .probe = rfkill_gpio_probe, 168 .probe = rfkill_gpio_probe,
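
The rewritten rfkill_gpio_set_power() trades the if/else for one straight-line sequence while preserving the ordering: the clock comes up before the reset/shutdown lines are raised, and goes down only after they are lowered. Sketch with stand-in helpers (struct radio, lines_set and friends are illustrative, not the driver's API):

#include <stdbool.h>

struct radio {
	bool clk_on;
	bool lines_up;
};

static void clk_enable(struct radio *r)  { r->clk_on = true; }
static void clk_disable(struct radio *r) { r->clk_on = false; }
static void lines_set(struct radio *r, bool up) { r->lines_up = up; }

static void set_power(struct radio *r, bool blocked)
{
	if (!blocked && !r->clk_on)
		clk_enable(r);		/* clock up before raising the lines */

	lines_set(r, !blocked);		/* shutdown and reset follow !blocked */

	if (blocked && r->clk_on)
		clk_disable(r);		/* clock down after lowering the lines */
}
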
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bdbdb1a7920a..45527e6b52db 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -134,7 +134,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
134 int err; 134 int err;
135 int tp_created = 0; 135 int tp_created = 0;
136 136
137 if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) 137 if ((n->nlmsg_type != RTM_GETTFILTER) &&
138 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
138 return -EPERM; 139 return -EPERM;
139 140
140replay: 141replay:
@@ -317,7 +318,8 @@ replay:
317 } 318 }
318 } 319 }
319 320
320 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh); 321 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
322 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
321 if (err == 0) { 323 if (err == 0) {
322 if (tp_created) { 324 if (tp_created) {
323 spin_lock_bh(root_lock); 325 spin_lock_bh(root_lock);
@@ -504,7 +506,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
504EXPORT_SYMBOL(tcf_exts_destroy); 506EXPORT_SYMBOL(tcf_exts_destroy);
505 507
506int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 508int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
507 struct nlattr *rate_tlv, struct tcf_exts *exts) 509 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
508{ 510{
509#ifdef CONFIG_NET_CLS_ACT 511#ifdef CONFIG_NET_CLS_ACT
510 { 512 {
@@ -513,7 +515,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
513 INIT_LIST_HEAD(&exts->actions); 515 INIT_LIST_HEAD(&exts->actions);
514 if (exts->police && tb[exts->police]) { 516 if (exts->police && tb[exts->police]) {
515 act = tcf_action_init_1(net, tb[exts->police], rate_tlv, 517 act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
516 "police", TCA_ACT_NOREPLACE, 518 "police", ovr,
517 TCA_ACT_BIND); 519 TCA_ACT_BIND);
518 if (IS_ERR(act)) 520 if (IS_ERR(act))
519 return PTR_ERR(act); 521 return PTR_ERR(act);
@@ -523,7 +525,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
523 } else if (exts->action && tb[exts->action]) { 525 } else if (exts->action && tb[exts->action]) {
524 int err; 526 int err;
525 err = tcf_action_init(net, tb[exts->action], rate_tlv, 527 err = tcf_action_init(net, tb[exts->action], rate_tlv,
526 NULL, TCA_ACT_NOREPLACE, 528 NULL, ovr,
527 TCA_ACT_BIND, &exts->actions); 529 TCA_ACT_BIND, &exts->actions);
528 if (err) 530 if (err)
529 return err; 531 return err;
@@ -543,14 +545,12 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
543 struct tcf_exts *src) 545 struct tcf_exts *src)
544{ 546{
545#ifdef CONFIG_NET_CLS_ACT 547#ifdef CONFIG_NET_CLS_ACT
546 if (!list_empty(&src->actions)) { 548 LIST_HEAD(tmp);
547 LIST_HEAD(tmp); 549 tcf_tree_lock(tp);
548 tcf_tree_lock(tp); 550 list_splice_init(&dst->actions, &tmp);
549 list_splice_init(&dst->actions, &tmp); 551 list_splice(&src->actions, &dst->actions);
550 list_splice(&src->actions, &dst->actions); 552 tcf_tree_unlock(tp);
551 tcf_tree_unlock(tp); 553 tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
552 tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
553 }
554#endif 554#endif
555} 555}
556EXPORT_SYMBOL(tcf_exts_change); 556EXPORT_SYMBOL(tcf_exts_change);
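
tc_ctl_tfilter() above now derives the replace/no-replace decision from the netlink header once and threads it down to tcf_exts_validate() as the new ovr argument. The mapping in isolation (the constant value is from include/uapi/linux/netlink.h; action_mode is an illustrative name):

#define NLM_F_CREATE	0x400	/* from include/uapi/linux/netlink.h */

enum { ACT_NOREPLACE, ACT_REPLACE };

static int action_mode(unsigned int nlmsg_flags)
{
	/* A request asking to create must not silently replace an
	 * existing action; anything else may replace in place. */
	return (nlmsg_flags & NLM_F_CREATE) ? ACT_NOREPLACE : ACT_REPLACE;
}
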
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index e98ca99c202b..0ae1813e3e90 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -130,14 +130,14 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
130static int basic_set_parms(struct net *net, struct tcf_proto *tp, 130static int basic_set_parms(struct net *net, struct tcf_proto *tp,
131 struct basic_filter *f, unsigned long base, 131 struct basic_filter *f, unsigned long base,
132 struct nlattr **tb, 132 struct nlattr **tb,
133 struct nlattr *est) 133 struct nlattr *est, bool ovr)
134{ 134{
135 int err; 135 int err;
136 struct tcf_exts e; 136 struct tcf_exts e;
137 struct tcf_ematch_tree t; 137 struct tcf_ematch_tree t;
138 138
139 tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); 139 tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
140 err = tcf_exts_validate(net, tp, tb, est, &e); 140 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
141 if (err < 0) 141 if (err < 0)
142 return err; 142 return err;
143 143
@@ -161,7 +161,7 @@ errout:
161 161
162static int basic_change(struct net *net, struct sk_buff *in_skb, 162static int basic_change(struct net *net, struct sk_buff *in_skb,
163 struct tcf_proto *tp, unsigned long base, u32 handle, 163 struct tcf_proto *tp, unsigned long base, u32 handle,
164 struct nlattr **tca, unsigned long *arg) 164 struct nlattr **tca, unsigned long *arg, bool ovr)
165{ 165{
166 int err; 166 int err;
167 struct basic_head *head = tp->root; 167 struct basic_head *head = tp->root;
@@ -179,7 +179,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
179 if (f != NULL) { 179 if (f != NULL) {
180 if (handle && f->handle != handle) 180 if (handle && f->handle != handle)
181 return -EINVAL; 181 return -EINVAL;
182 return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]); 182 return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
183 } 183 }
184 184
185 err = -ENOBUFS; 185 err = -ENOBUFS;
@@ -206,7 +206,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
206 f->handle = head->hgenerator; 206 f->handle = head->hgenerator;
207 } 207 }
208 208
209 err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]); 209 err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
210 if (err < 0) 210 if (err < 0)
211 goto errout; 211 goto errout;
212 212
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8e3cf49118e3..13f64df2c710 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -156,11 +156,11 @@ static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
156static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, 156static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
157 struct cls_bpf_prog *prog, 157 struct cls_bpf_prog *prog,
158 unsigned long base, struct nlattr **tb, 158 unsigned long base, struct nlattr **tb,
159 struct nlattr *est) 159 struct nlattr *est, bool ovr)
160{ 160{
161 struct sock_filter *bpf_ops, *bpf_old; 161 struct sock_filter *bpf_ops, *bpf_old;
162 struct tcf_exts exts; 162 struct tcf_exts exts;
163 struct sock_fprog tmp; 163 struct sock_fprog_kern tmp;
164 struct sk_filter *fp, *fp_old; 164 struct sk_filter *fp, *fp_old;
165 u16 bpf_size, bpf_len; 165 u16 bpf_size, bpf_len;
166 u32 classid; 166 u32 classid;
@@ -170,7 +170,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
170 return -EINVAL; 170 return -EINVAL;
171 171
172 tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); 172 tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
173 ret = tcf_exts_validate(net, tp, tb, est, &exts); 173 ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
174 if (ret < 0) 174 if (ret < 0)
175 return ret; 175 return ret;
176 176
@@ -191,7 +191,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
191 memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); 191 memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
192 192
193 tmp.len = bpf_len; 193 tmp.len = bpf_len;
194 tmp.filter = (struct sock_filter __user *) bpf_ops; 194 tmp.filter = bpf_ops;
195 195
196 ret = sk_unattached_filter_create(&fp, &tmp); 196 ret = sk_unattached_filter_create(&fp, &tmp);
197 if (ret) 197 if (ret)
@@ -242,7 +242,7 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
242static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, 242static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
243 struct tcf_proto *tp, unsigned long base, 243 struct tcf_proto *tp, unsigned long base,
244 u32 handle, struct nlattr **tca, 244 u32 handle, struct nlattr **tca,
245 unsigned long *arg) 245 unsigned long *arg, bool ovr)
246{ 246{
247 struct cls_bpf_head *head = tp->root; 247 struct cls_bpf_head *head = tp->root;
248 struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg; 248 struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
@@ -260,7 +260,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
260 if (handle && prog->handle != handle) 260 if (handle && prog->handle != handle)
261 return -EINVAL; 261 return -EINVAL;
262 return cls_bpf_modify_existing(net, tp, prog, base, tb, 262 return cls_bpf_modify_existing(net, tp, prog, base, tb,
263 tca[TCA_RATE]); 263 tca[TCA_RATE], ovr);
264 } 264 }
265 265
266 prog = kzalloc(sizeof(*prog), GFP_KERNEL); 266 prog = kzalloc(sizeof(*prog), GFP_KERNEL);
@@ -277,7 +277,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
277 goto errout; 277 goto errout;
278 } 278 }
279 279
280 ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]); 280 ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
281 if (ret < 0) 281 if (ret < 0)
282 goto errout; 282 goto errout;
283 283
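
The sock_fprog to sock_fprog_kern switch above removes a bogus __user cast: cls_bpf assembles its filter in kernel memory, while the uapi struct sock_fprog declares the program pointer as a userspace pointer. The two layouts as they appear in 3.16 (quoted from memory, so treat as a sketch):

	/* uapi/linux/filter.h - userspace programs (SO_ATTACH_FILTER): */
	struct sock_fprog {
		unsigned short len;		/* number of filter blocks */
		struct sock_filter __user *filter;
	};

	/* linux/filter.h - in-kernel twin for unattached filters: */
	struct sock_fprog_kern {
		u16 len;
		struct sock_filter *filter;	/* plain kernel pointer */
	};

sk_unattached_filter_create() now takes the _kern variant, so the cast, and the sparse warning it was hiding, goes away.
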
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 8e2158ab551c..cacf01bd04f0 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -83,7 +83,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
83static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, 83static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
84 struct tcf_proto *tp, unsigned long base, 84 struct tcf_proto *tp, unsigned long base,
85 u32 handle, struct nlattr **tca, 85 u32 handle, struct nlattr **tca,
86 unsigned long *arg) 86 unsigned long *arg, bool ovr)
87{ 87{
88 struct nlattr *tb[TCA_CGROUP_MAX + 1]; 88 struct nlattr *tb[TCA_CGROUP_MAX + 1];
89 struct cls_cgroup_head *head = tp->root; 89 struct cls_cgroup_head *head = tp->root;
@@ -119,7 +119,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
119 return err; 119 return err;
120 120
121 tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); 121 tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
122 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e); 122 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
123 if (err < 0) 123 if (err < 0)
124 return err; 124 return err;
125 125
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 257029c54332..35be16f7c192 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -349,7 +349,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
349static int flow_change(struct net *net, struct sk_buff *in_skb, 349static int flow_change(struct net *net, struct sk_buff *in_skb,
350 struct tcf_proto *tp, unsigned long base, 350 struct tcf_proto *tp, unsigned long base,
351 u32 handle, struct nlattr **tca, 351 u32 handle, struct nlattr **tca,
352 unsigned long *arg) 352 unsigned long *arg, bool ovr)
353{ 353{
354 struct flow_head *head = tp->root; 354 struct flow_head *head = tp->root;
355 struct flow_filter *f; 355 struct flow_filter *f;
@@ -393,7 +393,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
393 } 393 }
394 394
395 tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE); 395 tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
396 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e); 396 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
397 if (err < 0) 397 if (err < 0)
398 return err; 398 return err;
399 399
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 63a3ce75c02e..861b03ccfed0 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -169,7 +169,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
169 169
170static int 170static int
171fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, 171fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
172 struct nlattr **tb, struct nlattr **tca, unsigned long base) 172 struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
173{ 173{
174 struct fw_head *head = tp->root; 174 struct fw_head *head = tp->root;
175 struct tcf_exts e; 175 struct tcf_exts e;
@@ -177,7 +177,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
177 int err; 177 int err;
178 178
179 tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE); 179 tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
180 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e); 180 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
181 if (err < 0) 181 if (err < 0)
182 return err; 182 return err;
183 183
@@ -218,7 +218,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
218 struct tcf_proto *tp, unsigned long base, 218 struct tcf_proto *tp, unsigned long base,
219 u32 handle, 219 u32 handle,
220 struct nlattr **tca, 220 struct nlattr **tca,
221 unsigned long *arg) 221 unsigned long *arg, bool ovr)
222{ 222{
223 struct fw_head *head = tp->root; 223 struct fw_head *head = tp->root;
224 struct fw_filter *f = (struct fw_filter *) *arg; 224 struct fw_filter *f = (struct fw_filter *) *arg;
@@ -236,7 +236,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
236 if (f != NULL) { 236 if (f != NULL) {
237 if (f->id != handle && handle) 237 if (f->id != handle && handle)
238 return -EINVAL; 238 return -EINVAL;
239 return fw_change_attrs(net, tp, f, tb, tca, base); 239 return fw_change_attrs(net, tp, f, tb, tca, base, ovr);
240 } 240 }
241 241
242 if (!handle) 242 if (!handle)
@@ -264,7 +264,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
264 tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE); 264 tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
265 f->id = handle; 265 f->id = handle;
266 266
267 err = fw_change_attrs(net, tp, f, tb, tca, base); 267 err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
268 if (err < 0) 268 if (err < 0)
269 goto errout; 269 goto errout;
270 270
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 1ad3068f2ce1..dd9fc2523c76 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -333,7 +333,8 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
333static int route4_set_parms(struct net *net, struct tcf_proto *tp, 333static int route4_set_parms(struct net *net, struct tcf_proto *tp,
334 unsigned long base, struct route4_filter *f, 334 unsigned long base, struct route4_filter *f,
335 u32 handle, struct route4_head *head, 335 u32 handle, struct route4_head *head,
336 struct nlattr **tb, struct nlattr *est, int new) 336 struct nlattr **tb, struct nlattr *est, int new,
337 bool ovr)
337{ 338{
338 int err; 339 int err;
339 u32 id = 0, to = 0, nhandle = 0x8000; 340 u32 id = 0, to = 0, nhandle = 0x8000;
@@ -343,7 +344,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
343 struct tcf_exts e; 344 struct tcf_exts e;
344 345
345 tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); 346 tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
346 err = tcf_exts_validate(net, tp, tb, est, &e); 347 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
347 if (err < 0) 348 if (err < 0)
348 return err; 349 return err;
349 350
@@ -428,7 +429,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
428 struct tcf_proto *tp, unsigned long base, 429 struct tcf_proto *tp, unsigned long base,
429 u32 handle, 430 u32 handle,
430 struct nlattr **tca, 431 struct nlattr **tca,
431 unsigned long *arg) 432 unsigned long *arg, bool ovr)
432{ 433{
433 struct route4_head *head = tp->root; 434 struct route4_head *head = tp->root;
434 struct route4_filter *f, *f1, **fp; 435 struct route4_filter *f, *f1, **fp;
@@ -455,7 +456,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
455 old_handle = f->handle; 456 old_handle = f->handle;
456 457
457 err = route4_set_parms(net, tp, base, f, handle, head, tb, 458 err = route4_set_parms(net, tp, base, f, handle, head, tb,
458 tca[TCA_RATE], 0); 459 tca[TCA_RATE], 0, ovr);
459 if (err < 0) 460 if (err < 0)
460 return err; 461 return err;
461 462
@@ -479,7 +480,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
479 480
480 tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); 481 tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
481 err = route4_set_parms(net, tp, base, f, handle, head, tb, 482 err = route4_set_parms(net, tp, base, f, handle, head, tb,
482 tca[TCA_RATE], 1); 483 tca[TCA_RATE], 1, ovr);
483 if (err < 0) 484 if (err < 0)
484 goto errout; 485 goto errout;
485 486
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 19f8e5dfa8bd..1020e233a5d6 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -415,7 +415,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
415 struct tcf_proto *tp, unsigned long base, 415 struct tcf_proto *tp, unsigned long base,
416 u32 handle, 416 u32 handle,
417 struct nlattr **tca, 417 struct nlattr **tca,
418 unsigned long *arg) 418 unsigned long *arg, bool ovr)
419{ 419{
420 struct rsvp_head *data = tp->root; 420 struct rsvp_head *data = tp->root;
421 struct rsvp_filter *f, **fp; 421 struct rsvp_filter *f, **fp;
@@ -436,7 +436,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
436 return err; 436 return err;
437 437
438 tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE); 438 tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
439 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e); 439 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
440 if (err < 0) 440 if (err < 0)
441 return err; 441 return err;
442 442
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index f435a88d899a..c721cd4a469f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -198,7 +198,7 @@ static int
198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, 198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
199 u32 handle, struct tcindex_data *p, 199 u32 handle, struct tcindex_data *p,
200 struct tcindex_filter_result *r, struct nlattr **tb, 200 struct tcindex_filter_result *r, struct nlattr **tb,
201 struct nlattr *est) 201 struct nlattr *est, bool ovr)
202{ 202{
203 int err, balloc = 0; 203 int err, balloc = 0;
204 struct tcindex_filter_result new_filter_result, *old_r = r; 204 struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -208,7 +208,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
208 struct tcf_exts e; 208 struct tcf_exts e;
209 209
210 tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 210 tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
211 err = tcf_exts_validate(net, tp, tb, est, &e); 211 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
212 if (err < 0) 212 if (err < 0)
213 return err; 213 return err;
214 214
@@ -341,7 +341,7 @@ errout:
341static int 341static int
342tcindex_change(struct net *net, struct sk_buff *in_skb, 342tcindex_change(struct net *net, struct sk_buff *in_skb,
343 struct tcf_proto *tp, unsigned long base, u32 handle, 343 struct tcf_proto *tp, unsigned long base, u32 handle,
344 struct nlattr **tca, unsigned long *arg) 344 struct nlattr **tca, unsigned long *arg, bool ovr)
345{ 345{
346 struct nlattr *opt = tca[TCA_OPTIONS]; 346 struct nlattr *opt = tca[TCA_OPTIONS];
347 struct nlattr *tb[TCA_TCINDEX_MAX + 1]; 347 struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -361,7 +361,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
361 return err; 361 return err;
362 362
363 return tcindex_set_parms(net, tp, base, handle, p, r, tb, 363 return tcindex_set_parms(net, tp, base, handle, p, r, tb,
364 tca[TCA_RATE]); 364 tca[TCA_RATE], ovr);
365} 365}
366 366
367 367
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 84c28daff848..c39b583ace32 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -486,13 +486,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
486static int u32_set_parms(struct net *net, struct tcf_proto *tp, 486static int u32_set_parms(struct net *net, struct tcf_proto *tp,
487 unsigned long base, struct tc_u_hnode *ht, 487 unsigned long base, struct tc_u_hnode *ht,
488 struct tc_u_knode *n, struct nlattr **tb, 488 struct tc_u_knode *n, struct nlattr **tb,
489 struct nlattr *est) 489 struct nlattr *est, bool ovr)
490{ 490{
491 int err; 491 int err;
492 struct tcf_exts e; 492 struct tcf_exts e;
493 493
494 tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE); 494 tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
495 err = tcf_exts_validate(net, tp, tb, est, &e); 495 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
496 if (err < 0) 496 if (err < 0)
497 return err; 497 return err;
498 498
@@ -545,7 +545,7 @@ errout:
545static int u32_change(struct net *net, struct sk_buff *in_skb, 545static int u32_change(struct net *net, struct sk_buff *in_skb,
546 struct tcf_proto *tp, unsigned long base, u32 handle, 546 struct tcf_proto *tp, unsigned long base, u32 handle,
547 struct nlattr **tca, 547 struct nlattr **tca,
548 unsigned long *arg) 548 unsigned long *arg, bool ovr)
549{ 549{
550 struct tc_u_common *tp_c = tp->data; 550 struct tc_u_common *tp_c = tp->data;
551 struct tc_u_hnode *ht; 551 struct tc_u_hnode *ht;
@@ -569,7 +569,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
569 return -EINVAL; 569 return -EINVAL;
570 570
571 return u32_set_parms(net, tp, base, n->ht_up, n, tb, 571 return u32_set_parms(net, tp, base, n->ht_up, n, tb,
572 tca[TCA_RATE]); 572 tca[TCA_RATE], ovr);
573 } 573 }
574 574
575 if (tb[TCA_U32_DIVISOR]) { 575 if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +656,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
656 } 656 }
657#endif 657#endif
658 658
659 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]); 659 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
660 if (err == 0) { 660 if (err == 0) {
661 struct tc_u_knode **ins; 661 struct tc_u_knode **ins;
662 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) 662 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 400769014bbd..58bed7599db7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -563,7 +563,7 @@ out:
563} 563}
564EXPORT_SYMBOL(__qdisc_calculate_pkt_len); 564EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
565 565
566void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) 566void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
567{ 567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { 568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", 569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
@@ -1084,7 +1084,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1084 struct Qdisc *p = NULL; 1084 struct Qdisc *p = NULL;
1085 int err; 1085 int err;
1086 1086
1087 if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN)) 1087 if ((n->nlmsg_type != RTM_GETQDISC) &&
1088 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1088 return -EPERM; 1089 return -EPERM;
1089 1090
1090 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1091 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1151,7 +1152,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1151 struct Qdisc *q, *p; 1152 struct Qdisc *q, *p;
1152 int err; 1153 int err;
1153 1154
1154 if (!netlink_capable(skb, CAP_NET_ADMIN)) 1155 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1155 return -EPERM; 1156 return -EPERM;
1156 1157
1157replay: 1158replay:
@@ -1490,7 +1491,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1490 u32 qid; 1491 u32 qid;
1491 int err; 1492 int err;
1492 1493
1493 if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN)) 1494 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1495 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1494 return -EPERM; 1496 return -EPERM;
1495 1497
1496 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1498 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
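
The capability checks here move from netlink_capable() to netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN): instead of requiring CAP_NET_ADMIN in the initial user namespace, the sender needs it in the user namespace that owns the qdisc's network namespace, which lets an otherwise unprivileged container manage traffic control on its own devices. The v3.15-era helper signature, as a sketch:

	/* True iff the socket that sent skb had "cap" in user_ns
	 * when it was opened. */
	bool netlink_ns_capable(const struct sk_buff *skb,
				struct user_namespace *user_ns, int cap);

Read-only requests (RTM_GETQDISC, RTM_GETTCLASS) stay unprivileged, as before.
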
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 2aee02802c27..ed30e436128b 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -391,12 +391,7 @@ static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
391 391
392static void choke_free(void *addr) 392static void choke_free(void *addr)
393{ 393{
394 if (addr) { 394 kvfree(addr);
395 if (is_vmalloc_addr(addr))
396 vfree(addr);
397 else
398 kfree(addr);
399 }
400} 395}
401 396
402static int choke_change(struct Qdisc *sch, struct nlattr *opt) 397static int choke_change(struct Qdisc *sch, struct nlattr *opt)
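
choke_free() here, and the identical helpers in sch_fq, sch_fq_codel, sch_hhf, sch_netem and sch_sfq below, collapse the open-coded "vfree or kfree, skip NULL" pattern into kvfree(). The helper, essentially as it reads in mm/util.c, already performs the same checks, since is_vmalloc_addr(NULL) is false and kfree(NULL) is a no-op:

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
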
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8302717ea303..7bbbfe112192 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -391,8 +391,10 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
391 while (1) { 391 while (1) {
392 cl = list_first_entry(&q->active, struct drr_class, alist); 392 cl = list_first_entry(&q->active, struct drr_class, alist);
393 skb = cl->qdisc->ops->peek(cl->qdisc); 393 skb = cl->qdisc->ops->peek(cl->qdisc);
394 if (skb == NULL) 394 if (skb == NULL) {
395 qdisc_warn_nonwc(__func__, cl->qdisc);
395 goto out; 396 goto out;
397 }
396 398
397 len = qdisc_pkt_len(skb); 399 len = qdisc_pkt_len(skb);
398 if (len <= cl->deficit) { 400 if (len <= cl->deficit) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 23c682b42f99..ba32c2b005d0 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -591,10 +591,7 @@ static void *fq_alloc_node(size_t sz, int node)
591 591
592static void fq_free(void *addr) 592static void fq_free(void *addr)
593{ 593{
594 if (addr && is_vmalloc_addr(addr)) 594 kvfree(addr);
595 vfree(addr);
596 else
597 kfree(addr);
598} 595}
599 596
600static int fq_resize(struct Qdisc *sch, u32 log) 597static int fq_resize(struct Qdisc *sch, u32 log)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 0bf432c782c1..063b726bf1f8 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -365,12 +365,7 @@ static void *fq_codel_zalloc(size_t sz)
365 365
366static void fq_codel_free(void *addr) 366static void fq_codel_free(void *addr)
367{ 367{
368 if (addr) { 368 kvfree(addr);
369 if (is_vmalloc_addr(addr))
370 vfree(addr);
371 else
372 kfree(addr);
373 }
374} 369}
375 370
376static void fq_codel_destroy(struct Qdisc *sch) 371static void fq_codel_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 6e957c3b9854..d85b6812a7d4 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -414,7 +414,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
414 } 414 }
415 bucket->deficit = weight * q->quantum; 415 bucket->deficit = weight * q->quantum;
416 } 416 }
417 if (++sch->q.qlen < sch->limit) 417 if (++sch->q.qlen <= sch->limit)
418 return NET_XMIT_SUCCESS; 418 return NET_XMIT_SUCCESS;
419 419
420 q->drop_overlimit++; 420 q->drop_overlimit++;
@@ -494,12 +494,7 @@ static void *hhf_zalloc(size_t sz)
494 494
495static void hhf_free(void *addr) 495static void hhf_free(void *addr)
496{ 496{
497 if (addr) { 497 kvfree(addr);
498 if (is_vmalloc_addr(addr))
499 vfree(addr);
500 else
501 kfree(addr);
502 }
503} 498}
504 499
505static void hhf_destroy(struct Qdisc *sch) 500static void hhf_destroy(struct Qdisc *sch)
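
The hhf_enqueue() change is an off-by-one fix: sch->q.qlen has already been incremented for the packet just queued, so testing `< sch->limit` dropped the packet that would have brought the backlog exactly to the limit. A worked case, with limit == 3:

	/* old: if (++qlen <  limit)  qlen 1,2 accepted; 3rd packet dropped
	 * new: if (++qlen <= limit)  qlen 1,2,3 accepted; 4th dropped
	 * i.e. the old test capped the queue at limit - 1 packets. */
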
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f1669a00f571..111d70fddaea 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -648,12 +648,7 @@ static void netem_reset(struct Qdisc *sch)
648 648
649static void dist_free(struct disttable *d) 649static void dist_free(struct disttable *d)
650{ 650{
651 if (d) { 651 kvfree(d);
652 if (is_vmalloc_addr(d))
653 vfree(d);
654 else
655 kfree(d);
656 }
657} 652}
658 653
659/* 654/*
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 87317ff0b4ec..1af2f73906d0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -716,12 +716,7 @@ static void *sfq_alloc(size_t sz)
716 716
717static void sfq_free(void *addr) 717static void sfq_free(void *addr)
718{ 718{
719 if (addr) { 719 kvfree(addr);
720 if (is_vmalloc_addr(addr))
721 vfree(addr);
722 else
723 kfree(addr);
724 }
725} 720}
726 721
727static void sfq_destroy(struct Qdisc *sch) 722static void sfq_destroy(struct Qdisc *sch)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39579c3e0d14..9de23a222d3f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -55,6 +55,7 @@
55#include <net/sctp/sm.h> 55#include <net/sctp/sm.h>
56 56
57/* Forward declarations for internal functions. */ 57/* Forward declarations for internal functions. */
58static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
58static void sctp_assoc_bh_rcv(struct work_struct *work); 59static void sctp_assoc_bh_rcv(struct work_struct *work);
59static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
60static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); 61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
@@ -330,7 +331,7 @@ void sctp_association_free(struct sctp_association *asoc)
330 /* Only real associations count against the endpoint, so 331 /* Only real associations count against the endpoint, so
331 * don't bother if this is a temporary association. 332 * don't bother if this is a temporary association.
332 */ 333 */
333 if (!asoc->temp) { 334 if (!list_empty(&asoc->asocs)) {
334 list_del(&asoc->asocs); 335 list_del(&asoc->asocs);
335 336
336 /* Decrement the backlog value for a TCP-style listening 337 /* Decrement the backlog value for a TCP-style listening
@@ -774,9 +775,6 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
774 sctp_transport_cmd_t command, 775 sctp_transport_cmd_t command,
775 sctp_sn_error_t error) 776 sctp_sn_error_t error)
776{ 777{
777 struct sctp_transport *t = NULL;
778 struct sctp_transport *first;
779 struct sctp_transport *second;
780 struct sctp_ulpevent *event; 778 struct sctp_ulpevent *event;
781 struct sockaddr_storage addr; 779 struct sockaddr_storage addr;
782 int spc_state = 0; 780 int spc_state = 0;
@@ -829,13 +827,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
829 return; 827 return;
830 } 828 }
831 829
832 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the 830 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
833 * user. 831 * to the user.
834 */ 832 */
835 if (ulp_notify) { 833 if (ulp_notify) {
836 memset(&addr, 0, sizeof(struct sockaddr_storage)); 834 memset(&addr, 0, sizeof(struct sockaddr_storage));
837 memcpy(&addr, &transport->ipaddr, 835 memcpy(&addr, &transport->ipaddr,
838 transport->af_specific->sockaddr_len); 836 transport->af_specific->sockaddr_len);
837
839 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 838 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
840 0, spc_state, error, GFP_ATOMIC); 839 0, spc_state, error, GFP_ATOMIC);
841 if (event) 840 if (event)
@@ -843,60 +842,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
843 } 842 }
844 843
845 /* Select new active and retran paths. */ 844 /* Select new active and retran paths. */
846 845 sctp_select_active_and_retran_path(asoc);
847 /* Look for the two most recently used active transports.
848 *
849 * This code produces the wrong ordering whenever jiffies
850 * rolls over, but we still get usable transports, so we don't
851 * worry about it.
852 */
853 first = NULL; second = NULL;
854
855 list_for_each_entry(t, &asoc->peer.transport_addr_list,
856 transports) {
857
858 if ((t->state == SCTP_INACTIVE) ||
859 (t->state == SCTP_UNCONFIRMED) ||
860 (t->state == SCTP_PF))
861 continue;
862 if (!first || t->last_time_heard > first->last_time_heard) {
863 second = first;
864 first = t;
865 } else if (!second ||
866 t->last_time_heard > second->last_time_heard)
867 second = t;
868 }
869
870 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
871 *
872 * By default, an endpoint should always transmit to the
873 * primary path, unless the SCTP user explicitly specifies the
874 * destination transport address (and possibly source
875 * transport address) to use.
876 *
877 * [If the primary is active but not most recent, bump the most
878 * recently used transport.]
879 */
880 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
881 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
882 first != asoc->peer.primary_path) {
883 second = first;
884 first = asoc->peer.primary_path;
885 }
886
887 if (!second)
888 second = first;
889 /* If we failed to find a usable transport, just camp on the
890 * primary, even if it is inactive.
891 */
892 if (!first) {
893 first = asoc->peer.primary_path;
894 second = asoc->peer.primary_path;
895 }
896
897 /* Set the active and retran transports. */
898 asoc->peer.active_path = first;
899 asoc->peer.retran_path = second;
900} 846}
901 847
902/* Hold a reference to an association. */ 848/* Hold a reference to an association. */
@@ -1090,7 +1036,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1090 } 1036 }
1091 1037
1092 if (chunk->transport) 1038 if (chunk->transport)
1093 chunk->transport->last_time_heard = jiffies; 1039 chunk->transport->last_time_heard = ktime_get();
1094 1040
1095 /* Run through the state machine. */ 1041 /* Run through the state machine. */
1096 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, 1042 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
@@ -1278,13 +1224,41 @@ static u8 sctp_trans_score(const struct sctp_transport *trans)
1278 return sctp_trans_state_to_prio_map[trans->state]; 1224 return sctp_trans_state_to_prio_map[trans->state];
1279} 1225}
1280 1226
1227static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1228 struct sctp_transport *trans2)
1229{
1230 if (trans1->error_count > trans2->error_count) {
1231 return trans2;
1232 } else if (trans1->error_count == trans2->error_count &&
1233 ktime_after(trans2->last_time_heard,
1234 trans1->last_time_heard)) {
1235 return trans2;
1236 } else {
1237 return trans1;
1238 }
1239}
1240
1281static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, 1241static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1282 struct sctp_transport *best) 1242 struct sctp_transport *best)
1283{ 1243{
1244 u8 score_curr, score_best;
1245
1284 if (best == NULL) 1246 if (best == NULL)
1285 return curr; 1247 return curr;
1286 1248
1287 return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; 1249 score_curr = sctp_trans_score(curr);
1250 score_best = sctp_trans_score(best);
1251
1252 /* First, try a score-based selection if both transport states
1253 * differ. If we're in a tie, let's try to make a more clever
1254 * decision here based on error counts and last time heard.
1255 */
1256 if (score_curr > score_best)
1257 return curr;
1258 else if (score_curr == score_best)
1259 return sctp_trans_elect_tie(curr, best);
1260 else
1261 return best;
1288} 1262}
1289 1263
1290void sctp_assoc_update_retran_path(struct sctp_association *asoc) 1264void sctp_assoc_update_retran_path(struct sctp_association *asoc)
@@ -1325,6 +1299,76 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1325 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); 1299 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1326} 1300}
1327 1301
1302static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1303{
1304 struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1305 struct sctp_transport *trans_pf = NULL;
1306
1307 /* Look for the two most recently used active transports. */
1308 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1309 transports) {
1310 /* Skip uninteresting transports. */
1311 if (trans->state == SCTP_INACTIVE ||
1312 trans->state == SCTP_UNCONFIRMED)
1313 continue;
1314 /* Keep track of the best PF transport from our
1315 * list in case we don't find an active one.
1316 */
1317 if (trans->state == SCTP_PF) {
1318 trans_pf = sctp_trans_elect_best(trans, trans_pf);
1319 continue;
1320 }
1321 /* For active transports, pick the most recent ones. */
1322 if (trans_pri == NULL ||
1323 ktime_after(trans->last_time_heard,
1324 trans_pri->last_time_heard)) {
1325 trans_sec = trans_pri;
1326 trans_pri = trans;
1327 } else if (trans_sec == NULL ||
1328 ktime_after(trans->last_time_heard,
1329 trans_sec->last_time_heard)) {
1330 trans_sec = trans;
1331 }
1332 }
1333
1334 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1335 *
1336 * By default, an endpoint should always transmit to the primary
1337 * path, unless the SCTP user explicitly specifies the
1338 * destination transport address (and possibly source transport
1339 * address) to use. [If the primary is active but not most recent,
1340 * bump the most recently used transport.]
1341 */
1342 if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1343 asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1344 asoc->peer.primary_path != trans_pri) {
1345 trans_sec = trans_pri;
1346 trans_pri = asoc->peer.primary_path;
1347 }
1348
1349 /* We did not find anything useful for a possible retransmission
1350 * path; either the primary path we found is the same as
1351 * the current one, or we didn't find an active one at all.
1352 */
1353 if (trans_sec == NULL)
1354 trans_sec = trans_pri;
1355
1356 /* If we failed to find a usable transport, just camp on the
1357 * primary or retran path, even if they are inactive; if
1358 * possible, pick a PF transport when it's the better choice.
1359 */
1360 if (trans_pri == NULL) {
1361 trans_pri = sctp_trans_elect_best(asoc->peer.primary_path,
1362 asoc->peer.retran_path);
1363 trans_pri = sctp_trans_elect_best(trans_pri, trans_pf);
1364 trans_sec = asoc->peer.primary_path;
1365 }
1366
1367 /* Set the active and retran transports. */
1368 asoc->peer.active_path = trans_pri;
1369 asoc->peer.retran_path = trans_sec;
1370}
1371
1328struct sctp_transport * 1372struct sctp_transport *
1329sctp_assoc_choose_alter_transport(struct sctp_association *asoc, 1373sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1330 struct sctp_transport *last_sent_to) 1374 struct sctp_transport *last_sent_to)
@@ -1547,7 +1591,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1547/* Set an association id for a given association */ 1591/* Set an association id for a given association */
1548int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) 1592int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1549{ 1593{
1550 bool preload = gfp & __GFP_WAIT; 1594 bool preload = !!(gfp & __GFP_WAIT);
1551 int ret; 1595 int ret;
1552 1596
1553 /* If the id is already assigned, keep it. */ 1597 /* If the id is already assigned, keep it. */
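
Besides moving the selection loop into sctp_select_active_and_retran_path(), the rework retires the deleted comment's caveat about jiffies rollover: last_time_heard becomes a ktime_t (see the transport.c hunk further down), and recency is decided with ktime_after() on a 64-bit monotonic clock rather than a raw `>` on jiffies, which wraps after roughly 49.7 days on a 32-bit jiffies counter at HZ=1000. Ties between equally scored transports now fall to sctp_trans_elect_tie(): fewer errors wins, then the more recently heard path. A small sketch of the clock difference, where trans stands for any transport on the list:

	/* jiffies comparisons need time_after() to survive a wrap;
	 * ktime_t is 64-bit monotonic nanoseconds and compares safely: */
	ktime_t now = ktime_get();
	bool newer = ktime_after(now, trans->last_time_heard);
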
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 3d9f429858dc..9da76ba4d10f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -481,7 +481,7 @@ normal:
481 } 481 }
482 482
483 if (chunk->transport) 483 if (chunk->transport)
484 chunk->transport->last_time_heard = jiffies; 484 chunk->transport->last_time_heard = ktime_get();
485 485
486 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state, 486 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
487 ep, asoc, chunk, GFP_ATOMIC); 487 ep, asoc, chunk, GFP_ATOMIC);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2b1738ef9394..1999592ba88c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -216,7 +216,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
216 IP6_ECN_flow_xmit(sk, fl6->flowlabel); 216 IP6_ECN_flow_xmit(sk, fl6->flowlabel);
217 217
218 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 218 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
219 skb->local_df = 1; 219 skb->ignore_df = 1;
220 220
221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
222 222
@@ -943,7 +943,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
943 .protocol = IPPROTO_SCTP, 943 .protocol = IPPROTO_SCTP,
944 .prot = &sctpv6_prot, 944 .prot = &sctpv6_prot,
945 .ops = &inet6_seqpacket_ops, 945 .ops = &inet6_seqpacket_ops,
946 .no_check = 0,
947 .flags = SCTP_PROTOSW_FLAG 946 .flags = SCTP_PROTOSW_FLAG
948}; 947};
949static struct inet_protosw sctpv6_stream_protosw = { 948static struct inet_protosw sctpv6_stream_protosw = {
@@ -951,7 +950,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
951 .protocol = IPPROTO_SCTP, 950 .protocol = IPPROTO_SCTP,
952 .prot = &sctpv6_prot, 951 .prot = &sctpv6_prot,
953 .ops = &inet6_seqpacket_ops, 952 .ops = &inet6_seqpacket_ops,
954 .no_check = 0,
955 .flags = SCTP_PROTOSW_FLAG, 953 .flags = SCTP_PROTOSW_FLAG,
956}; 954};
957 955
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0f4d15fc2627..01ab8e0723f0 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -591,7 +591,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
591 591
592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); 592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
593 593
594 nskb->local_df = packet->ipfragok; 594 nskb->ignore_df = packet->ipfragok;
595 tp->af_specific->sctp_xmit(nskb, tp); 595 tp->af_specific->sctp_xmit(nskb, tp);
596 596
597out: 597out:
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0947f1e15eb8..34229ee7f379 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -78,7 +78,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
78 78
79 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 79 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
80 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 80 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
81 snmp_fold_field((void __percpu **)net->sctp.sctp_statistics, 81 snmp_fold_field(net->sctp.sctp_statistics,
82 sctp_snmp_list[i].entry)); 82 sctp_snmp_list[i].entry));
83 83
84 return 0; 84 return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 44cbb54c8574..6789d785e698 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1017,7 +1017,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
1017 .protocol = IPPROTO_SCTP, 1017 .protocol = IPPROTO_SCTP,
1018 .prot = &sctp_prot, 1018 .prot = &sctp_prot,
1019 .ops = &inet_seqpacket_ops, 1019 .ops = &inet_seqpacket_ops,
1020 .no_check = 0,
1021 .flags = SCTP_PROTOSW_FLAG 1020 .flags = SCTP_PROTOSW_FLAG
1022}; 1021};
1023static struct inet_protosw sctp_stream_protosw = { 1022static struct inet_protosw sctp_stream_protosw = {
@@ -1025,7 +1024,6 @@ static struct inet_protosw sctp_stream_protosw = {
1025 .protocol = IPPROTO_SCTP, 1024 .protocol = IPPROTO_SCTP,
1026 .prot = &sctp_prot, 1025 .prot = &sctp_prot,
1027 .ops = &inet_seqpacket_ops, 1026 .ops = &inet_seqpacket_ops,
1028 .no_check = 0,
1029 .flags = SCTP_PROTOSW_FLAG 1027 .flags = SCTP_PROTOSW_FLAG
1030}; 1028};
1031 1029
@@ -1105,14 +1103,15 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
1105 1103
1106static inline int init_sctp_mibs(struct net *net) 1104static inline int init_sctp_mibs(struct net *net)
1107{ 1105{
1108 return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics, 1106 net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
1109 sizeof(struct sctp_mib), 1107 if (!net->sctp.sctp_statistics)
1110 __alignof__(struct sctp_mib)); 1108 return -ENOMEM;
1109 return 0;
1111} 1110}
1112 1111
1113static inline void cleanup_sctp_mibs(struct net *net) 1112static inline void cleanup_sctp_mibs(struct net *net)
1114{ 1113{
1115 snmp_mib_free((void __percpu **)net->sctp.sctp_statistics); 1114 free_percpu(net->sctp.sctp_statistics);
1116} 1115}
1117 1116
1118static void sctp_v4_pf_init(void) 1117static void sctp_v4_pf_init(void)
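
init_sctp_mibs() drops the snmp_mib_init()/snmp_mib_free() double-pointer machinery for a plain alloc_percpu()/free_percpu() pair, which is why sctp_snmp_seq_show() in proc.c above loses its `(void __percpu **)` cast. snmp_fold_field() then just sums one counter slot across CPUs; conceptually, close to the 3.16 net/ipv4/af_inet.c version:

	unsigned long snmp_fold_field(void __percpu *mib, int offt)
	{
		unsigned long res = 0;
		int i;

		for_each_possible_cpu(i)
			res += *(((unsigned long *)per_cpu_ptr(mib, i)) + offt);
		return res;
	}
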
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fee5552ddf92..ae0e616a7ca5 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1782,7 +1782,7 @@ no_hmac:
1782 else 1782 else
1783 kt = ktime_get(); 1783 kt = ktime_get();
1784 1784
1785 if (!asoc && ktime_compare(bear_cookie->expiration, kt) < 0) { 1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1786 /* 1786 /*
1787 * Section 3.3.10.3 Stale Cookie Error (3) 1787 * Section 3.3.10.3 Stale Cookie Error (3)
1788 * 1788 *
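
ktime_compare(a, b) < 0 and ktime_before(a, b) are equivalent; the named helper simply reads better at call sites like this cookie-expiration test. The 3.16-era definition from linux/ktime.h:

	static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
	{
		return ktime_compare(cmp1, cmp2) < 0;
	}
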
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fee06b99a4da..429899689408 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -71,6 +71,7 @@
71#include <net/route.h> 71#include <net/route.h>
72#include <net/ipv6.h> 72#include <net/ipv6.h>
73#include <net/inet_common.h> 73#include <net/inet_common.h>
74#include <net/busy_poll.h>
74 75
75#include <linux/socket.h> /* for sa_family_t */ 76#include <linux/socket.h> /* for sa_family_t */
76#include <linux/export.h> 77#include <linux/export.h>
@@ -5945,8 +5946,9 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5945 /* Search for an available port. */ 5946 /* Search for an available port. */
5946 int low, high, remaining, index; 5947 int low, high, remaining, index;
5947 unsigned int rover; 5948 unsigned int rover;
5949 struct net *net = sock_net(sk);
5948 5950
5949 inet_get_local_port_range(sock_net(sk), &low, &high); 5951 inet_get_local_port_range(net, &low, &high);
5950 remaining = (high - low) + 1; 5952 remaining = (high - low) + 1;
5951 rover = prandom_u32() % remaining + low; 5953 rover = prandom_u32() % remaining + low;
5952 5954
@@ -5954,7 +5956,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5954 rover++; 5956 rover++;
5955 if ((rover < low) || (rover > high)) 5957 if ((rover < low) || (rover > high))
5956 rover = low; 5958 rover = low;
5957 if (inet_is_reserved_local_port(rover)) 5959 if (inet_is_local_reserved_port(net, rover))
5958 continue; 5960 continue;
5959 index = sctp_phashfn(sock_net(sk), rover); 5961 index = sctp_phashfn(sock_net(sk), rover);
5960 head = &sctp_port_hashtable[index]; 5962 head = &sctp_port_hashtable[index];
@@ -6557,6 +6559,10 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6557 if (sk->sk_shutdown & RCV_SHUTDOWN) 6559 if (sk->sk_shutdown & RCV_SHUTDOWN)
6558 break; 6560 break;
6559 6561
6562 if (sk_can_busy_loop(sk) &&
6563 sk_busy_loop(sk, noblock))
6564 continue;
6565
6560 /* User doesn't want to wait. */ 6566 /* User doesn't want to wait. */
6561 error = -EAGAIN; 6567 error = -EAGAIN;
6562 if (!timeo) 6568 if (!timeo)
@@ -6940,7 +6946,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6940 newsk->sk_type = sk->sk_type; 6946 newsk->sk_type = sk->sk_type;
6941 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6947 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
6942 newsk->sk_flags = sk->sk_flags; 6948 newsk->sk_flags = sk->sk_flags;
6943 newsk->sk_no_check = sk->sk_no_check; 6949 newsk->sk_no_check_tx = sk->sk_no_check_tx;
6950 newsk->sk_no_check_rx = sk->sk_no_check_rx;
6944 newsk->sk_reuse = sk->sk_reuse; 6951 newsk->sk_reuse = sk->sk_reuse;
6945 6952
6946 newsk->sk_shutdown = sk->sk_shutdown; 6953 newsk->sk_shutdown = sk->sk_shutdown;
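
The sctp_skb_recv_datagram() hunk wires SCTP into low-latency busy polling, matching what UDP and TCP already do: when the receive queue is empty and the socket has a NAPI id recorded (the sk_mark_napi_id() call added in ulpqueue.c below does that as data arrives), the loop polls the device instead of sleeping, then re-checks the queue. The shape of the pattern, with hypothetical error handling standing in for the surrounding loop:

	/* sketch: receive loop with busy polling */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock))
			continue;	/* polled the NIC; re-check queue */
		if (!timeo)
			return ERR_PTR(-EAGAIN);	/* hypothetical */
		/* otherwise block, as before */
	}
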
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index c82fdc1eab7c..7e5eb7554990 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -436,20 +436,21 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
436 436
437int sctp_sysctl_net_register(struct net *net) 437int sctp_sysctl_net_register(struct net *net)
438{ 438{
439 struct ctl_table *table = sctp_net_table; 439 struct ctl_table *table;
440 440 int i;
441 if (!net_eq(net, &init_net)) {
442 int i;
443 441
444 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 442 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
445 if (!table) 443 if (!table)
446 return -ENOMEM; 444 return -ENOMEM;
447 445
448 for (i = 0; table[i].data; i++) 446 for (i = 0; table[i].data; i++)
449 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 447 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
450 }
451 448
452 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 449 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
450 if (net->sctp.sysctl_header == NULL) {
451 kfree(table);
452 return -ENOMEM;
453 }
453 return 0; 454 return 0;
454} 455}
455 456
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1d348d15b33d..7dd672fa651f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
72 */ 72 */
73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
74 74
75 peer->last_time_heard = jiffies; 75 peer->last_time_heard = ktime_get();
76 peer->last_time_ecne_reduced = jiffies; 76 peer->last_time_ecne_reduced = jiffies;
77 77
78 peer->param_flags = SPP_HB_DISABLE | 78 peer->param_flags = SPP_HB_DISABLE |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7144eb6a1b95..d49dc2ed30ad 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -38,6 +38,7 @@
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <net/sock.h> 40#include <net/sock.h>
41#include <net/busy_poll.h>
41#include <net/sctp/structs.h> 42#include <net/sctp/structs.h>
42#include <net/sctp/sctp.h> 43#include <net/sctp/sctp.h>
43#include <net/sctp/sm.h> 44#include <net/sctp/sm.h>
@@ -204,6 +205,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
204 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) 205 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
205 goto out_free; 206 goto out_free;
206 207
208 if (!sctp_ulpevent_is_notification(event))
209 sk_mark_napi_id(sk, skb);
210
207 /* Check if the user wishes to receive this event. */ 211 /* Check if the user wishes to receive this event. */
208 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) 212 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
209 goto out_free; 213 goto out_free;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 0a648c502fc3..2df87f78e518 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -173,7 +173,8 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
173 return -1; 173 return -1;
174 if (csum_fold(desc.csum)) 174 if (csum_fold(desc.csum))
175 return -1; 175 return -1;
176 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) 176 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
177 !skb->csum_complete_sw)
177 netdev_rx_csum_fault(skb->dev); 178 netdev_rx_csum_fault(skb->dev);
178 return 0; 179 return 0;
179no_checksum: 180no_checksum:
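
netdev_rx_csum_fault() exists to flag NICs that claim CHECKSUM_COMPLETE but deliver a bad sum; since this cycle the software fallback in __skb_checksum_complete() can also fill in skb->csum and records that fact in skb->csum_complete_sw, so this consumer now skips the warning when the sum was computed in software. The producer side, roughly (a sketch, not the exact datagram.c body):

	/* software fallback: compute the full checksum ourselves and
	 * record that the NIC did not produce it */
	skb->csum = skb_checksum(skb, 0, skb->len, 0);
	skb->csum_complete_sw = 1;
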
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 402a7e9a16b7..be8bbd5d65ec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -866,8 +866,6 @@ static void xs_reset_transport(struct sock_xprt *transport)
866 xs_restore_old_callbacks(transport, sk); 866 xs_restore_old_callbacks(transport, sk);
867 write_unlock_bh(&sk->sk_callback_lock); 867 write_unlock_bh(&sk->sk_callback_lock);
868 868
869 sk->sk_no_check = 0;
870
871 trace_rpc_socket_close(&transport->xprt, sock); 869 trace_rpc_socket_close(&transport->xprt, sock);
872 sock_release(sock); 870 sock_release(sock);
873} 871}
@@ -2046,7 +2044,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2046 sk->sk_user_data = xprt; 2044 sk->sk_user_data = xprt;
2047 sk->sk_data_ready = xs_udp_data_ready; 2045 sk->sk_data_ready = xs_udp_data_ready;
2048 sk->sk_write_space = xs_udp_write_space; 2046 sk->sk_write_space = xs_udp_write_space;
2049 sk->sk_no_check = UDP_CSUM_NORCV;
2050 sk->sk_allocation = GFP_ATOMIC; 2047 sk->sk_allocation = GFP_ATOMIC;
2051 2048
2052 xprt_set_connected(xprt); 2049 xprt_set_connected(xprt);
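
Both xprtsock.c hunks fall out of the tree-wide removal of sk->sk_no_check, which conflated transmit and receive behaviour, in favour of two bits (also visible in the sctp_copy_sock() hunk above). A sketch of the relevant 3.16 struct sock fields:

	unsigned int sk_no_check_tx : 1;	/* don't checksum on xmit */
	unsigned int sk_no_check_rx : 1;	/* accept zero checksums */

The old UDP_CSUM_NORCV assignment in xs_udp_finish_connecting() is simply dropped rather than mapped onto the new bits; by all appearances it had no useful effect for RPC and is not carried forward.
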
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index b282f7130d2b..a080c66d819a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -5,7 +5,7 @@
5obj-$(CONFIG_TIPC) := tipc.o 5obj-$(CONFIG_TIPC) := tipc.o
6 6
7tipc-y += addr.o bcast.o bearer.o config.o \ 7tipc-y += addr.o bcast.o bearer.o config.o \
8 core.o handler.o link.o discover.o msg.o \ 8 core.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \ 10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o log.o eth_media.o server.o 11 socket.o log.o eth_media.o server.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 95ab5ef92920..26631679a1fa 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
71 * Note: The fields labelled "temporary" are incorporated into the bearer 71 * Note: The fields labelled "temporary" are incorporated into the bearer
72 * to avoid consuming potentially limited stack space through the use of 72 * to avoid consuming potentially limited stack space through the use of
73 * large local variables within multicast routines. Concurrent access is 73 * large local variables within multicast routines. Concurrent access is
74 * prevented through use of the spinlock "bc_lock". 74 * prevented through use of the spinlock "bclink_lock".
75 */ 75 */
76struct tipc_bcbearer { 76struct tipc_bcbearer {
77 struct tipc_bearer bearer; 77 struct tipc_bearer bearer;
@@ -84,34 +84,64 @@ struct tipc_bcbearer {
84 84
85/** 85/**
86 * struct tipc_bclink - link used for broadcast messages 86 * struct tipc_bclink - link used for broadcast messages
87 * @lock: spinlock governing access to structure
87 * @link: (non-standard) broadcast link structure 88 * @link: (non-standard) broadcast link structure
88 * @node: (non-standard) node structure representing b'cast link's peer node 89 * @node: (non-standard) node structure representing b'cast link's peer node
90 * @flags: represent bclink states
89 * @bcast_nodes: map of broadcast-capable nodes 91 * @bcast_nodes: map of broadcast-capable nodes
90 * @retransmit_to: node that most recently requested a retransmit 92 * @retransmit_to: node that most recently requested a retransmit
91 * 93 *
92 * Handles sequence numbering, fragmentation, bundling, etc. 94 * Handles sequence numbering, fragmentation, bundling, etc.
93 */ 95 */
94struct tipc_bclink { 96struct tipc_bclink {
97 spinlock_t lock;
95 struct tipc_link link; 98 struct tipc_link link;
96 struct tipc_node node; 99 struct tipc_node node;
100 unsigned int flags;
97 struct tipc_node_map bcast_nodes; 101 struct tipc_node_map bcast_nodes;
98 struct tipc_node *retransmit_to; 102 struct tipc_node *retransmit_to;
99}; 103};
100 104
101static struct tipc_bcbearer bcast_bearer; 105static struct tipc_bcbearer *bcbearer;
102static struct tipc_bclink bcast_link; 106static struct tipc_bclink *bclink;
103 107static struct tipc_link *bcl;
104static struct tipc_bcbearer *bcbearer = &bcast_bearer;
105static struct tipc_bclink *bclink = &bcast_link;
106static struct tipc_link *bcl = &bcast_link.link;
107
108static DEFINE_SPINLOCK(bc_lock);
109 108
110const char tipc_bclink_name[] = "broadcast-link"; 109const char tipc_bclink_name[] = "broadcast-link";
111 110
112static void tipc_nmap_diff(struct tipc_node_map *nm_a, 111static void tipc_nmap_diff(struct tipc_node_map *nm_a,
113 struct tipc_node_map *nm_b, 112 struct tipc_node_map *nm_b,
114 struct tipc_node_map *nm_diff); 113 struct tipc_node_map *nm_diff);
114static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
115static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
116
117static void tipc_bclink_lock(void)
118{
119 spin_lock_bh(&bclink->lock);
120}
121
122static void tipc_bclink_unlock(void)
123{
124 struct tipc_node *node = NULL;
125
126 if (likely(!bclink->flags)) {
127 spin_unlock_bh(&bclink->lock);
128 return;
129 }
130
131 if (bclink->flags & TIPC_BCLINK_RESET) {
132 bclink->flags &= ~TIPC_BCLINK_RESET;
133 node = tipc_bclink_retransmit_to();
134 }
135 spin_unlock_bh(&bclink->lock);
136
137 if (node)
138 tipc_link_reset_all(node);
139}
140
141void tipc_bclink_set_flags(unsigned int flags)
142{
143 bclink->flags |= flags;
144}
115 145
116static u32 bcbuf_acks(struct sk_buff *buf) 146static u32 bcbuf_acks(struct sk_buff *buf)
117{ 147{
@@ -130,16 +160,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
130 160
131void tipc_bclink_add_node(u32 addr) 161void tipc_bclink_add_node(u32 addr)
132{ 162{
133 spin_lock_bh(&bc_lock); 163 tipc_bclink_lock();
134 tipc_nmap_add(&bclink->bcast_nodes, addr); 164 tipc_nmap_add(&bclink->bcast_nodes, addr);
135 spin_unlock_bh(&bc_lock); 165 tipc_bclink_unlock();
136} 166}
137 167
138void tipc_bclink_remove_node(u32 addr) 168void tipc_bclink_remove_node(u32 addr)
139{ 169{
140 spin_lock_bh(&bc_lock); 170 tipc_bclink_lock();
141 tipc_nmap_remove(&bclink->bcast_nodes, addr); 171 tipc_nmap_remove(&bclink->bcast_nodes, addr);
142 spin_unlock_bh(&bc_lock); 172 tipc_bclink_unlock();
143} 173}
144 174
145static void bclink_set_last_sent(void) 175static void bclink_set_last_sent(void)
@@ -165,7 +195,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
165/** 195/**
166 * tipc_bclink_retransmit_to - get most recent node to request retransmission 196 * tipc_bclink_retransmit_to - get most recent node to request retransmission
167 * 197 *
168 * Called with bc_lock locked 198 * Called with bclink_lock locked
169 */ 199 */
170struct tipc_node *tipc_bclink_retransmit_to(void) 200struct tipc_node *tipc_bclink_retransmit_to(void)
171{ 201{
@@ -177,7 +207,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
177 * @after: sequence number of last packet to *not* retransmit 207 * @after: sequence number of last packet to *not* retransmit
178 * @to: sequence number of last packet to retransmit 208 * @to: sequence number of last packet to retransmit
179 * 209 *
180 * Called with bc_lock locked 210 * Called with bclink_lock locked
181 */ 211 */
182static void bclink_retransmit_pkt(u32 after, u32 to) 212static void bclink_retransmit_pkt(u32 after, u32 to)
183{ 213{
@@ -194,7 +224,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
194 * @n_ptr: node that sent acknowledgement info 224 * @n_ptr: node that sent acknowledgement info
195 * @acked: broadcast sequence # that has been acknowledged 225 * @acked: broadcast sequence # that has been acknowledged
196 * 226 *
197 * Node is locked, bc_lock unlocked. 227 * Node is locked, bclink_lock unlocked.
198 */ 228 */
199void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) 229void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
200{ 230{
@@ -202,8 +232,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
202 struct sk_buff *next; 232 struct sk_buff *next;
203 unsigned int released = 0; 233 unsigned int released = 0;
204 234
205 spin_lock_bh(&bc_lock); 235 tipc_bclink_lock();
206
207 /* Bail out if tx queue is empty (no clean up is required) */ 236 /* Bail out if tx queue is empty (no clean up is required) */
208 crs = bcl->first_out; 237 crs = bcl->first_out;
209 if (!crs) 238 if (!crs)
@@ -267,13 +296,13 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
267 if (unlikely(released && !list_empty(&bcl->waiting_ports))) 296 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
268 tipc_link_wakeup_ports(bcl, 0); 297 tipc_link_wakeup_ports(bcl, 0);
269exit: 298exit:
270 spin_unlock_bh(&bc_lock); 299 tipc_bclink_unlock();
271} 300}
272 301
273/** 302/**
274 * tipc_bclink_update_link_state - update broadcast link state 303 * tipc_bclink_update_link_state - update broadcast link state
275 * 304 *
276 * tipc_net_lock and node lock set 305 * RCU and node lock set
277 */ 306 */
278void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) 307void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
279{ 308{
@@ -320,10 +349,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
320 ? buf_seqno(n_ptr->bclink.deferred_head) - 1 349 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
321 : n_ptr->bclink.last_sent); 350 : n_ptr->bclink.last_sent);
322 351
323 spin_lock_bh(&bc_lock); 352 tipc_bclink_lock();
324 tipc_bearer_send(&bcbearer->bearer, buf, NULL); 353 tipc_bearer_send(MAX_BEARERS, buf, NULL);
325 bcl->stats.sent_nacks++; 354 bcl->stats.sent_nacks++;
326 spin_unlock_bh(&bc_lock); 355 tipc_bclink_unlock();
327 kfree_skb(buf); 356 kfree_skb(buf);
328 357
329 n_ptr->bclink.oos_state++; 358 n_ptr->bclink.oos_state++;
@@ -335,8 +364,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
335 * 364 *
336 * Delay any upcoming NACK by this node if another node has already 365 * Delay any upcoming NACK by this node if another node has already
337 * requested the first message this node is going to ask for. 366 * requested the first message this node is going to ask for.
338 *
339 * Only tipc_net_lock set.
340 */ 367 */
341static void bclink_peek_nack(struct tipc_msg *msg) 368static void bclink_peek_nack(struct tipc_msg *msg)
342{ 369{
@@ -362,7 +389,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
362{ 389{
363 int res; 390 int res;
364 391
365 spin_lock_bh(&bc_lock); 392 tipc_bclink_lock();
366 393
367 if (!bclink->bcast_nodes.count) { 394 if (!bclink->bcast_nodes.count) {
368 res = msg_data_sz(buf_msg(buf)); 395 res = msg_data_sz(buf_msg(buf));
@@ -377,14 +404,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
377 bcl->stats.accu_queue_sz += bcl->out_queue_size; 404 bcl->stats.accu_queue_sz += bcl->out_queue_size;
378 } 405 }
379exit: 406exit:
380 spin_unlock_bh(&bc_lock); 407 tipc_bclink_unlock();
381 return res; 408 return res;
382} 409}
383 410
384/** 411/**
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet 412 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 * 413 *
387 * Called with both sending node's lock and bc_lock taken. 414 * Called with both sending node's lock and bclink_lock taken.
388 */ 415 */
389static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) 416static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
390{ 417{
@@ -408,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
408/** 435/**
409 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards 436 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
410 * 437 *
411 * tipc_net_lock is read_locked, no other locks set 438 * RCU is locked, no other locks set
412 */ 439 */
413void tipc_bclink_rcv(struct sk_buff *buf) 440void tipc_bclink_rcv(struct sk_buff *buf)
414{ 441{
@@ -439,12 +466,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
439 if (msg_destnode(msg) == tipc_own_addr) { 466 if (msg_destnode(msg) == tipc_own_addr) {
440 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 467 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
441 tipc_node_unlock(node); 468 tipc_node_unlock(node);
442 spin_lock_bh(&bc_lock); 469 tipc_bclink_lock();
443 bcl->stats.recv_nacks++; 470 bcl->stats.recv_nacks++;
444 bclink->retransmit_to = node; 471 bclink->retransmit_to = node;
445 bclink_retransmit_pkt(msg_bcgap_after(msg), 472 bclink_retransmit_pkt(msg_bcgap_after(msg),
446 msg_bcgap_to(msg)); 473 msg_bcgap_to(msg));
447 spin_unlock_bh(&bc_lock); 474 tipc_bclink_unlock();
448 } else { 475 } else {
449 tipc_node_unlock(node); 476 tipc_node_unlock(node);
450 bclink_peek_nack(msg); 477 bclink_peek_nack(msg);
@@ -462,51 +489,47 @@ receive:
462 /* Deliver message to destination */ 489 /* Deliver message to destination */
463 490
464 if (likely(msg_isdata(msg))) { 491 if (likely(msg_isdata(msg))) {
465 spin_lock_bh(&bc_lock); 492 tipc_bclink_lock();
466 bclink_accept_pkt(node, seqno); 493 bclink_accept_pkt(node, seqno);
467 spin_unlock_bh(&bc_lock); 494 tipc_bclink_unlock();
468 tipc_node_unlock(node); 495 tipc_node_unlock(node);
469 if (likely(msg_mcast(msg))) 496 if (likely(msg_mcast(msg)))
470 tipc_port_mcast_rcv(buf, NULL); 497 tipc_port_mcast_rcv(buf, NULL);
471 else 498 else
472 kfree_skb(buf); 499 kfree_skb(buf);
473 } else if (msg_user(msg) == MSG_BUNDLER) { 500 } else if (msg_user(msg) == MSG_BUNDLER) {
474 spin_lock_bh(&bc_lock); 501 tipc_bclink_lock();
475 bclink_accept_pkt(node, seqno); 502 bclink_accept_pkt(node, seqno);
476 bcl->stats.recv_bundles++; 503 bcl->stats.recv_bundles++;
477 bcl->stats.recv_bundled += msg_msgcnt(msg); 504 bcl->stats.recv_bundled += msg_msgcnt(msg);
478 spin_unlock_bh(&bc_lock); 505 tipc_bclink_unlock();
479 tipc_node_unlock(node); 506 tipc_node_unlock(node);
480 tipc_link_bundle_rcv(buf); 507 tipc_link_bundle_rcv(buf);
481 } else if (msg_user(msg) == MSG_FRAGMENTER) { 508 } else if (msg_user(msg) == MSG_FRAGMENTER) {
482 int ret; 509 tipc_buf_append(&node->bclink.reasm_buf, &buf);
483 ret = tipc_link_frag_rcv(&node->bclink.reasm_head, 510 if (unlikely(!buf && !node->bclink.reasm_buf))
484 &node->bclink.reasm_tail,
485 &buf);
486 if (ret == LINK_REASM_ERROR)
487 goto unlock; 511 goto unlock;
488 spin_lock_bh(&bc_lock); 512 tipc_bclink_lock();
489 bclink_accept_pkt(node, seqno); 513 bclink_accept_pkt(node, seqno);
490 bcl->stats.recv_fragments++; 514 bcl->stats.recv_fragments++;
491 if (ret == LINK_REASM_COMPLETE) { 515 if (buf) {
492 bcl->stats.recv_fragmented++; 516 bcl->stats.recv_fragmented++;
493 /* Point msg to inner header */
494 msg = buf_msg(buf); 517 msg = buf_msg(buf);
495 spin_unlock_bh(&bc_lock); 518 tipc_bclink_unlock();
496 goto receive; 519 goto receive;
497 } 520 }
498 spin_unlock_bh(&bc_lock); 521 tipc_bclink_unlock();
499 tipc_node_unlock(node); 522 tipc_node_unlock(node);
500 } else if (msg_user(msg) == NAME_DISTRIBUTOR) { 523 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
501 spin_lock_bh(&bc_lock); 524 tipc_bclink_lock();
502 bclink_accept_pkt(node, seqno); 525 bclink_accept_pkt(node, seqno);
503 spin_unlock_bh(&bc_lock); 526 tipc_bclink_unlock();
504 tipc_node_unlock(node); 527 tipc_node_unlock(node);
505 tipc_named_rcv(buf); 528 tipc_named_rcv(buf);
506 } else { 529 } else {
507 spin_lock_bh(&bc_lock); 530 tipc_bclink_lock();
508 bclink_accept_pkt(node, seqno); 531 bclink_accept_pkt(node, seqno);
509 spin_unlock_bh(&bc_lock); 532 tipc_bclink_unlock();
510 tipc_node_unlock(node); 533 tipc_node_unlock(node);
511 kfree_skb(buf); 534 kfree_skb(buf);
512 } 535 }
@@ -552,14 +575,14 @@ receive:
552 } else 575 } else
553 deferred = 0; 576 deferred = 0;
554 577
555 spin_lock_bh(&bc_lock); 578 tipc_bclink_lock();
556 579
557 if (deferred) 580 if (deferred)
558 bcl->stats.deferred_recv++; 581 bcl->stats.deferred_recv++;
559 else 582 else
560 bcl->stats.duplicates++; 583 bcl->stats.duplicates++;
561 584
562 spin_unlock_bh(&bc_lock); 585 tipc_bclink_unlock();
563 586
564unlock: 587unlock:
565 tipc_node_unlock(node); 588 tipc_node_unlock(node);
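The MSG_FRAGMENTER branch above replaces the old LINK_REASM_* return codes with a contract on the two skb pointers passed to tipc_buf_append(). A minimal caller sketch of that assumed contract (the helper itself lives outside this diff; reasm_step is a hypothetical name):

        /* Assumed contract: after tipc_buf_append(&head, &frag),
         * - *frag != NULL            -> complete message reassembled
         * - *frag == NULL, head set  -> fragment absorbed, wait for more
         * - both NULL                -> broken sequence, buffers freed
         */
        static struct sk_buff *reasm_step(struct sk_buff **head,
                                          struct sk_buff *frag, int *err)
        {
                *err = 0;
                tipc_buf_append(head, &frag);
                if (frag)
                        return frag;    /* complete message */
                if (!*head)
                        *err = -EINVAL; /* reassembly error */
                return NULL;            /* wait for more fragments */
        }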
@@ -627,13 +650,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
627 650
628 if (bp_index == 0) { 651 if (bp_index == 0) {
629 /* Use original buffer for first bearer */ 652 /* Use original buffer for first bearer */
630 tipc_bearer_send(b, buf, &b->bcast_addr); 653 tipc_bearer_send(b->identity, buf, &b->bcast_addr);
631 } else { 654 } else {
632 /* Avoid concurrent buffer access */ 655 /* Avoid concurrent buffer access */
633 tbuf = pskb_copy(buf, GFP_ATOMIC); 656 tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
634 if (!tbuf) 657 if (!tbuf)
635 break; 658 break;
636 tipc_bearer_send(b, tbuf, &b->bcast_addr); 659 tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
637 kfree_skb(tbuf); /* Bearer keeps a clone */ 660 kfree_skb(tbuf); /* Bearer keeps a clone */
638 } 661 }
639 662
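One detail in the hunk above is the switch from pskb_copy() to pskb_copy_for_clone(). Both give each secondary bearer a private copy of the header area, so two bearers never write L2 headers into the same linear data; the _for_clone variant is presumably chosen because the copy is immediately cloned again inside tipc_bearer_send(), so allocating it clone-friendly avoids a second allocation. The per-bearer pattern, as used above:

        struct sk_buff *tbuf;

        tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);    /* private headers */
        if (!tbuf)
                break;                                  /* out of memory */
        tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
        kfree_skb(tbuf);        /* drop our ref; the bearer kept a clone */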
@@ -655,20 +678,27 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
655/** 678/**
656 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer 679 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
657 */ 680 */
658void tipc_bcbearer_sort(void) 681void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
659{ 682{
660 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 683 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
661 struct tipc_bcbearer_pair *bp_curr; 684 struct tipc_bcbearer_pair *bp_curr;
685 struct tipc_bearer *b;
662 int b_index; 686 int b_index;
663 int pri; 687 int pri;
664 688
665 spin_lock_bh(&bc_lock); 689 tipc_bclink_lock();
690
691 if (action)
692 tipc_nmap_add(nm_ptr, node);
693 else
694 tipc_nmap_remove(nm_ptr, node);
666 695
667 /* Group bearers by priority (can assume max of two per priority) */ 696 /* Group bearers by priority (can assume max of two per priority) */
668 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); 697 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
669 698
699 rcu_read_lock();
670 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 700 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
671 struct tipc_bearer *b = bearer_list[b_index]; 701 b = rcu_dereference_rtnl(bearer_list[b_index]);
672 if (!b || !b->nodes.count) 702 if (!b || !b->nodes.count)
673 continue; 703 continue;
674 704
@@ -677,6 +707,7 @@ void tipc_bcbearer_sort(void)
677 else 707 else
678 bp_temp[b->priority].secondary = b; 708 bp_temp[b->priority].secondary = b;
679 } 709 }
710 rcu_read_unlock();
680 711
681 /* Create array of bearer pairs for broadcasting */ 712 /* Create array of bearer pairs for broadcasting */
682 bp_curr = bcbearer->bpairs; 713 bp_curr = bcbearer->bpairs;
@@ -702,7 +733,7 @@ void tipc_bcbearer_sort(void)
702 bp_curr++; 733 bp_curr++;
703 } 734 }
704 735
705 spin_unlock_bh(&bc_lock); 736 tipc_bclink_unlock();
706} 737}
707 738
708 739
@@ -714,7 +745,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
714 if (!bcl) 745 if (!bcl)
715 return 0; 746 return 0;
716 747
717 spin_lock_bh(&bc_lock); 748 tipc_bclink_lock();
718 749
719 s = &bcl->stats; 750 s = &bcl->stats;
720 751
@@ -743,7 +774,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
743 s->queue_sz_counts ? 774 s->queue_sz_counts ?
744 (s->accu_queue_sz / s->queue_sz_counts) : 0); 775 (s->accu_queue_sz / s->queue_sz_counts) : 0);
745 776
746 spin_unlock_bh(&bc_lock); 777 tipc_bclink_unlock();
747 return ret; 778 return ret;
748} 779}
749 780
@@ -752,9 +783,9 @@ int tipc_bclink_reset_stats(void)
752 if (!bcl) 783 if (!bcl)
753 return -ENOPROTOOPT; 784 return -ENOPROTOOPT;
754 785
755 spin_lock_bh(&bc_lock); 786 tipc_bclink_lock();
756 memset(&bcl->stats, 0, sizeof(bcl->stats)); 787 memset(&bcl->stats, 0, sizeof(bcl->stats));
757 spin_unlock_bh(&bc_lock); 788 tipc_bclink_unlock();
758 return 0; 789 return 0;
759} 790}
760 791
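The tipc_bclink_lock()/tipc_bclink_unlock() helpers that replace bc_lock throughout these hunks are not shown in this diff. A plausible minimal form, assuming they wrap the spin_lock_init(&bclink->lock) added in tipc_bclink_init() further down (the real unlock may also act on the new TIPC_BCLINK_RESET flag introduced in bcast.h below):

        static void tipc_bclink_lock(void)
        {
                spin_lock_bh(&bclink->lock);
        }

        static void tipc_bclink_unlock(void)
        {
                /* assumption: any deferred TIPC_BCLINK_RESET handling
                 * would be checked here before the lock is dropped
                 */
                spin_unlock_bh(&bclink->lock);
        }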
@@ -765,46 +796,59 @@ int tipc_bclink_set_queue_limits(u32 limit)
765 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) 796 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
766 return -EINVAL; 797 return -EINVAL;
767 798
768 spin_lock_bh(&bc_lock); 799 tipc_bclink_lock();
769 tipc_link_set_queue_limits(bcl, limit); 800 tipc_link_set_queue_limits(bcl, limit);
770 spin_unlock_bh(&bc_lock); 801 tipc_bclink_unlock();
771 return 0; 802 return 0;
772} 803}
773 804
774void tipc_bclink_init(void) 805int tipc_bclink_init(void)
775{ 806{
807 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
808 if (!bcbearer)
809 return -ENOMEM;
810
811 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
812 if (!bclink) {
813 kfree(bcbearer);
814 return -ENOMEM;
815 }
816
817 bcl = &bclink->link;
776 bcbearer->bearer.media = &bcbearer->media; 818 bcbearer->bearer.media = &bcbearer->media;
777 bcbearer->media.send_msg = tipc_bcbearer_send; 819 bcbearer->media.send_msg = tipc_bcbearer_send;
778 sprintf(bcbearer->media.name, "tipc-broadcast"); 820 sprintf(bcbearer->media.name, "tipc-broadcast");
779 821
822 spin_lock_init(&bclink->lock);
780 INIT_LIST_HEAD(&bcl->waiting_ports); 823 INIT_LIST_HEAD(&bcl->waiting_ports);
781 bcl->next_out_no = 1; 824 bcl->next_out_no = 1;
782 spin_lock_init(&bclink->node.lock); 825 spin_lock_init(&bclink->node.lock);
783 bcl->owner = &bclink->node; 826 bcl->owner = &bclink->node;
784 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 827 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
785 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 828 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
786 bcl->b_ptr = &bcbearer->bearer; 829 bcl->bearer_id = MAX_BEARERS;
787 bearer_list[BCBEARER] = &bcbearer->bearer; 830 rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
788 bcl->state = WORKING_WORKING; 831 bcl->state = WORKING_WORKING;
789 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); 832 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
833 return 0;
790} 834}
791 835
792void tipc_bclink_stop(void) 836void tipc_bclink_stop(void)
793{ 837{
794 spin_lock_bh(&bc_lock); 838 tipc_bclink_lock();
795 tipc_link_purge_queues(bcl); 839 tipc_link_purge_queues(bcl);
796 spin_unlock_bh(&bc_lock); 840 tipc_bclink_unlock();
797 841
798 bearer_list[BCBEARER] = NULL; 842 RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
799 memset(bclink, 0, sizeof(*bclink)); 843 synchronize_net();
800 memset(bcbearer, 0, sizeof(*bcbearer)); 844 kfree(bcbearer);
845 kfree(bclink);
801} 846}
802 847
803
804/** 848/**
805 * tipc_nmap_add - add a node to a node map 849 * tipc_nmap_add - add a node to a node map
806 */ 850 */
807void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) 851static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
808{ 852{
809 int n = tipc_node(node); 853 int n = tipc_node(node);
810 int w = n / WSIZE; 854 int w = n / WSIZE;
@@ -819,7 +863,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
819/** 863/**
820 * tipc_nmap_remove - remove a node from a node map 864 * tipc_nmap_remove - remove a node from a node map
821 */ 865 */
822void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) 866static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
823{ 867{
824 int n = tipc_node(node); 868 int n = tipc_node(node);
825 int w = n / WSIZE; 869 int w = n / WSIZE;
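The nmap helpers just made static are plain bitmap operations: with WSIZE == 32, node n lives in word n / WSIZE at bit n % WSIZE. A self-contained sketch consistent with the indexing visible above (the mask and count handling are assumptions; the example_* names are hypothetical):

        #define WSIZE 32

        struct example_node_map {
                u32 count;
                u32 map[MAX_NODES / WSIZE];
        };

        static void example_nmap_add(struct example_node_map *nm, u32 n)
        {
                int w = n / WSIZE;
                u32 mask = 1u << (n % WSIZE);

                if (!(nm->map[w] & mask)) {
                        nm->map[w] |= mask;     /* set membership bit */
                        nm->count++;
                }
        }

        static void example_nmap_remove(struct example_node_map *nm, u32 n)
        {
                int w = n / WSIZE;
                u32 mask = 1u << (n % WSIZE);

                if (nm->map[w] & mask) {
                        nm->map[w] &= ~mask;    /* clear membership bit */
                        nm->count--;
                }
        }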
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index a80ef54b818e..00330c45df3e 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -39,6 +39,7 @@
39 39
40#define MAX_NODES 4096 40#define MAX_NODES 4096
41#define WSIZE 32 41#define WSIZE 32
42#define TIPC_BCLINK_RESET 1
42 43
43/** 44/**
44 * struct tipc_node_map - set of node identifiers 45 * struct tipc_node_map - set of node identifiers
@@ -69,9 +70,6 @@ struct tipc_node;
69 70
70extern const char tipc_bclink_name[]; 71extern const char tipc_bclink_name[];
71 72
72void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
73void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
74
75/** 73/**
76 * tipc_nmap_equal - test for equality of node maps 74 * tipc_nmap_equal - test for equality of node maps
77 */ 75 */
@@ -84,8 +82,9 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
84void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port); 82void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
85void tipc_port_list_free(struct tipc_port_list *pl_ptr); 83void tipc_port_list_free(struct tipc_port_list *pl_ptr);
86 84
87void tipc_bclink_init(void); 85int tipc_bclink_init(void);
88void tipc_bclink_stop(void); 86void tipc_bclink_stop(void);
87void tipc_bclink_set_flags(unsigned int flags);
89void tipc_bclink_add_node(u32 addr); 88void tipc_bclink_add_node(u32 addr);
90void tipc_bclink_remove_node(u32 addr); 89void tipc_bclink_remove_node(u32 addr);
91struct tipc_node *tipc_bclink_retransmit_to(void); 90struct tipc_node *tipc_bclink_retransmit_to(void);
@@ -98,6 +97,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
98int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 97int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
99int tipc_bclink_reset_stats(void); 98int tipc_bclink_reset_stats(void);
100int tipc_bclink_set_queue_limits(u32 limit); 99int tipc_bclink_set_queue_limits(u32 limit);
101void tipc_bcbearer_sort(void); 100void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
102 101
103#endif 102#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3fef7eb776dc..264474394f9f 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -49,7 +49,7 @@ static struct tipc_media * const media_info_array[] = {
49 NULL 49 NULL
50}; 50};
51 51
52struct tipc_bearer *bearer_list[MAX_BEARERS + 1]; 52struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
53 53
54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); 54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
55 55
@@ -178,7 +178,7 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
178 u32 i; 178 u32 i;
179 179
180 for (i = 0; i < MAX_BEARERS; i++) { 180 for (i = 0; i < MAX_BEARERS; i++) {
181 b_ptr = bearer_list[i]; 181 b_ptr = rtnl_dereference(bearer_list[i]);
182 if (b_ptr && (!strcmp(b_ptr->name, name))) 182 if (b_ptr && (!strcmp(b_ptr->name, name)))
183 return b_ptr; 183 return b_ptr;
184 } 184 }
@@ -198,10 +198,9 @@ struct sk_buff *tipc_bearer_get_names(void)
198 if (!buf) 198 if (!buf)
199 return NULL; 199 return NULL;
200 200
201 read_lock_bh(&tipc_net_lock);
202 for (i = 0; media_info_array[i] != NULL; i++) { 201 for (i = 0; media_info_array[i] != NULL; i++) {
203 for (j = 0; j < MAX_BEARERS; j++) { 202 for (j = 0; j < MAX_BEARERS; j++) {
204 b = bearer_list[j]; 203 b = rtnl_dereference(bearer_list[j]);
205 if (!b) 204 if (!b)
206 continue; 205 continue;
207 if (b->media == media_info_array[i]) { 206 if (b->media == media_info_array[i]) {
@@ -211,22 +210,33 @@ struct sk_buff *tipc_bearer_get_names(void)
211 } 210 }
212 } 211 }
213 } 212 }
214 read_unlock_bh(&tipc_net_lock);
215 return buf; 213 return buf;
216} 214}
217 215
218void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest) 216void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
219{ 217{
220 tipc_nmap_add(&b_ptr->nodes, dest); 218 struct tipc_bearer *b_ptr;
221 tipc_bcbearer_sort(); 219
222 tipc_disc_add_dest(b_ptr->link_req); 220 rcu_read_lock();
221 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
222 if (b_ptr) {
223 tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
224 tipc_disc_add_dest(b_ptr->link_req);
225 }
226 rcu_read_unlock();
223} 227}
224 228
225void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) 229void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
226{ 230{
227 tipc_nmap_remove(&b_ptr->nodes, dest); 231 struct tipc_bearer *b_ptr;
228 tipc_bcbearer_sort(); 232
229 tipc_disc_remove_dest(b_ptr->link_req); 233 rcu_read_lock();
234 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
235 if (b_ptr) {
236 tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
237 tipc_disc_remove_dest(b_ptr->link_req);
238 }
239 rcu_read_unlock();
230} 240}
231 241
232/** 242/**
@@ -271,13 +281,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
271 return -EINVAL; 281 return -EINVAL;
272 } 282 }
273 283
274 write_lock_bh(&tipc_net_lock);
275
276 m_ptr = tipc_media_find(b_names.media_name); 284 m_ptr = tipc_media_find(b_names.media_name);
277 if (!m_ptr) { 285 if (!m_ptr) {
278 pr_warn("Bearer <%s> rejected, media <%s> not registered\n", 286 pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
279 name, b_names.media_name); 287 name, b_names.media_name);
280 goto exit; 288 return -EINVAL;
281 } 289 }
282 290
283 if (priority == TIPC_MEDIA_LINK_PRI) 291 if (priority == TIPC_MEDIA_LINK_PRI)
@@ -287,7 +295,7 @@ restart:
287 bearer_id = MAX_BEARERS; 295 bearer_id = MAX_BEARERS;
288 with_this_prio = 1; 296 with_this_prio = 1;
289 for (i = MAX_BEARERS; i-- != 0; ) { 297 for (i = MAX_BEARERS; i-- != 0; ) {
290 b_ptr = bearer_list[i]; 298 b_ptr = rtnl_dereference(bearer_list[i]);
291 if (!b_ptr) { 299 if (!b_ptr) {
292 bearer_id = i; 300 bearer_id = i;
293 continue; 301 continue;
@@ -295,14 +303,14 @@ restart:
295 if (!strcmp(name, b_ptr->name)) { 303 if (!strcmp(name, b_ptr->name)) {
296 pr_warn("Bearer <%s> rejected, already enabled\n", 304 pr_warn("Bearer <%s> rejected, already enabled\n",
297 name); 305 name);
298 goto exit; 306 return -EINVAL;
299 } 307 }
300 if ((b_ptr->priority == priority) && 308 if ((b_ptr->priority == priority) &&
301 (++with_this_prio > 2)) { 309 (++with_this_prio > 2)) {
302 if (priority-- == 0) { 310 if (priority-- == 0) {
303 pr_warn("Bearer <%s> rejected, duplicate priority\n", 311 pr_warn("Bearer <%s> rejected, duplicate priority\n",
304 name); 312 name);
305 goto exit; 313 return -EINVAL;
306 } 314 }
307 pr_warn("Bearer <%s> priority adjustment required %u->%u\n", 315 pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
308 name, priority + 1, priority); 316 name, priority + 1, priority);
@@ -312,21 +320,20 @@ restart:
312 if (bearer_id >= MAX_BEARERS) { 320 if (bearer_id >= MAX_BEARERS) {
313 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 321 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
314 name, MAX_BEARERS); 322 name, MAX_BEARERS);
315 goto exit; 323 return -EINVAL;
316 } 324 }
317 325
318 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC); 326 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
319 if (!b_ptr) { 327 if (!b_ptr)
320 res = -ENOMEM; 328 return -ENOMEM;
321 goto exit; 329
322 }
323 strcpy(b_ptr->name, name); 330 strcpy(b_ptr->name, name);
324 b_ptr->media = m_ptr; 331 b_ptr->media = m_ptr;
325 res = m_ptr->enable_media(b_ptr); 332 res = m_ptr->enable_media(b_ptr);
326 if (res) { 333 if (res) {
327 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 334 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
328 name, -res); 335 name, -res);
329 goto exit; 336 return -EINVAL;
330 } 337 }
331 338
332 b_ptr->identity = bearer_id; 339 b_ptr->identity = bearer_id;
@@ -341,16 +348,14 @@ restart:
341 bearer_disable(b_ptr, false); 348 bearer_disable(b_ptr, false);
342 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 349 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
343 name); 350 name);
344 goto exit; 351 return -EINVAL;
345 } 352 }
346 353
347 bearer_list[bearer_id] = b_ptr; 354 rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
348 355
349 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 356 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
350 name, 357 name,
351 tipc_addr_string_fill(addr_string, disc_domain), priority); 358 tipc_addr_string_fill(addr_string, disc_domain), priority);
352exit:
353 write_unlock_bh(&tipc_net_lock);
354 return res; 359 return res;
355} 360}
356 361
@@ -359,19 +364,16 @@ exit:
359 */ 364 */
360static int tipc_reset_bearer(struct tipc_bearer *b_ptr) 365static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
361{ 366{
362 read_lock_bh(&tipc_net_lock);
363 pr_info("Resetting bearer <%s>\n", b_ptr->name); 367 pr_info("Resetting bearer <%s>\n", b_ptr->name);
364 tipc_disc_delete(b_ptr->link_req);
365 tipc_link_reset_list(b_ptr->identity); 368 tipc_link_reset_list(b_ptr->identity);
366 tipc_disc_create(b_ptr, &b_ptr->bcast_addr); 369 tipc_disc_reset(b_ptr);
367 read_unlock_bh(&tipc_net_lock);
368 return 0; 370 return 0;
369} 371}
370 372
371/** 373/**
372 * bearer_disable 374 * bearer_disable
373 * 375 *
374 * Note: This routine assumes caller holds tipc_net_lock. 376 * Note: This routine assumes caller holds RTNL lock.
375 */ 377 */
376static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) 378static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
377{ 379{
@@ -385,12 +387,12 @@ static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
385 tipc_disc_delete(b_ptr->link_req); 387 tipc_disc_delete(b_ptr->link_req);
386 388
387 for (i = 0; i < MAX_BEARERS; i++) { 389 for (i = 0; i < MAX_BEARERS; i++) {
388 if (b_ptr == bearer_list[i]) { 390 if (b_ptr == rtnl_dereference(bearer_list[i])) {
389 bearer_list[i] = NULL; 391 RCU_INIT_POINTER(bearer_list[i], NULL);
390 break; 392 break;
391 } 393 }
392 } 394 }
393 kfree(b_ptr); 395 kfree_rcu(b_ptr, rcu);
394} 396}
395 397
396int tipc_disable_bearer(const char *name) 398int tipc_disable_bearer(const char *name)
@@ -398,7 +400,6 @@ int tipc_disable_bearer(const char *name)
398 struct tipc_bearer *b_ptr; 400 struct tipc_bearer *b_ptr;
399 int res; 401 int res;
400 402
401 write_lock_bh(&tipc_net_lock);
402 b_ptr = tipc_bearer_find(name); 403 b_ptr = tipc_bearer_find(name);
403 if (b_ptr == NULL) { 404 if (b_ptr == NULL) {
404 pr_warn("Attempt to disable unknown bearer <%s>\n", name); 405 pr_warn("Attempt to disable unknown bearer <%s>\n", name);
@@ -407,32 +408,9 @@ int tipc_disable_bearer(const char *name)
407 bearer_disable(b_ptr, false); 408 bearer_disable(b_ptr, false);
408 res = 0; 409 res = 0;
409 } 410 }
410 write_unlock_bh(&tipc_net_lock);
411 return res; 411 return res;
412} 412}
413 413
414
415/* tipc_l2_media_addr_set - initialize Ethernet media address structure
416 *
417 * Media-dependent "value" field stores MAC address in first 6 bytes
418 * and zeroes out the remaining bytes.
419 */
420void tipc_l2_media_addr_set(const struct tipc_bearer *b,
421 struct tipc_media_addr *a, char *mac)
422{
423 int len = b->media->hwaddr_len;
424
425 if (unlikely(sizeof(a->value) < len)) {
426 WARN_ONCE(1, "Media length invalid\n");
427 return;
428 }
429
430 memcpy(a->value, mac, len);
431 memset(a->value + len, 0, sizeof(a->value) - len);
432 a->media_id = b->media->type_id;
433 a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
434}
435
436int tipc_enable_l2_media(struct tipc_bearer *b) 414int tipc_enable_l2_media(struct tipc_bearer *b)
437{ 415{
438 struct net_device *dev; 416 struct net_device *dev;
@@ -443,33 +421,37 @@ int tipc_enable_l2_media(struct tipc_bearer *b)
443 if (!dev) 421 if (!dev)
444 return -ENODEV; 422 return -ENODEV;
445 423
446 /* Associate TIPC bearer with Ethernet bearer */ 424 /* Associate TIPC bearer with L2 bearer */
447 b->media_ptr = dev; 425 rcu_assign_pointer(b->media_ptr, dev);
448 memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value)); 426 memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
449 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); 427 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
450 b->bcast_addr.media_id = b->media->type_id; 428 b->bcast_addr.media_id = b->media->type_id;
451 b->bcast_addr.broadcast = 1; 429 b->bcast_addr.broadcast = 1;
452 b->mtu = dev->mtu; 430 b->mtu = dev->mtu;
453 tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr); 431 b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
454 rcu_assign_pointer(dev->tipc_ptr, b); 432 rcu_assign_pointer(dev->tipc_ptr, b);
455 return 0; 433 return 0;
456} 434}
457 435
458/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface 436/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
459 * 437 *
460 * Mark Ethernet bearer as inactive so that incoming buffers are thrown away, 438 * Mark L2 bearer as inactive so that incoming buffers are thrown away,
461 * then get worker thread to complete bearer cleanup. (Can't do cleanup 439 * then get worker thread to complete bearer cleanup. (Can't do cleanup
462 * here because cleanup code needs to sleep and caller holds spinlocks.) 440 * here because cleanup code needs to sleep and caller holds spinlocks.)
463 */ 441 */
464void tipc_disable_l2_media(struct tipc_bearer *b) 442void tipc_disable_l2_media(struct tipc_bearer *b)
465{ 443{
466 struct net_device *dev = (struct net_device *)b->media_ptr; 444 struct net_device *dev;
445
446 dev = (struct net_device *)rtnl_dereference(b->media_ptr);
447 RCU_INIT_POINTER(b->media_ptr, NULL);
467 RCU_INIT_POINTER(dev->tipc_ptr, NULL); 448 RCU_INIT_POINTER(dev->tipc_ptr, NULL);
449 synchronize_net();
468 dev_put(dev); 450 dev_put(dev);
469} 451}
470 452
471/** 453/**
472 * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface 454 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
473 * @buf: the packet to be sent 455 * @buf: the packet to be sent
474 * @b_ptr: the bearer through which the packet is to be sent 456 * @b_ptr: the bearer through which the packet is to be sent
475 * @dest: peer destination address 457 * @dest: peer destination address
@@ -478,8 +460,12 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
478 struct tipc_media_addr *dest) 460 struct tipc_media_addr *dest)
479{ 461{
480 struct sk_buff *clone; 462 struct sk_buff *clone;
463 struct net_device *dev;
481 int delta; 464 int delta;
482 struct net_device *dev = (struct net_device *)b->media_ptr; 465
466 dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
467 if (!dev)
468 return 0;
483 469
484 clone = skb_clone(buf, GFP_ATOMIC); 470 clone = skb_clone(buf, GFP_ATOMIC);
485 if (!clone) 471 if (!clone)
@@ -507,10 +493,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
507 * The media send routine must not alter the buffer being passed in 493 * The media send routine must not alter the buffer being passed in
508 * as it may be needed for later retransmission! 494 * as it may be needed for later retransmission!
509 */ 495 */
510void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 496void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
511 struct tipc_media_addr *dest) 497 struct tipc_media_addr *dest)
512{ 498{
513 b->media->send_msg(buf, b, dest); 499 struct tipc_bearer *b_ptr;
500
501 rcu_read_lock();
502 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
503 if (likely(b_ptr))
504 b_ptr->media->send_msg(buf, b_ptr, dest);
505 rcu_read_unlock();
514} 506}
515 507
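The rcu_dereference_rtnl() calls introduced throughout bearer.c expand (per <linux/rtnetlink.h>) to rcu_dereference_check(p, lockdep_rtnl_is_held()): the access is legal either inside an RCU read-side section (data path) or while holding RTNL (the configuration path, now serialized via config.c below). A sketch of the resulting discipline (bearer_get is a hypothetical helper):

        /* Callers must be in rcu_read_lock() or hold RTNL */
        static struct tipc_bearer *bearer_get(u32 bearer_id)
        {
                return rcu_dereference_rtnl(bearer_list[bearer_id]);
        }

        /* data-path usage, as in tipc_bearer_send() above */
        rcu_read_lock();
        b_ptr = bearer_get(bearer_id);
        if (likely(b_ptr))
                b_ptr->media->send_msg(buf, b_ptr, dest);
        rcu_read_unlock();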
516/** 508/**
@@ -535,7 +527,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
535 } 527 }
536 528
537 rcu_read_lock(); 529 rcu_read_lock();
538 b_ptr = rcu_dereference(dev->tipc_ptr); 530 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
539 if (likely(b_ptr)) { 531 if (likely(b_ptr)) {
540 if (likely(buf->pkt_type <= PACKET_BROADCAST)) { 532 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
541 buf->next = NULL; 533 buf->next = NULL;
@@ -568,12 +560,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
568 if (!net_eq(dev_net(dev), &init_net)) 560 if (!net_eq(dev_net(dev), &init_net))
569 return NOTIFY_DONE; 561 return NOTIFY_DONE;
570 562
571 rcu_read_lock(); 563 b_ptr = rtnl_dereference(dev->tipc_ptr);
572 b_ptr = rcu_dereference(dev->tipc_ptr); 564 if (!b_ptr)
573 if (!b_ptr) {
574 rcu_read_unlock();
575 return NOTIFY_DONE; 565 return NOTIFY_DONE;
576 }
577 566
578 b_ptr->mtu = dev->mtu; 567 b_ptr->mtu = dev->mtu;
579 568
@@ -586,17 +575,15 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
586 tipc_reset_bearer(b_ptr); 575 tipc_reset_bearer(b_ptr);
587 break; 576 break;
588 case NETDEV_CHANGEADDR: 577 case NETDEV_CHANGEADDR:
589 tipc_l2_media_addr_set(b_ptr, &b_ptr->addr, 578 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
590 (char *)dev->dev_addr); 579 (char *)dev->dev_addr);
591 tipc_reset_bearer(b_ptr); 580 tipc_reset_bearer(b_ptr);
592 break; 581 break;
593 case NETDEV_UNREGISTER: 582 case NETDEV_UNREGISTER:
594 case NETDEV_CHANGENAME: 583 case NETDEV_CHANGENAME:
595 tipc_disable_bearer(b_ptr->name); 584 bearer_disable(b_ptr, false);
596 break; 585 break;
597 } 586 }
598 rcu_read_unlock();
599
600 return NOTIFY_OK; 587 return NOTIFY_OK;
601} 588}
602 589
@@ -633,7 +620,7 @@ void tipc_bearer_stop(void)
633 u32 i; 620 u32 i;
634 621
635 for (i = 0; i < MAX_BEARERS; i++) { 622 for (i = 0; i < MAX_BEARERS; i++) {
636 b_ptr = bearer_list[i]; 623 b_ptr = rtnl_dereference(bearer_list[i]);
637 if (b_ptr) { 624 if (b_ptr) {
638 bearer_disable(b_ptr, true); 625 bearer_disable(b_ptr, true);
639 bearer_list[i] = NULL; 626 bearer_list[i] = NULL;
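The disable path above follows the standard RCU unpublish-then-retire sequence, using the rcu_head that the bearer.h hunk below adds to struct tipc_bearer. Condensed into one sketch (example_bearer_retire is a hypothetical name):

        static void example_bearer_retire(int i)
        {
                struct tipc_bearer *b = rtnl_dereference(bearer_list[i]);

                if (!b)
                        return;
                RCU_INIT_POINTER(bearer_list[i], NULL); /* unpublish */
                kfree_rcu(b, rcu);      /* freed only after a grace period,
                                         * so in-flight readers stay safe */
        }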
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ba48145e871d..78fccc49de23 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -42,14 +42,12 @@
42#define MAX_BEARERS 2 42#define MAX_BEARERS 2
43#define MAX_MEDIA 2 43#define MAX_MEDIA 2
44 44
45/* 45/* Identifiers associated with TIPC message header media address info
46 * Identifiers associated with TIPC message header media address info 46 * - address info field is 32 bytes long
47 * 47 * - the field's actual content and length is defined per media
48 * - address info field is 20 bytes long 48 * - remaining unused bytes in the field are set to zero
49 * - media type identifier located at offset 3
50 * - remaining bytes vary according to media type
51 */ 49 */
52#define TIPC_MEDIA_ADDR_SIZE 20 50#define TIPC_MEDIA_ADDR_SIZE 32
53#define TIPC_MEDIA_TYPE_OFFSET 3 51#define TIPC_MEDIA_TYPE_OFFSET 3
54 52
55/* 53/*
@@ -77,9 +75,10 @@ struct tipc_bearer;
77 * @send_msg: routine which handles buffer transmission 75 * @send_msg: routine which handles buffer transmission
78 * @enable_media: routine which enables a media 76 * @enable_media: routine which enables a media
79 * @disable_media: routine which disables a media 77 * @disable_media: routine which disables a media
80 * @addr2str: routine which converts media address to string 78 * @addr2str: convert media address format to string
81 * @addr2msg: routine which converts media address to protocol message area 79 * @addr2msg: convert from media addr format to discovery msg addr format
82 * @msg2addr: routine which converts media address from protocol message area 80 * @msg2addr: convert from discovery msg addr format to media addr format
81 * @raw2addr: convert from raw addr format to media addr format
83 * @priority: default link (and bearer) priority 82 * @priority: default link (and bearer) priority
84 * @tolerance: default time (in ms) before declaring link failure 83 * @tolerance: default time (in ms) before declaring link failure
85 * @window: default window (in packets) before declaring link congestion 84 * @window: default window (in packets) before declaring link congestion
@@ -93,10 +92,16 @@ struct tipc_media {
93 struct tipc_media_addr *dest); 92 struct tipc_media_addr *dest);
94 int (*enable_media)(struct tipc_bearer *b_ptr); 93 int (*enable_media)(struct tipc_bearer *b_ptr);
95 void (*disable_media)(struct tipc_bearer *b_ptr); 94 void (*disable_media)(struct tipc_bearer *b_ptr);
96 int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size); 95 int (*addr2str)(struct tipc_media_addr *addr,
97 int (*addr2msg)(struct tipc_media_addr *a, char *msg_area); 96 char *strbuf,
98 int (*msg2addr)(const struct tipc_bearer *b_ptr, 97 int bufsz);
99 struct tipc_media_addr *a, char *msg_area); 98 int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
99 int (*msg2addr)(struct tipc_bearer *b,
100 struct tipc_media_addr *addr,
101 char *msg);
102 int (*raw2addr)(struct tipc_bearer *b,
103 struct tipc_media_addr *addr,
104 char *raw);
100 u32 priority; 105 u32 priority;
101 u32 tolerance; 106 u32 tolerance;
102 u32 window; 107 u32 window;
@@ -113,6 +118,7 @@ struct tipc_media {
113 * @name: bearer name (format = media:interface) 118 * @name: bearer name (format = media:interface)
114 * @media: ptr to media structure associated with bearer 119 * @media: ptr to media structure associated with bearer
115 * @bcast_addr: media address used in broadcasting 120 * @bcast_addr: media address used in broadcasting
121 * @rcu: rcu struct for tipc_bearer
116 * @priority: default link priority for bearer 122 * @priority: default link priority for bearer
117 * @window: default window size for bearer 123 * @window: default window size for bearer
118 * @tolerance: default link tolerance for bearer 124 * @tolerance: default link tolerance for bearer
@@ -127,12 +133,13 @@ struct tipc_media {
127 * care of initializing all other fields. 133 * care of initializing all other fields.
128 */ 134 */
129struct tipc_bearer { 135struct tipc_bearer {
130 void *media_ptr; /* initialized by media */ 136 void __rcu *media_ptr; /* initialized by media */
131 u32 mtu; /* initialized by media */ 137 u32 mtu; /* initialized by media */
132 struct tipc_media_addr addr; /* initialized by media */ 138 struct tipc_media_addr addr; /* initialized by media */
133 char name[TIPC_MAX_BEARER_NAME]; 139 char name[TIPC_MAX_BEARER_NAME];
134 struct tipc_media *media; 140 struct tipc_media *media;
135 struct tipc_media_addr bcast_addr; 141 struct tipc_media_addr bcast_addr;
142 struct rcu_head rcu;
136 u32 priority; 143 u32 priority;
137 u32 window; 144 u32 window;
138 u32 tolerance; 145 u32 tolerance;
@@ -150,7 +157,7 @@ struct tipc_bearer_names {
150 157
151struct tipc_link; 158struct tipc_link;
152 159
153extern struct tipc_bearer *bearer_list[]; 160extern struct tipc_bearer __rcu *bearer_list[];
154 161
155/* 162/*
156 * TIPC routines available to supported media types 163 * TIPC routines available to supported media types
@@ -173,22 +180,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
173int tipc_media_set_window(const char *name, u32 new_value); 180int tipc_media_set_window(const char *name, u32 new_value);
174void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); 181void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
175struct sk_buff *tipc_media_get_names(void); 182struct sk_buff *tipc_media_get_names(void);
176void tipc_l2_media_addr_set(const struct tipc_bearer *b,
177 struct tipc_media_addr *a, char *mac);
178int tipc_enable_l2_media(struct tipc_bearer *b); 183int tipc_enable_l2_media(struct tipc_bearer *b);
179void tipc_disable_l2_media(struct tipc_bearer *b); 184void tipc_disable_l2_media(struct tipc_bearer *b);
180int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, 185int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
181 struct tipc_media_addr *dest); 186 struct tipc_media_addr *dest);
182 187
183struct sk_buff *tipc_bearer_get_names(void); 188struct sk_buff *tipc_bearer_get_names(void);
184void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); 189void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
185void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); 190void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
186struct tipc_bearer *tipc_bearer_find(const char *name); 191struct tipc_bearer *tipc_bearer_find(const char *name);
187struct tipc_media *tipc_media_find(const char *name); 192struct tipc_media *tipc_media_find(const char *name);
188int tipc_bearer_setup(void); 193int tipc_bearer_setup(void);
189void tipc_bearer_cleanup(void); 194void tipc_bearer_cleanup(void);
190void tipc_bearer_stop(void); 195void tipc_bearer_stop(void);
191void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 196void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
192 struct tipc_media_addr *dest); 197 struct tipc_media_addr *dest);
193 198
194#endif /* _TIPC_BEARER_H */ 199#endif /* _TIPC_BEARER_H */
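With the reworked callback table, an L2 media driver wires up the four address converters roughly as follows. This is a sketch based on the eth_media.c hunks at the end of this diff; the priority, type_id and name fields are illustrative assumptions, not verified against this tree:

        static struct tipc_media eth_media_info = {
                .send_msg       = tipc_l2_send_msg,
                .enable_media   = tipc_enable_l2_media,
                .disable_media  = tipc_disable_l2_media,
                .addr2str       = tipc_eth_addr2str,
                .addr2msg       = tipc_eth_addr2msg,
                .msg2addr       = tipc_eth_msg2addr,
                .raw2addr       = tipc_eth_raw2addr,    /* new callback */
                .priority       = TIPC_DEF_LINK_PRI,    /* illustrative */
                .type_id        = TIPC_MEDIA_TYPE_ETH,  /* illustrative */
                .name           = "eth",                /* illustrative */
        };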
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 4b981c053823..2b42403ad33a 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -42,8 +42,6 @@
42 42
43#define REPLY_TRUNCATED "<truncated>\n" 43#define REPLY_TRUNCATED "<truncated>\n"
44 44
45static DEFINE_MUTEX(config_mutex);
46
47static const void *req_tlv_area; /* request message TLV area */ 45static const void *req_tlv_area; /* request message TLV area */
48static int req_tlv_space; /* request message TLV area size */ 46static int req_tlv_space; /* request message TLV area size */
49static int rep_headroom; /* reply message headroom to use */ 47static int rep_headroom; /* reply message headroom to use */
@@ -179,8 +177,10 @@ static struct sk_buff *cfg_set_own_addr(void)
179 if (tipc_own_addr) 177 if (tipc_own_addr)
180 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 178 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
181 " (cannot change node address once assigned)"); 179 " (cannot change node address once assigned)");
182 tipc_net_start(addr); 180 if (!tipc_net_start(addr))
183 return tipc_cfg_reply_none(); 181 return tipc_cfg_reply_none();
182
183 return tipc_cfg_reply_error_string("cannot change to network mode");
184} 184}
185 185
186static struct sk_buff *cfg_set_max_ports(void) 186static struct sk_buff *cfg_set_max_ports(void)
@@ -223,7 +223,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
223{ 223{
224 struct sk_buff *rep_tlv_buf; 224 struct sk_buff *rep_tlv_buf;
225 225
226 mutex_lock(&config_mutex); 226 rtnl_lock();
227 227
228 /* Save request and reply details in a well-known location */ 228 /* Save request and reply details in a well-known location */
229 req_tlv_area = request_area; 229 req_tlv_area = request_area;
@@ -337,6 +337,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
337 337
338 /* Return reply buffer */ 338 /* Return reply buffer */
339exit: 339exit:
340 mutex_unlock(&config_mutex); 340 rtnl_unlock();
341 return rep_tlv_buf; 341 return rep_tlv_buf;
342} 342}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 50d57429ebca..676d18015dd8 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -80,7 +80,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
80 */ 80 */
81static void tipc_core_stop(void) 81static void tipc_core_stop(void)
82{ 82{
83 tipc_handler_stop();
84 tipc_net_stop(); 83 tipc_net_stop();
85 tipc_bearer_cleanup(); 84 tipc_bearer_cleanup();
86 tipc_netlink_stop(); 85 tipc_netlink_stop();
@@ -100,10 +99,6 @@ static int tipc_core_start(void)
100 99
101 get_random_bytes(&tipc_random, sizeof(tipc_random)); 100 get_random_bytes(&tipc_random, sizeof(tipc_random));
102 101
103 err = tipc_handler_start();
104 if (err)
105 goto out_handler;
106
107 err = tipc_ref_table_init(tipc_max_ports, tipc_random); 102 err = tipc_ref_table_init(tipc_max_ports, tipc_random);
108 if (err) 103 if (err)
109 goto out_reftbl; 104 goto out_reftbl;
@@ -146,8 +141,6 @@ out_netlink:
146out_nametbl: 141out_nametbl:
147 tipc_ref_table_stop(); 142 tipc_ref_table_stop();
148out_reftbl: 143out_reftbl:
149 tipc_handler_stop();
150out_handler:
151 return err; 144 return err;
152} 145}
153 146
@@ -161,10 +154,11 @@ static int __init tipc_init(void)
161 tipc_max_ports = CONFIG_TIPC_PORTS; 154 tipc_max_ports = CONFIG_TIPC_PORTS;
162 tipc_net_id = 4711; 155 tipc_net_id = 4711;
163 156
164 sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE; 157 sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
165 sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 << 158 TIPC_LOW_IMPORTANCE;
159 sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
166 TIPC_CRITICAL_IMPORTANCE; 160 TIPC_CRITICAL_IMPORTANCE;
167 sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT; 161 sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
168 162
169 res = tipc_core_start(); 163 res = tipc_core_start();
170 if (res) 164 if (res)
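The renamed limit feeds the three sysctl defaults above; the shifts work out as follows, assuming TIPC_LOW_IMPORTANCE == 0 and TIPC_CRITICAL_IMPORTANCE == 3 as in <linux/tipc.h>:

        /* With L = TIPC_CONN_OVERLOAD_LIMIT:
         *
         *      sysctl_tipc_rmem[0] = L >> 4 << 0  =  L / 16   (min)
         *      sysctl_tipc_rmem[1] = L >> 4 << 3  =  L / 2    (default)
         *      sysctl_tipc_rmem[2] = L                        (max)
         */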
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 8985bbcb942b..bb26ed1ee966 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -56,7 +56,8 @@
56#include <linux/list.h> 56#include <linux/list.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/vmalloc.h> 58#include <linux/vmalloc.h>
59 59#include <linux/rtnetlink.h>
60#include <linux/etherdevice.h>
60 61
61#define TIPC_MOD_VER "2.0.0" 62#define TIPC_MOD_VER "2.0.0"
62 63
@@ -89,8 +90,6 @@ extern int tipc_random __read_mostly;
89/* 90/*
90 * Routines available to privileged subsystems 91 * Routines available to privileged subsystems
91 */ 92 */
92int tipc_handler_start(void);
93void tipc_handler_stop(void);
94int tipc_netlink_start(void); 93int tipc_netlink_start(void);
95void tipc_netlink_stop(void); 94void tipc_netlink_stop(void);
96int tipc_socket_init(void); 95int tipc_socket_init(void);
@@ -109,12 +108,10 @@ void tipc_unregister_sysctl(void);
109#endif 108#endif
110 109
111/* 110/*
112 * TIPC timer and signal code 111 * TIPC timer code
113 */ 112 */
114typedef void (*Handler) (unsigned long); 113typedef void (*Handler) (unsigned long);
115 114
116u32 tipc_k_signal(Handler routine, unsigned long argument);
117
118/** 115/**
119 * k_init_timer - initialize a timer 116 * k_init_timer - initialize a timer
120 * @timer: pointer to timer structure 117 * @timer: pointer to timer structure
@@ -191,6 +188,7 @@ static inline void k_term_timer(struct timer_list *timer)
191struct tipc_skb_cb { 188struct tipc_skb_cb {
192 void *handle; 189 void *handle;
193 bool deferred; 190 bool deferred;
191 struct sk_buff *tail;
194}; 192};
195 193
196#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 194#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 542fe3413dc4..aa722a42ef8b 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, 2014, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -46,8 +46,9 @@
46 46
47/** 47/**
48 * struct tipc_link_req - information about an ongoing link setup request 48 * struct tipc_link_req - information about an ongoing link setup request
49 * @bearer: bearer issuing requests 49 * @bearer_id: identity of bearer issuing requests
50 * @dest: destination address for request messages 50 * @dest: destination address for request messages
51 * @domain: network domain to which links can be established
51 * @num_nodes: number of nodes currently discovered (i.e. with an active link) 52 * @num_nodes: number of nodes currently discovered (i.e. with an active link)
52 * @lock: spinlock for controlling access to requests 53 * @lock: spinlock for controlling access to requests
53 * @buf: request message to be (repeatedly) sent 54 * @buf: request message to be (repeatedly) sent
@@ -55,8 +56,9 @@
55 * @timer_intv: current interval between requests (in ms) 56 * @timer_intv: current interval between requests (in ms)
56 */ 57 */
57struct tipc_link_req { 58struct tipc_link_req {
58 struct tipc_bearer *bearer; 59 u32 bearer_id;
59 struct tipc_media_addr dest; 60 struct tipc_media_addr dest;
61 u32 domain;
60 int num_nodes; 62 int num_nodes;
61 spinlock_t lock; 63 spinlock_t lock;
62 struct sk_buff *buf; 64 struct sk_buff *buf;
@@ -69,22 +71,19 @@ struct tipc_link_req {
69 * @type: message type (request or response) 71 * @type: message type (request or response)
70 * @b_ptr: ptr to bearer issuing message 72 * @b_ptr: ptr to bearer issuing message
71 */ 73 */
72static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr) 74static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
75 struct tipc_bearer *b_ptr)
73{ 76{
74 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
75 struct tipc_msg *msg; 77 struct tipc_msg *msg;
76 u32 dest_domain = b_ptr->domain; 78 u32 dest_domain = b_ptr->domain;
77 79
78 if (buf) { 80 msg = buf_msg(buf);
79 msg = buf_msg(buf); 81 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
80 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 82 msg_set_non_seq(msg, 1);
81 msg_set_non_seq(msg, 1); 83 msg_set_node_sig(msg, tipc_random);
82 msg_set_node_sig(msg, tipc_random); 84 msg_set_dest_domain(msg, dest_domain);
83 msg_set_dest_domain(msg, dest_domain); 85 msg_set_bc_netid(msg, tipc_net_id);
84 msg_set_bc_netid(msg, tipc_net_id); 86 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
85 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
86 }
87 return buf;
88} 87}
89 88
90/** 89/**
@@ -107,146 +106,150 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
107} 106}
108 107
109/** 108/**
110 * tipc_disc_rcv - handle incoming link setup message (request or response) 109 * tipc_disc_rcv - handle incoming discovery message (request or response)
111 * @buf: buffer containing message 110 * @buf: buffer containing message
112 * @b_ptr: bearer that message arrived on 111 * @bearer: bearer that message arrived on
113 */ 112 */
114void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr) 113void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
115{ 114{
116 struct tipc_node *n_ptr; 115 struct tipc_node *node;
117 struct tipc_link *link; 116 struct tipc_link *link;
118 struct tipc_media_addr media_addr; 117 struct tipc_media_addr maddr;
119 struct sk_buff *rbuf; 118 struct sk_buff *rbuf;
120 struct tipc_msg *msg = buf_msg(buf); 119 struct tipc_msg *msg = buf_msg(buf);
121 u32 dest = msg_dest_domain(msg); 120 u32 ddom = msg_dest_domain(msg);
122 u32 orig = msg_prevnode(msg); 121 u32 onode = msg_prevnode(msg);
123 u32 net_id = msg_bc_netid(msg); 122 u32 net_id = msg_bc_netid(msg);
124 u32 type = msg_type(msg); 123 u32 mtyp = msg_type(msg);
125 u32 signature = msg_node_sig(msg); 124 u32 signature = msg_node_sig(msg);
126 int addr_mismatch; 125 bool addr_match = false;
127 int link_fully_up; 126 bool sign_match = false;
128 127 bool link_up = false;
129 media_addr.broadcast = 1; 128 bool accept_addr = false;
130 b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg)); 129 bool accept_sign = false;
130 bool respond = false;
131
132 bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
131 kfree_skb(buf); 133 kfree_skb(buf);
132 134
133 /* Ensure message from node is valid and communication is permitted */ 135 /* Ensure message from node is valid and communication is permitted */
134 if (net_id != tipc_net_id) 136 if (net_id != tipc_net_id)
135 return; 137 return;
136 if (media_addr.broadcast) 138 if (maddr.broadcast)
137 return; 139 return;
138 if (!tipc_addr_domain_valid(dest)) 140 if (!tipc_addr_domain_valid(ddom))
139 return; 141 return;
140 if (!tipc_addr_node_valid(orig)) 142 if (!tipc_addr_node_valid(onode))
141 return; 143 return;
142 if (orig == tipc_own_addr) { 144
143 if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr))) 145 if (in_own_node(onode)) {
144 disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); 146 if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
147 disc_dupl_alert(bearer, tipc_own_addr, &maddr);
145 return; 148 return;
146 } 149 }
147 if (!tipc_in_scope(dest, tipc_own_addr)) 150 if (!tipc_in_scope(ddom, tipc_own_addr))
148 return; 151 return;
149 if (!tipc_in_scope(b_ptr->domain, orig)) 152 if (!tipc_in_scope(bearer->domain, onode))
150 return; 153 return;
151 154
152 /* Locate structure corresponding to requesting node */ 155 /* Locate, or if necessary, create, node: */
153 n_ptr = tipc_node_find(orig); 156 node = tipc_node_find(onode);
154 if (!n_ptr) { 157 if (!node)
155 n_ptr = tipc_node_create(orig); 158 node = tipc_node_create(onode);
156 if (!n_ptr) 159 if (!node)
157 return; 160 return;
158 }
159 tipc_node_lock(n_ptr);
160 161
161 /* Prepare to validate requesting node's signature and media address */ 162 tipc_node_lock(node);
162 link = n_ptr->links[b_ptr->identity]; 163 link = node->links[bearer->identity];
163 addr_mismatch = (link != NULL) &&
164 memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
165 164
166 /* 165 /* Prepare to validate requesting node's signature and media address */
167 * Ensure discovery message's signature is correct 166 sign_match = (signature == node->signature);
168 * 167 addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
169 * If signature is incorrect and there is no working link to the node, 168 link_up = link && tipc_link_is_up(link);
170 * accept the new signature but invalidate all existing links to the 169
171 * node so they won't re-activate without a new discovery message. 170
172 * 171 /* These three flags give us eight permutations: */
173 * If signature is incorrect and the requested link to the node is 172
174 * working, accept the new signature. (This is an instance of delayed 173 if (sign_match && addr_match && link_up) {
175 * rediscovery, where a link endpoint was able to re-establish contact 174 /* All is fine. Do nothing. */
176 * with its peer endpoint on a node that rebooted before receiving a 175 } else if (sign_match && addr_match && !link_up) {
177 * discovery message from that node.) 176 /* Respond. The link will come up in due time */
178 * 177 respond = true;
179 * If signature is incorrect and there is a working link to the node 178 } else if (sign_match && !addr_match && link_up) {
180 * that is not the requested link, reject the request (must be from 179 /* Peer has changed i/f address without rebooting.
181 * a duplicate node). 180 * If so, the link will reset soon, and the next
182 */ 181 * discovery will be accepted. So we can ignore it.
183 if (signature != n_ptr->signature) { 182 * It may also be a cloned or malicious peer having
184 if (n_ptr->working_links == 0) { 183 * chosen the same node address and signature as an
185 struct tipc_link *curr_link; 184 * existing one.
186 int i; 185 * Ignore requests until the link goes down, if ever.
187 186 */
188 for (i = 0; i < MAX_BEARERS; i++) { 187 disc_dupl_alert(bearer, onode, &maddr);
189 curr_link = n_ptr->links[i]; 188 } else if (sign_match && !addr_match && !link_up) {
190 if (curr_link) { 189 /* Peer link has changed i/f address without rebooting.
191 memset(&curr_link->media_addr, 0, 190 * It may also be a cloned or malicious peer; we can't
192 sizeof(media_addr)); 191 * distinguish between the two.
193 tipc_link_reset(curr_link); 192 * The signature is correct, so we must accept.
194 } 193 */
195 } 194 accept_addr = true;
196 addr_mismatch = (link != NULL); 195 respond = true;
197 } else if (tipc_link_is_up(link) && !addr_mismatch) { 196 } else if (!sign_match && addr_match && link_up) {
198 /* delayed rediscovery */ 197 /* Peer node rebooted. Two possibilities:
199 } else { 198 * - Delayed re-discovery; this link endpoint has already
200 disc_dupl_alert(b_ptr, orig, &media_addr); 199 * reset and re-established contact with the peer, before
201 tipc_node_unlock(n_ptr); 200 * receiving a discovery message from that node.
202 return; 201 * (The peer happened to receive one from this node first).
203 } 202 * - The peer came back so fast that our side has not
204 n_ptr->signature = signature; 203 * discovered it yet. Probing from this side will soon
204 * reset the link, since there can be no working link
205 * endpoint at the peer end, and the link will re-establish.
206 * Accept the signature, since it comes from a known peer.
207 */
208 accept_sign = true;
209 } else if (!sign_match && addr_match && !link_up) {
210 /* The peer node has rebooted.
211 * Accept signature, since it is a known peer.
212 */
213 accept_sign = true;
214 respond = true;
215 } else if (!sign_match && !addr_match && link_up) {
216 /* Peer rebooted with new address, or a new/duplicate peer.
217 * Ignore until the link goes down, if ever.
218 */
219 disc_dupl_alert(bearer, onode, &maddr);
220 } else if (!sign_match && !addr_match && !link_up) {
221 /* Peer rebooted with new address, or it is a new peer.
222 * Accept signature and address.
223 */
224 accept_sign = true;
225 accept_addr = true;
226 respond = true;
205 } 227 }
206 228
207 /* 229 if (accept_sign)
208 * Ensure requesting node's media address is correct 230 node->signature = signature;
209 *
210 * If media address doesn't match and the link is working, reject the
211 * request (must be from a duplicate node).
212 *
213 * If media address doesn't match and the link is not working, accept
214 * the new media address and reset the link to ensure it starts up
215 * cleanly.
216 */
217 if (addr_mismatch) {
218 if (tipc_link_is_up(link)) {
219 disc_dupl_alert(b_ptr, orig, &media_addr);
220 tipc_node_unlock(n_ptr);
221 return;
222 } else {
223 memcpy(&link->media_addr, &media_addr,
224 sizeof(media_addr));
225 tipc_link_reset(link);
226 }
227 }
228 231
229 /* Create a link endpoint for this bearer, if necessary */ 232 if (accept_addr) {
230 if (!link) { 233 if (!link)
231 link = tipc_link_create(n_ptr, b_ptr, &media_addr); 234 link = tipc_link_create(node, bearer, &maddr);
232 if (!link) { 235 if (link) {
233 tipc_node_unlock(n_ptr); 236 memcpy(&link->media_addr, &maddr, sizeof(maddr));
234 return; 237 tipc_link_reset(link);
238 } else {
239 respond = false;
235 } 240 }
236 } 241 }
237 242
238 /* Accept discovery message & send response, if necessary */ 243 /* Send response, if necessary */
239 link_fully_up = link_working_working(link); 244 if (respond && (mtyp == DSC_REQ_MSG)) {
240 245 rbuf = tipc_buf_acquire(INT_H_SIZE);
241 if ((type == DSC_REQ_MSG) && !link_fully_up) {
242 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
243 if (rbuf) { 246 if (rbuf) {
244 tipc_bearer_send(b_ptr, rbuf, &media_addr); 247 tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
248 tipc_bearer_send(bearer->identity, rbuf, &maddr);
245 kfree_skb(rbuf); 249 kfree_skb(rbuf);
246 } 250 }
247 } 251 }
248 252 tipc_node_unlock(node);
249 tipc_node_unlock(n_ptr);
250} 253}
251 254
252/** 255/**
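The eight permutations handled above, condensed into one table (s = sign_match, a = addr_match, u = link_up; the actions are exactly those coded in the hunk):

        /*
         *      s a u | accept_sign accept_addr respond
         *      ------+--------------------------------
         *      1 1 1 |     -           -          -     all fine, do nothing
         *      1 1 0 |     -           -         yes    link will come up
         *      1 0 1 |     -           -          -     dupl alert, ignore
         *      1 0 0 |     -          yes        yes    peer changed i/f addr
         *      0 1 1 |    yes          -          -     delayed re-discovery
         *      0 1 0 |    yes          -         yes    peer rebooted
         *      0 0 1 |     -           -          -     dupl alert, ignore
         *      0 0 0 |    yes         yes        yes    new peer or new addr
         */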
@@ -303,7 +306,7 @@ static void disc_timeout(struct tipc_link_req *req)
303 spin_lock_bh(&req->lock); 306 spin_lock_bh(&req->lock);
304 307
305 /* Stop searching if only desired node has been found */ 308 /* Stop searching if only desired node has been found */
306 if (tipc_node(req->bearer->domain) && req->num_nodes) { 309 if (tipc_node(req->domain) && req->num_nodes) {
307 req->timer_intv = TIPC_LINK_REQ_INACTIVE; 310 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
308 goto exit; 311 goto exit;
309 } 312 }
@@ -315,7 +318,7 @@ static void disc_timeout(struct tipc_link_req *req)
315 * hold at fast polling rate if don't have any associated nodes, 318 * hold at fast polling rate if don't have any associated nodes,
316 * otherwise hold at slow polling rate 319 * otherwise hold at slow polling rate
317 */ 320 */
318 tipc_bearer_send(req->bearer, req->buf, &req->dest); 321 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
319 322
320 323
321 req->timer_intv *= 2; 324 req->timer_intv *= 2;
@@ -347,21 +350,23 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
347 if (!req) 350 if (!req)
348 return -ENOMEM; 351 return -ENOMEM;
349 352
350 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr); 353 req->buf = tipc_buf_acquire(INT_H_SIZE);
351 if (!req->buf) { 354 if (!req->buf) {
352 kfree(req); 355 kfree(req);
353 return -ENOMSG; 356 return -ENOMEM;
354 } 357 }
355 358
359 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
356 memcpy(&req->dest, dest, sizeof(*dest)); 360 memcpy(&req->dest, dest, sizeof(*dest));
357 req->bearer = b_ptr; 361 req->bearer_id = b_ptr->identity;
362 req->domain = b_ptr->domain;
358 req->num_nodes = 0; 363 req->num_nodes = 0;
359 req->timer_intv = TIPC_LINK_REQ_INIT; 364 req->timer_intv = TIPC_LINK_REQ_INIT;
360 spin_lock_init(&req->lock); 365 spin_lock_init(&req->lock);
361 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 366 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
362 k_start_timer(&req->timer, req->timer_intv); 367 k_start_timer(&req->timer, req->timer_intv);
363 b_ptr->link_req = req; 368 b_ptr->link_req = req;
364 tipc_bearer_send(req->bearer, req->buf, &req->dest); 369 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
365 return 0; 370 return 0;
366} 371}
367 372
@@ -376,3 +381,23 @@ void tipc_disc_delete(struct tipc_link_req *req)
376 kfree_skb(req->buf); 381 kfree_skb(req->buf);
377 kfree(req); 382 kfree(req);
378} 383}
384
385/**
386 * tipc_disc_reset - reset object to send periodic link setup requests
387 * @b_ptr: ptr to bearer issuing requests
388 * (the discovery domain is taken from @b_ptr->domain)
389 */
390void tipc_disc_reset(struct tipc_bearer *b_ptr)
391{
392 struct tipc_link_req *req = b_ptr->link_req;
393
394 spin_lock_bh(&req->lock);
395 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
396 req->bearer_id = b_ptr->identity;
397 req->domain = b_ptr->domain;
398 req->num_nodes = 0;
399 req->timer_intv = TIPC_LINK_REQ_INIT;
400 k_start_timer(&req->timer, req->timer_intv);
401 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
402 spin_unlock_bh(&req->lock);
403}
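disc_timeout() (partially visible above) doubles the probe interval on each expiry until it settles at a fast polling rate while no nodes have been discovered, or a slow one otherwise. A sketch of the backoff, with the TIPC_LINK_REQ_FAST/SLOW bounds assumed rather than taken from these hunks:

        req->timer_intv *= 2;                           /* exponential backoff */
        if (!req->num_nodes && req->timer_intv > TIPC_LINK_REQ_FAST)
                req->timer_intv = TIPC_LINK_REQ_FAST;   /* assumed cap */
        else if (req->num_nodes && req->timer_intv > TIPC_LINK_REQ_SLOW)
                req->timer_intv = TIPC_LINK_REQ_SLOW;   /* assumed cap */
        k_start_timer(&req->timer, req->timer_intv);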
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 07f34729459d..515b57392f4d 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,6 +41,7 @@ struct tipc_link_req;
41 41
42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); 42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
43void tipc_disc_delete(struct tipc_link_req *req); 43void tipc_disc_delete(struct tipc_link_req *req);
44void tipc_disc_reset(struct tipc_bearer *b_ptr);
44void tipc_disc_add_dest(struct tipc_link_req *req); 45void tipc_disc_add_dest(struct tipc_link_req *req);
45void tipc_disc_remove_dest(struct tipc_link_req *req); 46void tipc_disc_remove_dest(struct tipc_link_req *req);
46void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr); 47void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 67cf3f935dba..5e1426f1751f 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2007, 2013, Ericsson AB 4 * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems 5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,39 +37,52 @@
37#include "core.h" 37#include "core.h"
38#include "bearer.h" 38#include "bearer.h"
39 39
40#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */ 40#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */
41 41
42/* convert Ethernet address to string */ 42/* Convert Ethernet address (media address format) to string */
43static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf, 43static int tipc_eth_addr2str(struct tipc_media_addr *addr,
44 int str_size) 44 char *strbuf, int bufsz)
45{ 45{
46 if (str_size < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */ 46 if (bufsz < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
47 return 1; 47 return 1;
48 48
49 sprintf(str_buf, "%pM", a->value); 49 sprintf(strbuf, "%pM", addr->value);
50 return 0; 50 return 0;
51} 51}
52 52
53/* convert Ethernet address format to message header format */ 53/* Convert from media address format to discovery message addr format */
54static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area) 54static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
55{ 55{
56 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 56 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
57 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; 57 msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
58 memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN); 58 memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
59 return 0; 59 return 0;
60} 60}
61 61
62/* convert message header address format to Ethernet format */ 62/* Convert raw mac address format to media addr format */
63static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr, 63static int tipc_eth_raw2addr(struct tipc_bearer *b,
64 struct tipc_media_addr *a, char *msg_area) 64 struct tipc_media_addr *addr,
65 char *msg)
65{ 66{
66 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) 67 char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
67 return 1;
68 68
69 tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET); 69 memset(addr, 0, sizeof(*addr));
70 ether_addr_copy(addr->value, msg);
71 addr->media_id = TIPC_MEDIA_TYPE_ETH;
72 addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
70 return 0; 73 return 0;
71} 74}
72 75
76/* Convert discovery msg addr format to Ethernet media addr format */
77static int tipc_eth_msg2addr(struct tipc_bearer *b,
78 struct tipc_media_addr *addr,
79 char *msg)
80{
81 /* Skip past preamble: */
82 msg += ETH_ADDR_OFFSET;
83 return tipc_eth_raw2addr(b, addr, msg);
84}
85
73/* Ethernet media registration info */ 86/* Ethernet media registration info */
74struct tipc_media eth_media_info = { 87struct tipc_media eth_media_info = {
75 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -78,6 +91,7 @@ struct tipc_media eth_media_info = {
78 .addr2str = tipc_eth_addr2str, 91 .addr2str = tipc_eth_addr2str,
79 .addr2msg = tipc_eth_addr2msg, 92 .addr2msg = tipc_eth_addr2msg,
80 .msg2addr = tipc_eth_msg2addr, 93 .msg2addr = tipc_eth_msg2addr,
94 .raw2addr = tipc_eth_raw2addr,
81 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
82 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
83 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -85,4 +99,3 @@ struct tipc_media eth_media_info = {
85 .hwaddr_len = ETH_ALEN, 99 .hwaddr_len = ETH_ALEN,
86 .name = "eth" 100 .name = "eth"
87}; 101};
88
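
The new tipc_eth_msg2addr() above is a thin wrapper: it skips the 4-byte
preamble of the discovery message address field and lets tipc_eth_raw2addr()
fill in the media address and flag the broadcast case. A standalone sketch of
the same split, with a stand-in for struct tipc_media_addr:

/*
 * Sketch of the Ethernet msg2addr/raw2addr split. struct media_addr is a
 * stand-in for the kernel's struct tipc_media_addr.
 */
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN	6
#define ADDR_OFFSET	4	/* discovery address field carries a preamble */
#define MEDIA_TYPE_ETH	1

struct media_addr {
	unsigned char value[ETH_ALEN];
	unsigned char media_id;
	bool broadcast;
};

static void raw2addr(struct media_addr *addr, const char *mac)
{
	static const unsigned char bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	memset(addr, 0, sizeof(*addr));
	memcpy(addr->value, mac, ETH_ALEN);
	addr->media_id = MEDIA_TYPE_ETH;
	addr->broadcast = !memcmp(addr->value, bcast, ETH_ALEN);
}

static void msg2addr(struct media_addr *addr, const char *msg)
{
	raw2addr(addr, msg + ADDR_OFFSET);	/* skip the preamble */
}
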
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644
index 1fabf160501f..000000000000
--- a/net/tipc/handler.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * net/tipc/handler.c: TIPC signal handling
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38
39struct queue_item {
40 struct list_head next_signal;
41 void (*handler) (unsigned long);
42 unsigned long data;
43};
44
45static struct kmem_cache *tipc_queue_item_cache;
46static struct list_head signal_queue_head;
47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled __read_mostly;
49
50static void process_signal_queue(unsigned long dummy);
51
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 spin_lock_bh(&qitem_lock);
60 if (!handler_enabled) {
61 spin_unlock_bh(&qitem_lock);
62 return -ENOPROTOOPT;
63 }
64
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 pr_err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
96int tipc_handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
110void tipc_handler_stop(void)
111{
112 struct list_head *l, *n;
113 struct queue_item *item;
114
115 spin_lock_bh(&qitem_lock);
116 if (!handler_enabled) {
117 spin_unlock_bh(&qitem_lock);
118 return;
119 }
120 handler_enabled = 0;
121 spin_unlock_bh(&qitem_lock);
122
123 tasklet_kill(&tipc_tasklet);
124
125 spin_lock_bh(&qitem_lock);
126 list_for_each_safe(l, n, &signal_queue_head) {
127 item = list_entry(l, struct queue_item, next_signal);
128 list_del(&item->next_signal);
129 kmem_cache_free(tipc_queue_item_cache, item);
130 }
131 spin_unlock_bh(&qitem_lock);
132
133 kmem_cache_destroy(tipc_queue_item_cache);
134}
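
The deleted handler.c implemented a generic deferred-call facility:
tipc_k_signal() queued (handler, argument) pairs under a spinlock and a
tasklet drained them later. After this series its last users are gone;
link_retransmit_failure(), for instance, now sets TIPC_BCLINK_RESET instead
of signalling link_reset_all(). For reference, a plain-C sketch of the
retired queue-and-drain pattern (the spinlock and tasklet are elided):

/*
 * Queue (handler, argument) pairs, then drain them in one pass. In the
 * kernel the queue was spinlock-protected and drained from a tasklet.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void (*handler_fn)(unsigned long);

struct queue_item {
	struct queue_item *next;
	handler_fn handler;
	unsigned long data;
};

static struct queue_item *head, **tail = &head;

static int signal_deferred(handler_fn fn, unsigned long arg)
{
	struct queue_item *item = malloc(sizeof(*item));

	if (!item)
		return -1;
	item->handler = fn;
	item->data = arg;
	item->next = NULL;
	*tail = item;			/* FIFO, like list_add_tail() */
	tail = &item->next;
	return 0;
}

static void drain_queue(void)
{
	while (head) {
		struct queue_item *item = head;

		head = item->next;
		if (!head)
			tail = &head;
		item->handler(item->data);
		free(item);
	}
}

static void report(unsigned long addr)
{
	printf("deferred call for node %#lx\n", addr);
}

int main(void)
{
	signal_deferred(report, 0x1001);
	drain_queue();
	return 0;
}
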
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 844a77e25828..8522eef9c136 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,7 +42,7 @@
42#include "core.h" 42#include "core.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* convert InfiniBand address to string */ 45/* Convert InfiniBand address (media address format) to string */
46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, 46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
47 int str_size) 47 int str_size)
48{ 48{
@@ -54,23 +54,35 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
54 return 0; 54 return 0;
55} 55}
56 56
57/* convert InfiniBand address format to message header format */ 57/* Convert from media address format to discovery message addr format */
58static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area) 58static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
59{ 59{
60 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 60 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
61 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB; 61 memcpy(msg, addr->value, INFINIBAND_ALEN);
62 memcpy(msg_area, a->value, INFINIBAND_ALEN);
63 return 0; 62 return 0;
64} 63}
65 64
66/* convert message header address format to InfiniBand format */ 65/* Convert raw InfiniBand address format to media addr format */
67static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr, 66static int tipc_ib_raw2addr(struct tipc_bearer *b,
68 struct tipc_media_addr *a, char *msg_area) 67 struct tipc_media_addr *addr,
68 char *msg)
69{ 69{
70 tipc_l2_media_addr_set(tb_ptr, a, msg_area); 70 memset(addr, 0, sizeof(*addr));
71 memcpy(addr->value, msg, INFINIBAND_ALEN);
72 addr->media_id = TIPC_MEDIA_TYPE_IB;
73 addr->broadcast = !memcmp(msg, b->bcast_addr.value,
74 INFINIBAND_ALEN);
71 return 0; 75 return 0;
72} 76}
73 77
78/* Convert discovery msg addr format to InfiniBand media addr format */
79static int tipc_ib_msg2addr(struct tipc_bearer *b,
80 struct tipc_media_addr *addr,
81 char *msg)
82{
83 return tipc_ib_raw2addr(b, addr, msg);
84}
85
74/* InfiniBand media registration info */ 86/* InfiniBand media registration info */
75struct tipc_media ib_media_info = { 87struct tipc_media ib_media_info = {
76 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -79,6 +91,7 @@ struct tipc_media ib_media_info = {
79 .addr2str = tipc_ib_addr2str, 91 .addr2str = tipc_ib_addr2str,
80 .addr2msg = tipc_ib_addr2msg, 92 .addr2msg = tipc_ib_addr2msg,
81 .msg2addr = tipc_ib_msg2addr, 93 .msg2addr = tipc_ib_msg2addr,
94 .raw2addr = tipc_ib_raw2addr,
82 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
83 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
84 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -86,4 +99,3 @@ struct tipc_media ib_media_info = {
86 .hwaddr_len = INFINIBAND_ALEN, 99 .hwaddr_len = INFINIBAND_ALEN,
87 .name = "ib" 100 .name = "ib"
88}; 101};
89
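
Unlike the Ethernet variant, which can hard-code ff:ff:ff:ff:ff:ff,
tipc_ib_raw2addr() above must compare against the bearer's own bcast_addr to
detect a broadcast destination. A short sketch of that test with stand-in
types:

/*
 * Broadcast detection for InfiniBand: the broadcast address is taken from
 * the bearer rather than a fixed constant. struct bearer is a stand-in.
 */
#include <stdbool.h>
#include <string.h>

#define IB_ALEN	20	/* INFINIBAND_ALEN */

struct bearer {
	unsigned char bcast[IB_ALEN];	/* learned from the underlying device */
};

static bool is_ib_broadcast(const struct bearer *b, const char *raw)
{
	return !memcmp(raw, b->bcast, IB_ALEN);
}
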
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c5190ab75290..ad2c57f5868d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -37,6 +37,7 @@
37#include "core.h" 37#include "core.h"
38#include "link.h" 38#include "link.h"
39#include "port.h" 39#include "port.h"
40#include "socket.h"
40#include "name_distr.h" 41#include "name_distr.h"
41#include "discover.h" 42#include "discover.h"
42#include "config.h" 43#include "config.h"
@@ -101,9 +102,18 @@ static unsigned int align(unsigned int i)
101 102
102static void link_init_max_pkt(struct tipc_link *l_ptr) 103static void link_init_max_pkt(struct tipc_link *l_ptr)
103{ 104{
105 struct tipc_bearer *b_ptr;
104 u32 max_pkt; 106 u32 max_pkt;
105 107
106 max_pkt = (l_ptr->b_ptr->mtu & ~3); 108 rcu_read_lock();
109 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
110 if (!b_ptr) {
111 rcu_read_unlock();
112 return;
113 }
114 max_pkt = (b_ptr->mtu & ~3);
115 rcu_read_unlock();
116
107 if (max_pkt > MAX_MSG_SIZE) 117 if (max_pkt > MAX_MSG_SIZE)
108 max_pkt = MAX_MSG_SIZE; 118 max_pkt = MAX_MSG_SIZE;
109 119
@@ -248,7 +258,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
248 l_ptr->owner = n_ptr; 258 l_ptr->owner = n_ptr;
249 l_ptr->checkpoint = 1; 259 l_ptr->checkpoint = 1;
250 l_ptr->peer_session = INVALID_SESSION; 260 l_ptr->peer_session = INVALID_SESSION;
251 l_ptr->b_ptr = b_ptr; 261 l_ptr->bearer_id = b_ptr->identity;
252 link_set_supervision_props(l_ptr, b_ptr->tolerance); 262 link_set_supervision_props(l_ptr, b_ptr->tolerance);
253 l_ptr->state = RESET_UNKNOWN; 263 l_ptr->state = RESET_UNKNOWN;
254 264
@@ -263,6 +273,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
263 l_ptr->priority = b_ptr->priority; 273 l_ptr->priority = b_ptr->priority;
264 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 274 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
265 275
276 l_ptr->net_plane = b_ptr->net_plane;
266 link_init_max_pkt(l_ptr); 277 link_init_max_pkt(l_ptr);
267 278
268 l_ptr->next_out_no = 1; 279 l_ptr->next_out_no = 1;
@@ -287,14 +298,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
287 298
288 rcu_read_lock(); 299 rcu_read_lock();
289 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 300 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
290 spin_lock_bh(&n_ptr->lock); 301 tipc_node_lock(n_ptr);
291 l_ptr = n_ptr->links[bearer_id]; 302 l_ptr = n_ptr->links[bearer_id];
292 if (l_ptr) { 303 if (l_ptr) {
293 tipc_link_reset(l_ptr); 304 tipc_link_reset(l_ptr);
294 if (shutting_down || !tipc_node_is_up(n_ptr)) { 305 if (shutting_down || !tipc_node_is_up(n_ptr)) {
295 tipc_node_detach_link(l_ptr->owner, l_ptr); 306 tipc_node_detach_link(l_ptr->owner, l_ptr);
296 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
297 spin_unlock_bh(&n_ptr->lock); 308 tipc_node_unlock(n_ptr);
298 309
299 /* Nobody else can access this link now: */ 310 /* Nobody else can access this link now: */
300 del_timer_sync(&l_ptr->timer); 311 del_timer_sync(&l_ptr->timer);
@@ -302,12 +313,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
302 } else { 313 } else {
303 /* Detach/delete when failover is finished: */ 314 /* Detach/delete when failover is finished: */
304 l_ptr->flags |= LINK_STOPPED; 315 l_ptr->flags |= LINK_STOPPED;
305 spin_unlock_bh(&n_ptr->lock); 316 tipc_node_unlock(n_ptr);
306 del_timer_sync(&l_ptr->timer); 317 del_timer_sync(&l_ptr->timer);
307 } 318 }
308 continue; 319 continue;
309 } 320 }
310 spin_unlock_bh(&n_ptr->lock); 321 tipc_node_unlock(n_ptr);
311 } 322 }
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
@@ -388,9 +399,8 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
388 */ 399 */
389void tipc_link_reset_fragments(struct tipc_link *l_ptr) 400void tipc_link_reset_fragments(struct tipc_link *l_ptr)
390{ 401{
391 kfree_skb(l_ptr->reasm_head); 402 kfree_skb(l_ptr->reasm_buf);
392 l_ptr->reasm_head = NULL; 403 l_ptr->reasm_buf = NULL;
393 l_ptr->reasm_tail = NULL;
394} 404}
395 405
396/** 406/**
@@ -426,7 +436,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
426 return; 436 return;
427 437
428 tipc_node_link_down(l_ptr->owner, l_ptr); 438 tipc_node_link_down(l_ptr->owner, l_ptr);
429 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 439 tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
430 440
431 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 441 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
432 l_ptr->reset_checkpoint = checkpoint; 442 l_ptr->reset_checkpoint = checkpoint;
@@ -464,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
464 474
465 rcu_read_lock(); 475 rcu_read_lock();
466 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 476 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
467 spin_lock_bh(&n_ptr->lock); 477 tipc_node_lock(n_ptr);
468 l_ptr = n_ptr->links[bearer_id]; 478 l_ptr = n_ptr->links[bearer_id];
469 if (l_ptr) 479 if (l_ptr)
470 tipc_link_reset(l_ptr); 480 tipc_link_reset(l_ptr);
471 spin_unlock_bh(&n_ptr->lock); 481 tipc_node_unlock(n_ptr);
472 } 482 }
473 rcu_read_unlock(); 483 rcu_read_unlock();
474} 484}
@@ -477,7 +487,7 @@ static void link_activate(struct tipc_link *l_ptr)
477{ 487{
478 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 488 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
479 tipc_node_link_up(l_ptr->owner, l_ptr); 489 tipc_node_link_up(l_ptr->owner, l_ptr);
480 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 490 tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
481} 491}
482 492
483/** 493/**
@@ -777,7 +787,7 @@ int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
777 if (likely(!link_congested(l_ptr))) { 787 if (likely(!link_congested(l_ptr))) {
778 link_add_to_outqueue(l_ptr, buf, msg); 788 link_add_to_outqueue(l_ptr, buf, msg);
779 789
780 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 790 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
781 l_ptr->unacked_window = 0; 791 l_ptr->unacked_window = 0;
782 return dsz; 792 return dsz;
783 } 793 }
@@ -825,7 +835,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
825 struct tipc_node *n_ptr; 835 struct tipc_node *n_ptr;
826 int res = -ELINKCONG; 836 int res = -ELINKCONG;
827 837
828 read_lock_bh(&tipc_net_lock);
829 n_ptr = tipc_node_find(dest); 838 n_ptr = tipc_node_find(dest);
830 if (n_ptr) { 839 if (n_ptr) {
831 tipc_node_lock(n_ptr); 840 tipc_node_lock(n_ptr);
@@ -838,7 +847,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
838 } else { 847 } else {
839 kfree_skb(buf); 848 kfree_skb(buf);
840 } 849 }
841 read_unlock_bh(&tipc_net_lock);
842 return res; 850 return res;
843} 851}
844 852
@@ -902,7 +910,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
902 if (list_empty(message_list)) 910 if (list_empty(message_list))
903 return; 911 return;
904 912
905 read_lock_bh(&tipc_net_lock);
906 n_ptr = tipc_node_find(dest); 913 n_ptr = tipc_node_find(dest);
907 if (n_ptr) { 914 if (n_ptr) {
908 tipc_node_lock(n_ptr); 915 tipc_node_lock(n_ptr);
@@ -917,7 +924,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
917 } 924 }
918 tipc_node_unlock(n_ptr); 925 tipc_node_unlock(n_ptr);
919 } 926 }
920 read_unlock_bh(&tipc_net_lock);
921 927
922 /* discard the messages if they couldn't be sent */ 928 /* discard the messages if they couldn't be sent */
923 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 929 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
@@ -941,7 +947,7 @@ static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
941 if (likely(!link_congested(l_ptr))) { 947 if (likely(!link_congested(l_ptr))) {
942 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 948 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
943 link_add_to_outqueue(l_ptr, buf, msg); 949 link_add_to_outqueue(l_ptr, buf, msg);
944 tipc_bearer_send(l_ptr->b_ptr, buf, 950 tipc_bearer_send(l_ptr->bearer_id, buf,
945 &l_ptr->media_addr); 951 &l_ptr->media_addr);
946 l_ptr->unacked_window = 0; 952 l_ptr->unacked_window = 0;
947 return res; 953 return res;
@@ -979,7 +985,6 @@ again:
979 if (unlikely(res < 0)) 985 if (unlikely(res < 0))
980 return res; 986 return res;
981 987
982 read_lock_bh(&tipc_net_lock);
983 node = tipc_node_find(destaddr); 988 node = tipc_node_find(destaddr);
984 if (likely(node)) { 989 if (likely(node)) {
985 tipc_node_lock(node); 990 tipc_node_lock(node);
@@ -990,7 +995,6 @@ again:
990 &sender->max_pkt); 995 &sender->max_pkt);
991exit: 996exit:
992 tipc_node_unlock(node); 997 tipc_node_unlock(node);
993 read_unlock_bh(&tipc_net_lock);
994 return res; 998 return res;
995 } 999 }
996 1000
@@ -1007,7 +1011,6 @@ exit:
1007 */ 1011 */
1008 sender->max_pkt = l_ptr->max_pkt; 1012 sender->max_pkt = l_ptr->max_pkt;
1009 tipc_node_unlock(node); 1013 tipc_node_unlock(node);
1010 read_unlock_bh(&tipc_net_lock);
1011 1014
1012 1015
1013 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1016 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1018,7 +1021,6 @@ exit:
1018 } 1021 }
1019 tipc_node_unlock(node); 1022 tipc_node_unlock(node);
1020 } 1023 }
1021 read_unlock_bh(&tipc_net_lock);
1022 1024
1023 /* Couldn't find a link to the destination node */ 1025 /* Couldn't find a link to the destination node */
1024 kfree_skb(buf); 1026 kfree_skb(buf);
@@ -1204,7 +1206,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1204 if (r_q_size && buf) { 1206 if (r_q_size && buf) {
1205 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1207 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1206 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1208 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1207 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1209 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1208 l_ptr->retransm_queue_head = mod(++r_q_head); 1210 l_ptr->retransm_queue_head = mod(++r_q_head);
1209 l_ptr->retransm_queue_size = --r_q_size; 1211 l_ptr->retransm_queue_size = --r_q_size;
1210 l_ptr->stats.retransmitted++; 1212 l_ptr->stats.retransmitted++;
@@ -1216,7 +1218,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1216 if (buf) { 1218 if (buf) {
1217 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1219 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1218 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1220 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1219 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1221 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1220 l_ptr->unacked_window = 0; 1222 l_ptr->unacked_window = 0;
1221 kfree_skb(buf); 1223 kfree_skb(buf);
1222 l_ptr->proto_msg_queue = NULL; 1224 l_ptr->proto_msg_queue = NULL;
@@ -1233,7 +1235,8 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1233 if (mod(next - first) < l_ptr->queue_limit[0]) { 1235 if (mod(next - first) < l_ptr->queue_limit[0]) {
1234 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1236 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1235 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1237 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1236 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1238 tipc_bearer_send(l_ptr->bearer_id, buf,
1239 &l_ptr->media_addr);
1237 if (msg_user(msg) == MSG_BUNDLER) 1240 if (msg_user(msg) == MSG_BUNDLER)
1238 msg_set_type(msg, CLOSED_MSG); 1241 msg_set_type(msg, CLOSED_MSG);
1239 l_ptr->next_out = buf->next; 1242 l_ptr->next_out = buf->next;
@@ -1256,33 +1259,24 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
1256 } while (!res); 1259 } while (!res);
1257} 1260}
1258 1261
1259static void link_reset_all(unsigned long addr) 1262void tipc_link_reset_all(struct tipc_node *node)
1260{ 1263{
1261 struct tipc_node *n_ptr;
1262 char addr_string[16]; 1264 char addr_string[16];
1263 u32 i; 1265 u32 i;
1264 1266
1265 read_lock_bh(&tipc_net_lock); 1267 tipc_node_lock(node);
1266 n_ptr = tipc_node_find((u32)addr);
1267 if (!n_ptr) {
1268 read_unlock_bh(&tipc_net_lock);
1269 return; /* node no longer exists */
1270 }
1271
1272 tipc_node_lock(n_ptr);
1273 1268
1274 pr_warn("Resetting all links to %s\n", 1269 pr_warn("Resetting all links to %s\n",
1275 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1270 tipc_addr_string_fill(addr_string, node->addr));
1276 1271
1277 for (i = 0; i < MAX_BEARERS; i++) { 1272 for (i = 0; i < MAX_BEARERS; i++) {
1278 if (n_ptr->links[i]) { 1273 if (node->links[i]) {
1279 link_print(n_ptr->links[i], "Resetting link\n"); 1274 link_print(node->links[i], "Resetting link\n");
1280 tipc_link_reset(n_ptr->links[i]); 1275 tipc_link_reset(node->links[i]);
1281 } 1276 }
1282 } 1277 }
1283 1278
1284 tipc_node_unlock(n_ptr); 1279 tipc_node_unlock(node);
1285 read_unlock_bh(&tipc_net_lock);
1286} 1280}
1287 1281
1288static void link_retransmit_failure(struct tipc_link *l_ptr, 1282static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1319,10 +1313,9 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1319 n_ptr->bclink.oos_state, 1313 n_ptr->bclink.oos_state,
1320 n_ptr->bclink.last_sent); 1314 n_ptr->bclink.last_sent);
1321 1315
1322 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1323
1324 tipc_node_unlock(n_ptr); 1316 tipc_node_unlock(n_ptr);
1325 1317
1318 tipc_bclink_set_flags(TIPC_BCLINK_RESET);
1326 l_ptr->stale_count = 0; 1319 l_ptr->stale_count = 0;
1327 } 1320 }
1328} 1321}
@@ -1352,7 +1345,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1352 msg = buf_msg(buf); 1345 msg = buf_msg(buf);
1353 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1346 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1354 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1347 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1355 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1348 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1356 buf = buf->next; 1349 buf = buf->next;
1357 retransmits--; 1350 retransmits--;
1358 l_ptr->stats.retransmitted++; 1351 l_ptr->stats.retransmitted++;
@@ -1440,14 +1433,13 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1440/** 1433/**
1441 * tipc_rcv - process TIPC packets/messages arriving from off-node 1434 * tipc_rcv - process TIPC packets/messages arriving from off-node
1442 * @head: pointer to message buffer chain 1435 * @head: pointer to message buffer chain
1443 * @tb_ptr: pointer to the bearer the message arrived on 1436 * @b_ptr: pointer to the bearer the message arrived on
1444 * 1437 *
1445 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1438 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1446 * structure (i.e. cannot be NULL), but bearer can be inactive. 1439 * structure (i.e. cannot be NULL), but bearer can be inactive.
1447 */ 1440 */
1448void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) 1441void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1449{ 1442{
1450 read_lock_bh(&tipc_net_lock);
1451 while (head) { 1443 while (head) {
1452 struct tipc_node *n_ptr; 1444 struct tipc_node *n_ptr;
1453 struct tipc_link *l_ptr; 1445 struct tipc_link *l_ptr;
@@ -1497,14 +1489,14 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1497 goto unlock_discard; 1489 goto unlock_discard;
1498 1490
1499 /* Verify that communication with node is currently allowed */ 1491 /* Verify that communication with node is currently allowed */
1500 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1492 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1501 msg_user(msg) == LINK_PROTOCOL && 1493 msg_user(msg) == LINK_PROTOCOL &&
1502 (msg_type(msg) == RESET_MSG || 1494 (msg_type(msg) == RESET_MSG ||
1503 msg_type(msg) == ACTIVATE_MSG) && 1495 msg_type(msg) == ACTIVATE_MSG) &&
1504 !msg_redundant_link(msg)) 1496 !msg_redundant_link(msg))
1505 n_ptr->block_setup &= ~WAIT_PEER_DOWN; 1497 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1506 1498
1507 if (n_ptr->block_setup) 1499 if (tipc_node_blocked(n_ptr))
1508 goto unlock_discard; 1500 goto unlock_discard;
1509 1501
1510 /* Validate message sequence number info */ 1502 /* Validate message sequence number info */
@@ -1581,17 +1573,12 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1581 } 1573 }
1582 msg = buf_msg(buf); 1574 msg = buf_msg(buf);
1583 } else if (msg_user(msg) == MSG_FRAGMENTER) { 1575 } else if (msg_user(msg) == MSG_FRAGMENTER) {
1584 int rc;
1585
1586 l_ptr->stats.recv_fragments++; 1576 l_ptr->stats.recv_fragments++;
1587 rc = tipc_link_frag_rcv(&l_ptr->reasm_head, 1577 if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
1588 &l_ptr->reasm_tail,
1589 &buf);
1590 if (rc == LINK_REASM_COMPLETE) {
1591 l_ptr->stats.recv_fragmented++; 1578 l_ptr->stats.recv_fragmented++;
1592 msg = buf_msg(buf); 1579 msg = buf_msg(buf);
1593 } else { 1580 } else {
1594 if (rc == LINK_REASM_ERROR) 1581 if (!l_ptr->reasm_buf)
1595 tipc_link_reset(l_ptr); 1582 tipc_link_reset(l_ptr);
1596 tipc_node_unlock(n_ptr); 1583 tipc_node_unlock(n_ptr);
1597 continue; 1584 continue;
@@ -1604,7 +1591,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1604 case TIPC_HIGH_IMPORTANCE: 1591 case TIPC_HIGH_IMPORTANCE:
1605 case TIPC_CRITICAL_IMPORTANCE: 1592 case TIPC_CRITICAL_IMPORTANCE:
1606 tipc_node_unlock(n_ptr); 1593 tipc_node_unlock(n_ptr);
1607 tipc_port_rcv(buf); 1594 tipc_sk_rcv(buf);
1608 continue; 1595 continue;
1609 case MSG_BUNDLER: 1596 case MSG_BUNDLER:
1610 l_ptr->stats.recv_bundles++; 1597 l_ptr->stats.recv_bundles++;
@@ -1635,7 +1622,6 @@ unlock_discard:
1635discard: 1622discard:
1636 kfree_skb(buf); 1623 kfree_skb(buf);
1637 } 1624 }
1638 read_unlock_bh(&tipc_net_lock);
1639} 1625}
1640 1626
1641/** 1627/**
@@ -1747,12 +1733,12 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1747 return; 1733 return;
1748 1734
1749 /* Abort non-RESET send if communication with node is prohibited */ 1735 /* Abort non-RESET send if communication with node is prohibited */
1750 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1736 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1751 return; 1737 return;
1752 1738
1753 /* Create protocol message with "out-of-sequence" sequence number */ 1739 /* Create protocol message with "out-of-sequence" sequence number */
1754 msg_set_type(msg, msg_typ); 1740 msg_set_type(msg, msg_typ);
1755 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1741 msg_set_net_plane(msg, l_ptr->net_plane);
1756 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1742 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1757 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1743 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1758 1744
@@ -1818,7 +1804,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1818 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1804 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1819 buf->priority = TC_PRIO_CONTROL; 1805 buf->priority = TC_PRIO_CONTROL;
1820 1806
1821 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1807 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1822 l_ptr->unacked_window = 0; 1808 l_ptr->unacked_window = 0;
1823 kfree_skb(buf); 1809 kfree_skb(buf);
1824} 1810}
@@ -1840,12 +1826,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1840 if (l_ptr->exp_msg_count) 1826 if (l_ptr->exp_msg_count)
1841 goto exit; 1827 goto exit;
1842 1828
1843 /* record unnumbered packet arrival (force mismatch on next timeout) */ 1829 if (l_ptr->net_plane != msg_net_plane(msg))
1844 l_ptr->checkpoint--;
1845
1846 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1847 if (tipc_own_addr > msg_prevnode(msg)) 1830 if (tipc_own_addr > msg_prevnode(msg))
1848 l_ptr->b_ptr->net_plane = msg_net_plane(msg); 1831 l_ptr->net_plane = msg_net_plane(msg);
1849 1832
1850 switch (msg_type(msg)) { 1833 switch (msg_type(msg)) {
1851 1834
@@ -1862,7 +1845,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1862 * peer has lost contact -- don't allow peer's links 1845 * peer has lost contact -- don't allow peer's links
1863 * to reactivate before we recognize loss & clean up 1846 * to reactivate before we recognize loss & clean up
1864 */ 1847 */
1865 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 1848 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1866 } 1849 }
1867 1850
1868 link_state_event(l_ptr, RESET_MSG); 1851 link_state_event(l_ptr, RESET_MSG);
@@ -1918,6 +1901,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1918 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1901 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1919 break; 1902 break;
1920 } 1903 }
1904
1905 /* Record reception; force mismatch at next timeout: */
1906 l_ptr->checkpoint--;
1907
1921 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1908 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1922 l_ptr->stats.recv_states++; 1909 l_ptr->stats.recv_states++;
1923 if (link_reset_unknown(l_ptr)) 1910 if (link_reset_unknown(l_ptr))
@@ -2177,9 +2164,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
2177 } 2164 }
2178 if (msg_user(msg) == MSG_FRAGMENTER) { 2165 if (msg_user(msg) == MSG_FRAGMENTER) {
2179 l_ptr->stats.recv_fragments++; 2166 l_ptr->stats.recv_fragments++;
2180 tipc_link_frag_rcv(&l_ptr->reasm_head, 2167 tipc_buf_append(&l_ptr->reasm_buf, &buf);
2181 &l_ptr->reasm_tail,
2182 &buf);
2183 } 2168 }
2184 } 2169 }
2185exit: 2170exit:
@@ -2317,53 +2302,6 @@ static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
2317 return dsz; 2302 return dsz;
2318} 2303}
2319 2304
2320/* tipc_link_frag_rcv(): Called with node lock on. Returns
2321 * the reassembled buffer if message is complete.
2322 */
2323int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
2324 struct sk_buff **fbuf)
2325{
2326 struct sk_buff *frag = *fbuf;
2327 struct tipc_msg *msg = buf_msg(frag);
2328 u32 fragid = msg_type(msg);
2329 bool headstolen;
2330 int delta;
2331
2332 skb_pull(frag, msg_hdr_sz(msg));
2333 if (fragid == FIRST_FRAGMENT) {
2334 if (*head || skb_unclone(frag, GFP_ATOMIC))
2335 goto out_free;
2336 *head = frag;
2337 skb_frag_list_init(*head);
2338 *fbuf = NULL;
2339 return 0;
2340 } else if (*head &&
2341 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
2342 kfree_skb_partial(frag, headstolen);
2343 } else {
2344 if (!*head)
2345 goto out_free;
2346 if (!skb_has_frag_list(*head))
2347 skb_shinfo(*head)->frag_list = frag;
2348 else
2349 (*tail)->next = frag;
2350 *tail = frag;
2351 (*head)->truesize += frag->truesize;
2352 }
2353 if (fragid == LAST_FRAGMENT) {
2354 *fbuf = *head;
2355 *tail = *head = NULL;
2356 return LINK_REASM_COMPLETE;
2357 }
2358 *fbuf = NULL;
2359 return 0;
2360out_free:
2361 pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
2362 kfree_skb(*fbuf);
2363 *fbuf = NULL;
2364 return LINK_REASM_ERROR;
2365}
2366
2367static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2305static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2368{ 2306{
2369 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2307 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2397,8 +2335,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2397/* tipc_link_find_owner - locate owner node of link by link's name 2335/* tipc_link_find_owner - locate owner node of link by link's name
2398 * @name: pointer to link name string 2336 * @name: pointer to link name string
2399 * @bearer_id: pointer to index in 'node->links' array where the link was found. 2337 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2400 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2401 * this also prevents link deletion.
2402 * 2338 *
2403 * Returns pointer to node owning the link, or NULL if no matching link is found. 2339 * Returns pointer to node owning the link, or NULL if no matching link is found.
2404 */ 2340 */
@@ -2460,7 +2396,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2460 * @new_value: new value of link, bearer, or media setting 2396 * @new_value: new value of link, bearer, or media setting
2461 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2397 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2462 * 2398 *
2463 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2399 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
2464 * 2400 *
2465 * Returns 0 if value updated and negative value on error. 2401 * Returns 0 if value updated and negative value on error.
2466 */ 2402 */
@@ -2566,9 +2502,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2566 " (cannot change setting on broadcast link)"); 2502 " (cannot change setting on broadcast link)");
2567 } 2503 }
2568 2504
2569 read_lock_bh(&tipc_net_lock);
2570 res = link_cmd_set_value(args->name, new_value, cmd); 2505 res = link_cmd_set_value(args->name, new_value, cmd);
2571 read_unlock_bh(&tipc_net_lock);
2572 if (res) 2506 if (res)
2573 return tipc_cfg_reply_error_string("cannot change link setting"); 2507 return tipc_cfg_reply_error_string("cannot change link setting");
2574 2508
@@ -2602,22 +2536,18 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2602 return tipc_cfg_reply_error_string("link not found"); 2536 return tipc_cfg_reply_error_string("link not found");
2603 return tipc_cfg_reply_none(); 2537 return tipc_cfg_reply_none();
2604 } 2538 }
2605 read_lock_bh(&tipc_net_lock);
2606 node = tipc_link_find_owner(link_name, &bearer_id); 2539 node = tipc_link_find_owner(link_name, &bearer_id);
2607 if (!node) { 2540 if (!node)
2608 read_unlock_bh(&tipc_net_lock);
2609 return tipc_cfg_reply_error_string("link not found"); 2541 return tipc_cfg_reply_error_string("link not found");
2610 } 2542
2611 tipc_node_lock(node); 2543 tipc_node_lock(node);
2612 l_ptr = node->links[bearer_id]; 2544 l_ptr = node->links[bearer_id];
2613 if (!l_ptr) { 2545 if (!l_ptr) {
2614 tipc_node_unlock(node); 2546 tipc_node_unlock(node);
2615 read_unlock_bh(&tipc_net_lock);
2616 return tipc_cfg_reply_error_string("link not found"); 2547 return tipc_cfg_reply_error_string("link not found");
2617 } 2548 }
2618 link_reset_statistics(l_ptr); 2549 link_reset_statistics(l_ptr);
2619 tipc_node_unlock(node); 2550 tipc_node_unlock(node);
2620 read_unlock_bh(&tipc_net_lock);
2621 return tipc_cfg_reply_none(); 2551 return tipc_cfg_reply_none();
2622} 2552}
2623 2553
@@ -2650,18 +2580,15 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2650 if (!strcmp(name, tipc_bclink_name)) 2580 if (!strcmp(name, tipc_bclink_name))
2651 return tipc_bclink_stats(buf, buf_size); 2581 return tipc_bclink_stats(buf, buf_size);
2652 2582
2653 read_lock_bh(&tipc_net_lock);
2654 node = tipc_link_find_owner(name, &bearer_id); 2583 node = tipc_link_find_owner(name, &bearer_id);
2655 if (!node) { 2584 if (!node)
2656 read_unlock_bh(&tipc_net_lock);
2657 return 0; 2585 return 0;
2658 } 2586
2659 tipc_node_lock(node); 2587 tipc_node_lock(node);
2660 2588
2661 l = node->links[bearer_id]; 2589 l = node->links[bearer_id];
2662 if (!l) { 2590 if (!l) {
2663 tipc_node_unlock(node); 2591 tipc_node_unlock(node);
2664 read_unlock_bh(&tipc_net_lock);
2665 return 0; 2592 return 0;
2666 } 2593 }
2667 2594
@@ -2727,7 +2654,6 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2727 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2654 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2728 2655
2729 tipc_node_unlock(node); 2656 tipc_node_unlock(node);
2730 read_unlock_bh(&tipc_net_lock);
2731 return ret; 2657 return ret;
2732} 2658}
2733 2659
@@ -2778,7 +2704,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2778 if (dest == tipc_own_addr) 2704 if (dest == tipc_own_addr)
2779 return MAX_MSG_SIZE; 2705 return MAX_MSG_SIZE;
2780 2706
2781 read_lock_bh(&tipc_net_lock);
2782 n_ptr = tipc_node_find(dest); 2707 n_ptr = tipc_node_find(dest);
2783 if (n_ptr) { 2708 if (n_ptr) {
2784 tipc_node_lock(n_ptr); 2709 tipc_node_lock(n_ptr);
@@ -2787,13 +2712,18 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2787 res = l_ptr->max_pkt; 2712 res = l_ptr->max_pkt;
2788 tipc_node_unlock(n_ptr); 2713 tipc_node_unlock(n_ptr);
2789 } 2714 }
2790 read_unlock_bh(&tipc_net_lock);
2791 return res; 2715 return res;
2792} 2716}
2793 2717
2794static void link_print(struct tipc_link *l_ptr, const char *str) 2718static void link_print(struct tipc_link *l_ptr, const char *str)
2795{ 2719{
2796 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2720 struct tipc_bearer *b_ptr;
2721
2722 rcu_read_lock();
2723 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
2724 if (b_ptr)
2725 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
2726 rcu_read_unlock();
2797 2727
2798 if (link_working_unknown(l_ptr)) 2728 if (link_working_unknown(l_ptr))
2799 pr_cont(":WU\n"); 2729 pr_cont(":WU\n");
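
With the bearer pointer replaced by a bearer id throughout link.c, every
dereference now goes through an RCU-protected lookup of bearer_list[] and
must tolerate a NULL result when the bearer has been disabled, as
link_init_max_pkt() and link_print() above show. A user-space sketch of that
lookup discipline, with C11 atomics standing in for rcu_dereference():

/*
 * Readers snapshot the published pointer and bail out on NULL; a writer
 * that disables a bearer simply publishes NULL. Grace periods are elided.
 */
#include <stdatomic.h>
#include <stddef.h>

#define MAX_BEARERS 3

struct bearer {
	unsigned int mtu;
};

static _Atomic(struct bearer *) bearer_list[MAX_BEARERS];

static unsigned int bearer_mtu(unsigned int bearer_id)
{
	struct bearer *b;

	if (bearer_id >= MAX_BEARERS)
		return 0;
	b = atomic_load_explicit(&bearer_list[bearer_id],
				 memory_order_acquire);
	if (!b)		/* bearer may have been disabled concurrently */
		return 0;
	return b->mtu;
}
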
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 8c0b49b5b2ee..200d518b218e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,11 +40,6 @@
40#include "msg.h" 40#include "msg.h"
41#include "node.h" 41#include "node.h"
42 42
43/* Link reassembly status codes
44 */
45#define LINK_REASM_ERROR -1
46#define LINK_REASM_COMPLETE 1
47
48/* Out-of-range value for link sequence numbers 43/* Out-of-range value for link sequence numbers
49 */ 44 */
50#define INVALID_LINK_SEQ 0x10000 45#define INVALID_LINK_SEQ 0x10000
@@ -107,7 +102,7 @@ struct tipc_stats {
107 * @checkpoint: reference point for triggering link continuity checking 102 * @checkpoint: reference point for triggering link continuity checking
108 * @peer_session: link session # being used by peer end of link 103 * @peer_session: link session # being used by peer end of link
109 * @peer_bearer_id: bearer id used by link's peer endpoint 104 * @peer_bearer_id: bearer id used by link's peer endpoint
110 * @b_ptr: pointer to bearer used by link 105 * @bearer_id: local bearer id used by link
111 * @tolerance: minimum link continuity loss needed to reset link [in ms] 106 * @tolerance: minimum link continuity loss needed to reset link [in ms]
112 * @continuity_interval: link continuity testing interval [in ms] 107 * @continuity_interval: link continuity testing interval [in ms]
113 * @abort_limit: # of unacknowledged continuity probes needed to reset link 108 * @abort_limit: # of unacknowledged continuity probes needed to reset link
@@ -116,6 +111,7 @@ struct tipc_stats {
116 * @proto_msg: template for control messages generated by link 111 * @proto_msg: template for control messages generated by link
117 * @pmsg: convenience pointer to "proto_msg" field 112 * @pmsg: convenience pointer to "proto_msg" field
118 * @priority: current link priority 113 * @priority: current link priority
114 * @net_plane: current link network plane ('A' through 'H')
119 * @queue_limit: outbound message queue congestion thresholds (indexed by user) 115 * @queue_limit: outbound message queue congestion thresholds (indexed by user)
120 * @exp_msg_count: # of tunnelled messages expected during link changeover 116 * @exp_msg_count: # of tunnelled messages expected during link changeover
121 * @reset_checkpoint: seq # of last acknowledged message at time of link reset 117 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
@@ -139,8 +135,7 @@ struct tipc_stats {
139 * @next_out: ptr to first unsent outbound message in queue 135 * @next_out: ptr to first unsent outbound message in queue
140 * @waiting_ports: linked list of ports waiting for link congestion to abate 136 * @waiting_ports: linked list of ports waiting for link congestion to abate
141 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 137 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
142 * @reasm_head: list head of partially reassembled inbound message fragments 138 * @reasm_buf: head of partially reassembled inbound message fragments
143 * @reasm_tail: last fragment received
144 * @stats: collects statistics regarding link activity 139 * @stats: collects statistics regarding link activity
145 */ 140 */
146struct tipc_link { 141struct tipc_link {
@@ -155,7 +150,7 @@ struct tipc_link {
155 u32 checkpoint; 150 u32 checkpoint;
156 u32 peer_session; 151 u32 peer_session;
157 u32 peer_bearer_id; 152 u32 peer_bearer_id;
158 struct tipc_bearer *b_ptr; 153 u32 bearer_id;
159 u32 tolerance; 154 u32 tolerance;
160 u32 continuity_interval; 155 u32 continuity_interval;
161 u32 abort_limit; 156 u32 abort_limit;
@@ -167,6 +162,7 @@ struct tipc_link {
167 } proto_msg; 162 } proto_msg;
168 struct tipc_msg *pmsg; 163 struct tipc_msg *pmsg;
169 u32 priority; 164 u32 priority;
165 char net_plane;
170 u32 queue_limit[15]; /* queue_limit[0]==window limit */ 166 u32 queue_limit[15]; /* queue_limit[0]==window limit */
171 167
172 /* Changeover */ 168 /* Changeover */
@@ -202,8 +198,7 @@ struct tipc_link {
202 198
203 /* Fragmentation/reassembly */ 199 /* Fragmentation/reassembly */
204 u32 long_msg_seq_no; 200 u32 long_msg_seq_no;
205 struct sk_buff *reasm_head; 201 struct sk_buff *reasm_buf;
206 struct sk_buff *reasm_tail;
207 202
208 /* Statistics */ 203 /* Statistics */
209 struct tipc_stats stats; 204 struct tipc_stats stats;
@@ -228,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
228 int req_tlv_space); 223 int req_tlv_space);
229struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, 224struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
230 int req_tlv_space); 225 int req_tlv_space);
226void tipc_link_reset_all(struct tipc_node *node);
231void tipc_link_reset(struct tipc_link *l_ptr); 227void tipc_link_reset(struct tipc_link *l_ptr);
232void tipc_link_reset_list(unsigned int bearer_id); 228void tipc_link_reset_list(unsigned int bearer_id);
233int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); 229int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
@@ -239,9 +235,6 @@ int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
239 struct iovec const *msg_sect, 235 struct iovec const *msg_sect,
240 unsigned int len, u32 destnode); 236 unsigned int len, u32 destnode);
241void tipc_link_bundle_rcv(struct sk_buff *buf); 237void tipc_link_bundle_rcv(struct sk_buff *buf);
242int tipc_link_frag_rcv(struct sk_buff **reasm_head,
243 struct sk_buff **reasm_tail,
244 struct sk_buff **fbuf);
245void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 238void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
246 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 239 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
247void tipc_link_push_queue(struct tipc_link *l_ptr); 240void tipc_link_push_queue(struct tipc_link *l_ptr);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e525f8ce1dee..8be6e94a1ca9 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.c: TIPC message header routines 2 * net/tipc/msg.c: TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -99,3 +99,56 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
99 } 99 }
100 return dsz; 100 return dsz;
101} 101}
102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let the first buffer become the head buffer
105 * Returns 1 and sets *buf to headbuf if the chain is complete, otherwise 0
106 * Leaves the headbuf pointer at NULL on failure
107 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{
110 struct sk_buff *head = *headbuf;
111 struct sk_buff *frag = *buf;
112 struct sk_buff *tail;
113 struct tipc_msg *msg = buf_msg(frag);
114 u32 fragid = msg_type(msg);
115 bool headstolen;
116 int delta;
117
118 skb_pull(frag, msg_hdr_sz(msg));
119
120 if (fragid == FIRST_FRAGMENT) {
121 if (head || skb_unclone(frag, GFP_ATOMIC))
122 goto out_free;
123 head = *headbuf = frag;
124 skb_frag_list_init(head);
125 return 0;
126 }
127 if (!head)
128 goto out_free;
129 tail = TIPC_SKB_CB(head)->tail;
130 if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
131 kfree_skb_partial(frag, headstolen);
132 } else {
133 if (!skb_has_frag_list(head))
134 skb_shinfo(head)->frag_list = frag;
135 else
136 tail->next = frag;
137 head->truesize += frag->truesize;
138 head->data_len += frag->len;
139 head->len += frag->len;
140 TIPC_SKB_CB(head)->tail = frag;
141 }
142 if (fragid == LAST_FRAGMENT) {
143 *buf = head;
144 TIPC_SKB_CB(head)->tail = NULL;
145 *headbuf = NULL;
146 return 1;
147 }
148 *buf = NULL;
149 return 0;
150out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf);
153 return 0;
154}
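
tipc_buf_append() is called once per arriving fragment: a return of 1 means
*buf has been replaced by the fully reassembled message, while 0 means the
fragment was absorbed, unless *headbuf was also left at NULL, which signals a
dropped chain. A hedged sketch of that calling convention, mirroring the
tipc_rcv() hunk earlier in this patch (types and names are stand-ins):

/*
 * Assumed contract, per the kernel-doc above: returns 1 when *frag has
 * been replaced by the reassembled message, 0 otherwise; a dropped chain
 * leaves *head at NULL.
 */
struct buf;				/* stand-in for struct sk_buff */

extern int buf_append(struct buf **head, struct buf **frag);
extern void deliver(struct buf *msg);
extern void reset_link(void);

static void rx_fragment(struct buf **reasm, struct buf *frag)
{
	if (buf_append(reasm, &frag)) {
		deliver(frag);		/* frag is now the whole message */
		return;
	}
	if (!*reasm)
		reset_link();		/* chain dropped, as tipc_rcv() does */
	/* else: fragment absorbed, wait for more */
}
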
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 76d1269b9443..503511903d1d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, 2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -711,4 +711,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
711 u32 destnode); 711 u32 destnode);
712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
713 unsigned int len, int max_size, struct sk_buff **buf); 713 unsigned int len, int max_size, struct sk_buff **buf);
714
715int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
716
714#endif 717#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index aff8041dc157..8ce730984aa1 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -38,34 +38,6 @@
38#include "link.h" 38#include "link.h"
39#include "name_distr.h" 39#include "name_distr.h"
40 40
41#define ITEM_SIZE sizeof(struct distr_item)
42
43/**
44 * struct distr_item - publication info distributed to other nodes
45 * @type: name sequence type
46 * @lower: name sequence lower bound
47 * @upper: name sequence upper bound
48 * @ref: publishing port reference
49 * @key: publication key
50 *
51 * ===> All fields are stored in network byte order. <===
52 *
53 * First 3 fields identify (name or) name sequence being published.
54 * Reference field uniquely identifies port that published name sequence.
55 * Key field uniquely identifies publication, in the event a port has
56 * multiple publications of the same name sequence.
57 *
58 * Note: There is no field that identifies the publishing node because it is
59 * the same for all items contained within a publication message.
60 */
61struct distr_item {
62 __be32 type;
63 __be32 lower;
64 __be32 upper;
65 __be32 ref;
66 __be32 key;
67};
68
69/** 41/**
70 * struct publ_list - list of publications made by this node 42 * struct publ_list - list of publications made by this node
71 * @list: circular list of publications 43 * @list: circular list of publications
@@ -127,7 +99,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
127 return buf; 99 return buf;
128} 100}
129 101
130static void named_cluster_distribute(struct sk_buff *buf) 102void named_cluster_distribute(struct sk_buff *buf)
131{ 103{
132 struct sk_buff *buf_copy; 104 struct sk_buff *buf_copy;
133 struct tipc_node *n_ptr; 105 struct tipc_node *n_ptr;
@@ -135,18 +107,18 @@ static void named_cluster_distribute(struct sk_buff *buf)
135 107
136 rcu_read_lock(); 108 rcu_read_lock();
137 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 109 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
138 spin_lock_bh(&n_ptr->lock); 110 tipc_node_lock(n_ptr);
139 l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 111 l_ptr = n_ptr->active_links[n_ptr->addr & 1];
140 if (l_ptr) { 112 if (l_ptr) {
141 buf_copy = skb_copy(buf, GFP_ATOMIC); 113 buf_copy = skb_copy(buf, GFP_ATOMIC);
142 if (!buf_copy) { 114 if (!buf_copy) {
143 spin_unlock_bh(&n_ptr->lock); 115 tipc_node_unlock(n_ptr);
144 break; 116 break;
145 } 117 }
146 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); 118 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
147 __tipc_link_xmit(l_ptr, buf_copy); 119 __tipc_link_xmit(l_ptr, buf_copy);
148 } 120 }
149 spin_unlock_bh(&n_ptr->lock); 121 tipc_node_unlock(n_ptr);
150 } 122 }
151 rcu_read_unlock(); 123 rcu_read_unlock();
152 124
@@ -156,7 +128,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
156/** 128/**
157 * tipc_named_publish - tell other nodes about a new publication by this node 129 * tipc_named_publish - tell other nodes about a new publication by this node
158 */ 130 */
159void tipc_named_publish(struct publication *publ) 131struct sk_buff *tipc_named_publish(struct publication *publ)
160{ 132{
161 struct sk_buff *buf; 133 struct sk_buff *buf;
162 struct distr_item *item; 134 struct distr_item *item;
@@ -165,23 +137,23 @@ void tipc_named_publish(struct publication *publ)
165 publ_lists[publ->scope]->size++; 137 publ_lists[publ->scope]->size++;
166 138
167 if (publ->scope == TIPC_NODE_SCOPE) 139 if (publ->scope == TIPC_NODE_SCOPE)
168 return; 140 return NULL;
169 141
170 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 142 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
171 if (!buf) { 143 if (!buf) {
172 pr_warn("Publication distribution failure\n"); 144 pr_warn("Publication distribution failure\n");
173 return; 145 return NULL;
174 } 146 }
175 147
176 item = (struct distr_item *)msg_data(buf_msg(buf)); 148 item = (struct distr_item *)msg_data(buf_msg(buf));
177 publ_to_item(item, publ); 149 publ_to_item(item, publ);
178 named_cluster_distribute(buf); 150 return buf;
179} 151}
180 152
181/** 153/**
182 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 154 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
183 */ 155 */
184void tipc_named_withdraw(struct publication *publ) 156struct sk_buff *tipc_named_withdraw(struct publication *publ)
185{ 157{
186 struct sk_buff *buf; 158 struct sk_buff *buf;
187 struct distr_item *item; 159 struct distr_item *item;
@@ -190,17 +162,17 @@ void tipc_named_withdraw(struct publication *publ)
190 publ_lists[publ->scope]->size--; 162 publ_lists[publ->scope]->size--;
191 163
192 if (publ->scope == TIPC_NODE_SCOPE) 164 if (publ->scope == TIPC_NODE_SCOPE)
193 return; 165 return NULL;
194 166
195 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 167 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
196 if (!buf) { 168 if (!buf) {
197 pr_warn("Withdrawal distribution failure\n"); 169 pr_warn("Withdrawal distribution failure\n");
198 return; 170 return NULL;
199 } 171 }
200 172
201 item = (struct distr_item *)msg_data(buf_msg(buf)); 173 item = (struct distr_item *)msg_data(buf_msg(buf));
202 publ_to_item(item, publ); 174 publ_to_item(item, publ);
203 named_cluster_distribute(buf); 175 return buf;
204} 176}
205 177
206/* 178/*
@@ -239,31 +211,9 @@ static void named_distribute(struct list_head *message_list, u32 node,
239/** 211/**
240 * tipc_named_node_up - tell specified node about all publications by this node 212 * tipc_named_node_up - tell specified node about all publications by this node
241 */ 213 */
242void tipc_named_node_up(unsigned long nodearg) 214void tipc_named_node_up(u32 max_item_buf, u32 node)
243{ 215{
244 struct tipc_node *n_ptr; 216 LIST_HEAD(message_list);
245 struct tipc_link *l_ptr;
246 struct list_head message_list;
247 u32 node = (u32)nodearg;
248 u32 max_item_buf = 0;
249
250 /* compute maximum amount of publication data to send per message */
251 read_lock_bh(&tipc_net_lock);
252 n_ptr = tipc_node_find(node);
253 if (n_ptr) {
254 tipc_node_lock(n_ptr);
255 l_ptr = n_ptr->active_links[0];
256 if (l_ptr)
257 max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
258 ITEM_SIZE) * ITEM_SIZE;
259 tipc_node_unlock(n_ptr);
260 }
261 read_unlock_bh(&tipc_net_lock);
262 if (!max_item_buf)
263 return;
264
265 /* create list of publication messages, then send them as a unit */
266 INIT_LIST_HEAD(&message_list);
267 217
268 read_lock_bh(&tipc_nametbl_lock); 218 read_lock_bh(&tipc_nametbl_lock);
269 named_distribute(&message_list, node, &publ_cluster, max_item_buf); 219 named_distribute(&message_list, node, &publ_cluster, max_item_buf);
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 9b312ccfd43e..b2eed4ec1526 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,9 +39,38 @@
39 39
40#include "name_table.h" 40#include "name_table.h"
41 41
42void tipc_named_publish(struct publication *publ); 42#define ITEM_SIZE sizeof(struct distr_item)
43void tipc_named_withdraw(struct publication *publ); 43
44void tipc_named_node_up(unsigned long node); 44/**
45 * struct distr_item - publication info distributed to other nodes
46 * @type: name sequence type
47 * @lower: name sequence lower bound
48 * @upper: name sequence upper bound
49 * @ref: publishing port reference
50 * @key: publication key
51 *
52 * ===> All fields are stored in network byte order. <===
53 *
 54 * The first three fields identify the (name or) name sequence being
 55 * published. The reference field uniquely identifies the publishing port.
 56 * The key field uniquely identifies a publication, in the event that a
 57 * port has multiple publications of the same name sequence.
58 *
59 * Note: There is no field that identifies the publishing node because it is
60 * the same for all items contained within a publication message.
61 */
62struct distr_item {
63 __be32 type;
64 __be32 lower;
65 __be32 upper;
66 __be32 ref;
67 __be32 key;
68};
69
70struct sk_buff *tipc_named_publish(struct publication *publ);
71struct sk_buff *tipc_named_withdraw(struct publication *publ);
72void named_cluster_distribute(struct sk_buff *buf);
73void tipc_named_node_up(u32 max_item_buf, u32 node);
45void tipc_named_rcv(struct sk_buff *buf); 74void tipc_named_rcv(struct sk_buff *buf);
46void tipc_named_reinit(void); 75void tipc_named_reinit(void);
47 76
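The distr_item layout above is the wire format, so every field must be converted to network byte order before transmission. A minimal sketch of that conversion step, matching the publ_to_item() calls in the name_distr.c hunks above (the field names on struct publication are assumed to mirror the item fields):

static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref = htonl(p->ref);
	i->key = htonl(p->key);
}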
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 042e8e3cabc0..9d7d37d95187 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -664,6 +664,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
664 u32 scope, u32 port_ref, u32 key) 664 u32 scope, u32 port_ref, u32 key)
665{ 665{
666 struct publication *publ; 666 struct publication *publ;
667 struct sk_buff *buf = NULL;
667 668
668 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) { 669 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
669 pr_warn("Publication failed, local publication limit reached (%u)\n", 670 pr_warn("Publication failed, local publication limit reached (%u)\n",
@@ -676,9 +677,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
676 tipc_own_addr, port_ref, key); 677 tipc_own_addr, port_ref, key);
677 if (likely(publ)) { 678 if (likely(publ)) {
678 table.local_publ_count++; 679 table.local_publ_count++;
679 tipc_named_publish(publ); 680 buf = tipc_named_publish(publ);
680 } 681 }
681 write_unlock_bh(&tipc_nametbl_lock); 682 write_unlock_bh(&tipc_nametbl_lock);
683
684 if (buf)
685 named_cluster_distribute(buf);
682 return publ; 686 return publ;
683} 687}
684 688
@@ -688,15 +692,19 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
688int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 692int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
689{ 693{
690 struct publication *publ; 694 struct publication *publ;
695 struct sk_buff *buf;
691 696
692 write_lock_bh(&tipc_nametbl_lock); 697 write_lock_bh(&tipc_nametbl_lock);
693 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 698 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
694 if (likely(publ)) { 699 if (likely(publ)) {
695 table.local_publ_count--; 700 table.local_publ_count--;
696 tipc_named_withdraw(publ); 701 buf = tipc_named_withdraw(publ);
697 write_unlock_bh(&tipc_nametbl_lock); 702 write_unlock_bh(&tipc_nametbl_lock);
698 list_del_init(&publ->pport_list); 703 list_del_init(&publ->pport_list);
699 kfree(publ); 704 kfree(publ);
705
706 if (buf)
707 named_cluster_distribute(buf);
700 return 1; 708 return 1;
701 } 709 }
702 write_unlock_bh(&tipc_nametbl_lock); 710 write_unlock_bh(&tipc_nametbl_lock);
@@ -961,6 +969,7 @@ static void tipc_purge_publications(struct name_seq *seq)
961 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { 969 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
962 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, 970 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
963 publ->ref, publ->key); 971 publ->ref, publ->key);
972 kfree(publ);
964 } 973 }
965} 974}
966 975
@@ -982,7 +991,6 @@ void tipc_nametbl_stop(void)
982 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { 991 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
983 tipc_purge_publications(seq); 992 tipc_purge_publications(seq);
984 } 993 }
985 continue;
986 } 994 }
987 kfree(table.types); 995 kfree(table.types);
988 table.types = NULL; 996 table.types = NULL;
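The net effect of the publish/withdraw hunks above is that the distribution buffer is now only built while tipc_nametbl_lock is held and is transmitted after the lock is released, so sending never nests inside the name table write lock. A minimal sketch of the resulting call pattern, using only names that appear in this diff:

static void publish_and_distribute_sketch(struct publication *publ)
{
	struct sk_buff *buf;

	write_lock_bh(&tipc_nametbl_lock);
	buf = tipc_named_publish(publ);	/* builds the message; may be NULL */
	write_unlock_bh(&tipc_nametbl_lock);

	if (buf)	/* transmit without holding the name table lock */
		named_cluster_distribute(buf);
}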
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4c564eb69e1a..f64375e7f99f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -39,45 +39,41 @@
39#include "name_distr.h" 39#include "name_distr.h"
40#include "subscr.h" 40#include "subscr.h"
41#include "port.h" 41#include "port.h"
42#include "socket.h"
42#include "node.h" 43#include "node.h"
43#include "config.h" 44#include "config.h"
44 45
45/* 46/*
46 * The TIPC locking policy is designed to ensure a very fine locking 47 * The TIPC locking policy is designed to ensure a very fine locking
47 * granularity, permitting complete parallel access to individual 48 * granularity, permitting complete parallel access to individual
48 * port and node/link instances. The code consists of three major 49 * port and node/link instances. The code consists of four major
49 * locking domains, each protected with their own disjunct set of locks. 50 * locking domains, each protected with their own disjunct set of locks.
50 * 51 *
51 * 1: The routing hierarchy. 52 * 1: The bearer level.
52 * Comprises the structures 'zone', 'cluster', 'node', 'link' 53 * RTNL lock is used to serialize the process of configuring bearer
53 * and 'bearer'. The whole hierarchy is protected by a big 54 * on update side, and RCU lock is applied on read side to make
54 * read/write lock, tipc_net_lock, to enssure that nothing is added 55 * bearer instance valid on both paths of message transmission and
55 * or removed while code is accessing any of these structures. 56 * reception.
56 * This layer must not be called from the two others while they
57 * hold any of their own locks.
58 * Neither must it itself do any upcalls to the other two before
59 * it has released tipc_net_lock and other protective locks.
60 * 57 *
61 * Within the tipc_net_lock domain there are two sub-domains;'node' and 58 * 2: The node and link level.
62 * 'bearer', where local write operations are permitted, 59 * All node instances are saved into two tipc_node_list and node_htable
63 * provided that those are protected by individual spin_locks 60 * lists. The two lists are protected by node_list_lock on write side,
 64 * per instance. Code holding tipc_net_lock(read) and a node spin_lock 61 * and they are guarded with RCU lock on read side. In particular, a
 65 * is permitted to poke around in both the node itself and its 62 * node instance is destroyed only when the TIPC module is removed, at
 66 * subordinate links. I.e, it can update link counters and queues, 63 * which point we can be certain that no user is still accessing it.
 67 * change link state, send protocol messages, and alter the 64 * Therefore, apart from iterating the two lists under RCU protection,
 68 * "active_links" array in the node; but it can _not_ remove a link 65 * there is no need to hold the RCU lock when accessing a node instance
 69 * or a node from the overall structure. 66 * elsewhere.
70 * Correspondingly, individual bearers may change status within a
71 * tipc_net_lock(read), protected by an individual spin_lock ber bearer
72 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
73 * 67 *
68 * In addition, all members in node structure including link instances
69 * are protected by node spin lock.
74 * 70 *
75 * 2: The transport level of the protocol. 71 * 3: The transport level of the protocol.
76 * This consists of the structures port, (and its user level 72 * This consists of the structures port, (and its user level
77 * representations, such as user_port and tipc_sock), reference and 73 * representations, such as user_port and tipc_sock), reference and
78 * tipc_user (port.c, reg.c, socket.c). 74 * tipc_user (port.c, reg.c, socket.c).
79 * 75 *
80 * This layer has four different locks: 76 * This layer has four different locks:
81 * - The tipc_port spin_lock. This is protecting each port instance 77 * - The tipc_port spin_lock. This is protecting each port instance
82 * from parallel data access and removal. Since we can not place 78 * from parallel data access and removal. Since we can not place
83 * this lock in the port itself, it has been placed in the 79 * this lock in the port itself, it has been placed in the
@@ -96,7 +92,7 @@
96 * There are two such lists; 'port_list', which is used for management, 92 * There are two such lists; 'port_list', which is used for management,
97 * and 'wait_list', which is used to queue ports during congestion. 93 * and 'wait_list', which is used to queue ports during congestion.
98 * 94 *
99 * 3: The name table (name_table.c, name_distr.c, subscription.c) 95 * 4: The name table (name_table.c, name_distr.c, subscription.c)
100 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the 96 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
101 * overall name table structure. Nothing must be added/removed to 97 * overall name table structure. Nothing must be added/removed to
102 * this structure without holding write access to it. 98 * this structure without holding write access to it.
@@ -108,8 +104,6 @@
108 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
109*/ 105*/
110 106
111DEFINE_RWLOCK(tipc_net_lock);
112
113static void net_route_named_msg(struct sk_buff *buf) 107static void net_route_named_msg(struct sk_buff *buf)
114{ 108{
115 struct tipc_msg *msg = buf_msg(buf); 109 struct tipc_msg *msg = buf_msg(buf);
@@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
148 if (msg_mcast(msg)) 142 if (msg_mcast(msg))
149 tipc_port_mcast_rcv(buf, NULL); 143 tipc_port_mcast_rcv(buf, NULL);
150 else if (msg_destport(msg)) 144 else if (msg_destport(msg))
151 tipc_port_rcv(buf); 145 tipc_sk_rcv(buf);
152 else 146 else
153 net_route_named_msg(buf); 147 net_route_named_msg(buf);
154 return; 148 return;
@@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
171 tipc_link_xmit(buf, dnode, msg_link_selector(msg)); 165 tipc_link_xmit(buf, dnode, msg_link_selector(msg));
172} 166}
173 167
174void tipc_net_start(u32 addr) 168int tipc_net_start(u32 addr)
175{ 169{
176 char addr_string[16]; 170 char addr_string[16];
171 int res;
177 172
178 write_lock_bh(&tipc_net_lock);
179 tipc_own_addr = addr; 173 tipc_own_addr = addr;
180 tipc_named_reinit(); 174 tipc_named_reinit();
181 tipc_port_reinit(); 175 tipc_port_reinit();
182 tipc_bclink_init(); 176 res = tipc_bclink_init();
183 write_unlock_bh(&tipc_net_lock); 177 if (res)
178 return res;
184 179
185 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 180 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
186 TIPC_ZONE_SCOPE, 0, tipc_own_addr); 181 TIPC_ZONE_SCOPE, 0, tipc_own_addr);
182
187 pr_info("Started in network mode\n"); 183 pr_info("Started in network mode\n");
188 pr_info("Own node address %s, network identity %u\n", 184 pr_info("Own node address %s, network identity %u\n",
189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 185 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
186 return 0;
190} 187}
191 188
192void tipc_net_stop(void) 189void tipc_net_stop(void)
@@ -195,11 +192,11 @@ void tipc_net_stop(void)
195 return; 192 return;
196 193
197 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); 194 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
198 write_lock_bh(&tipc_net_lock); 195 rtnl_lock();
199 tipc_bearer_stop(); 196 tipc_bearer_stop();
200 tipc_bclink_stop(); 197 tipc_bclink_stop();
201 tipc_node_stop(); 198 tipc_node_stop();
202 write_unlock_bh(&tipc_net_lock); 199 rtnl_unlock();
203 200
204 pr_info("Left network mode\n"); 201 pr_info("Left network mode\n");
205} 202}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 079daadb3f72..c6c2b46f7c28 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -37,11 +37,9 @@
37#ifndef _TIPC_NET_H 37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H 38#define _TIPC_NET_H
39 39
40extern rwlock_t tipc_net_lock;
41
42void tipc_net_route_msg(struct sk_buff *buf); 40void tipc_net_route_msg(struct sk_buff *buf);
43 41
44void tipc_net_start(u32 addr); 42int tipc_net_start(u32 addr);
45void tipc_net_stop(void); 43void tipc_net_stop(void);
46 44
47#endif 45#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 1d3a4999a70f..5b44c3041be4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -108,7 +108,7 @@ struct tipc_node *tipc_node_create(u32 addr)
108 break; 108 break;
109 } 109 }
110 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 110 list_add_tail_rcu(&n_ptr->list, &temp_node->list);
111 n_ptr->block_setup = WAIT_PEER_DOWN; 111 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
112 n_ptr->signature = INVALID_NODE_SIG; 112 n_ptr->signature = INVALID_NODE_SIG;
113 113
114 tipc_num_nodes++; 114 tipc_num_nodes++;
@@ -144,11 +144,13 @@ void tipc_node_stop(void)
144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
145{ 145{
146 struct tipc_link **active = &n_ptr->active_links[0]; 146 struct tipc_link **active = &n_ptr->active_links[0];
147 u32 addr = n_ptr->addr;
147 148
148 n_ptr->working_links++; 149 n_ptr->working_links++;
149 150 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
151 l_ptr->bearer_id, addr);
150 pr_info("Established link <%s> on network plane %c\n", 152 pr_info("Established link <%s> on network plane %c\n",
151 l_ptr->name, l_ptr->b_ptr->net_plane); 153 l_ptr->name, l_ptr->net_plane);
152 154
153 if (!active[0]) { 155 if (!active[0]) {
154 active[0] = active[1] = l_ptr; 156 active[0] = active[1] = l_ptr;
@@ -203,16 +205,18 @@ static void node_select_active_links(struct tipc_node *n_ptr)
203void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 205void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
204{ 206{
205 struct tipc_link **active; 207 struct tipc_link **active;
208 u32 addr = n_ptr->addr;
206 209
207 n_ptr->working_links--; 210 n_ptr->working_links--;
211 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
208 212
209 if (!tipc_link_is_active(l_ptr)) { 213 if (!tipc_link_is_active(l_ptr)) {
210 pr_info("Lost standby link <%s> on network plane %c\n", 214 pr_info("Lost standby link <%s> on network plane %c\n",
211 l_ptr->name, l_ptr->b_ptr->net_plane); 215 l_ptr->name, l_ptr->net_plane);
212 return; 216 return;
213 } 217 }
214 pr_info("Lost link <%s> on network plane %c\n", 218 pr_info("Lost link <%s> on network plane %c\n",
215 l_ptr->name, l_ptr->b_ptr->net_plane); 219 l_ptr->name, l_ptr->net_plane);
216 220
217 active = &n_ptr->active_links[0]; 221 active = &n_ptr->active_links[0];
218 if (active[0] == l_ptr) 222 if (active[0] == l_ptr)
@@ -239,7 +243,7 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
239 243
240void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 244void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
241{ 245{
242 n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; 246 n_ptr->links[l_ptr->bearer_id] = l_ptr;
243 spin_lock_bh(&node_list_lock); 247 spin_lock_bh(&node_list_lock);
244 tipc_num_links++; 248 tipc_num_links++;
245 spin_unlock_bh(&node_list_lock); 249 spin_unlock_bh(&node_list_lock);
@@ -263,26 +267,12 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
263 267
264static void node_established_contact(struct tipc_node *n_ptr) 268static void node_established_contact(struct tipc_node *n_ptr)
265{ 269{
266 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 270 n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
267 n_ptr->bclink.oos_state = 0; 271 n_ptr->bclink.oos_state = 0;
268 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 272 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
269 tipc_bclink_add_node(n_ptr->addr); 273 tipc_bclink_add_node(n_ptr->addr);
270} 274}
271 275
272static void node_name_purge_complete(unsigned long node_addr)
273{
274 struct tipc_node *n_ptr;
275
276 read_lock_bh(&tipc_net_lock);
277 n_ptr = tipc_node_find(node_addr);
278 if (n_ptr) {
279 tipc_node_lock(n_ptr);
280 n_ptr->block_setup &= ~WAIT_NAMES_GONE;
281 tipc_node_unlock(n_ptr);
282 }
283 read_unlock_bh(&tipc_net_lock);
284}
285
286static void node_lost_contact(struct tipc_node *n_ptr) 276static void node_lost_contact(struct tipc_node *n_ptr)
287{ 277{
288 char addr_string[16]; 278 char addr_string[16];
@@ -296,10 +286,9 @@ static void node_lost_contact(struct tipc_node *n_ptr)
296 kfree_skb_list(n_ptr->bclink.deferred_head); 286 kfree_skb_list(n_ptr->bclink.deferred_head);
297 n_ptr->bclink.deferred_size = 0; 287 n_ptr->bclink.deferred_size = 0;
298 288
299 if (n_ptr->bclink.reasm_head) { 289 if (n_ptr->bclink.reasm_buf) {
300 kfree_skb(n_ptr->bclink.reasm_head); 290 kfree_skb(n_ptr->bclink.reasm_buf);
301 n_ptr->bclink.reasm_head = NULL; 291 n_ptr->bclink.reasm_buf = NULL;
302 n_ptr->bclink.reasm_tail = NULL;
303 } 292 }
304 293
305 tipc_bclink_remove_node(n_ptr->addr); 294 tipc_bclink_remove_node(n_ptr->addr);
@@ -318,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
318 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
319 } 308 }
320 309
321 /* Notify subscribers */ 310 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
322 tipc_nodesub_notify(n_ptr);
323 311
324 /* Prevent re-contact with node until cleanup is done */ 312 /* Notify subscribers and prevent re-contact with node until
325 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; 313 * cleanup is done.
326 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); 314 */
315 n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
316 TIPC_NOTIFY_NODE_DOWN;
327} 317}
328 318
329struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 319struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -436,3 +426,63 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
436 rcu_read_unlock(); 426 rcu_read_unlock();
437 return buf; 427 return buf;
438} 428}
429
430/**
431 * tipc_node_get_linkname - get the name of a link
432 *
433 * @bearer_id: id of the bearer
 434 * @addr: peer node address
435 * @linkname: link name output buffer
436 *
437 * Returns 0 on success
438 */
439int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
440{
441 struct tipc_link *link;
442 struct tipc_node *node = tipc_node_find(addr);
443
444 if ((bearer_id >= MAX_BEARERS) || !node)
445 return -EINVAL;
446 tipc_node_lock(node);
447 link = node->links[bearer_id];
448 if (link) {
449 strncpy(linkname, link->name, len);
450 tipc_node_unlock(node);
451 return 0;
452 }
453 tipc_node_unlock(node);
454 return -EINVAL;
455}
456
457void tipc_node_unlock(struct tipc_node *node)
458{
459 LIST_HEAD(nsub_list);
460 struct tipc_link *link;
461 int pkt_sz = 0;
462 u32 addr = 0;
463
464 if (likely(!node->action_flags)) {
465 spin_unlock_bh(&node->lock);
466 return;
467 }
468
469 if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
470 list_replace_init(&node->nsub, &nsub_list);
471 node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
472 }
473 if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
474 link = node->active_links[0];
475 node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
476 if (link) {
477 pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
478 ITEM_SIZE;
479 addr = node->addr;
480 }
481 }
482 spin_unlock_bh(&node->lock);
483
484 if (!list_empty(&nsub_list))
485 tipc_nodesub_notify(&nsub_list);
486 if (pkt_sz)
487 tipc_named_node_up(pkt_sz, addr);
488}
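The new tipc_node_unlock() above follows a deferred-action pattern: action flags set while the node lock was held are snapshotted and cleared under node->lock, and the corresponding work (subscriber notification, name distribution) runs only after the spinlock is dropped. A simplified sketch of the idea; the helper name is hypothetical:

static void node_unlock_sketch(struct tipc_node *node)
{
	LIST_HEAD(nsub_list);

	spin_lock_bh(&node->lock);
	if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
		/* detach the subscription list while still locked ... */
		list_replace_init(&node->nsub, &nsub_list);
		node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
	}
	spin_unlock_bh(&node->lock);

	/* ... then notify without holding any node lock */
	if (!list_empty(&nsub_list))
		tipc_nodesub_notify(&nsub_list);
}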
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 7cbb8cec1a93..9087063793f2 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -47,62 +47,73 @@
47 */ 47 */
48#define INVALID_NODE_SIG 0x10000 48#define INVALID_NODE_SIG 0x10000
49 49
 50/* Flags used to block (re)establishment of contact with a neighboring node */ 50/* Flags that determine the actions to be taken on a node
51#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
52#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ 52 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
53#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ 53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 */
56enum {
57 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
58 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
59 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
60 TIPC_NOTIFY_NODE_UP = (1 << 4)
61};
62
63/**
64 * struct tipc_node_bclink - TIPC node bclink structure
65 * @acked: sequence # of last outbound b'cast message acknowledged by node
66 * @last_in: sequence # of last in-sequence b'cast message received from node
67 * @last_sent: sequence # of last b'cast message sent by node
68 * @oos_state: state tracker for handling OOS b'cast messages
69 * @deferred_size: number of OOS b'cast messages in deferred queue
70 * @deferred_head: oldest OOS b'cast message received from node
71 * @deferred_tail: newest OOS b'cast message received from node
72 * @reasm_buf: broadcast reassembly queue head from node
73 * @recv_permitted: true if node is allowed to receive b'cast messages
74 */
75struct tipc_node_bclink {
76 u32 acked;
77 u32 last_in;
78 u32 last_sent;
79 u32 oos_state;
80 u32 deferred_size;
81 struct sk_buff *deferred_head;
82 struct sk_buff *deferred_tail;
83 struct sk_buff *reasm_buf;
84 bool recv_permitted;
85};
54 86
55/** 87/**
56 * struct tipc_node - TIPC node structure 88 * struct tipc_node - TIPC node structure
57 * @addr: network address of node 89 * @addr: network address of node
58 * @lock: spinlock governing access to structure 90 * @lock: spinlock governing access to structure
59 * @hash: links to adjacent nodes in unsorted hash chain 91 * @hash: links to adjacent nodes in unsorted hash chain
60 * @list: links to adjacent nodes in sorted list of cluster's nodes
61 * @nsub: list of "node down" subscriptions monitoring node
62 * @active_links: pointers to active links to node 92 * @active_links: pointers to active links to node
63 * @links: pointers to all links to node 93 * @links: pointers to all links to node
94 * @action_flags: bit mask of different types of node actions
95 * @bclink: broadcast-related info
96 * @list: links to adjacent nodes in sorted list of cluster's nodes
64 * @working_links: number of working links to node (both active and standby) 97 * @working_links: number of working links to node (both active and standby)
65 * @block_setup: bit mask of conditions preventing link establishment to node
66 * @link_cnt: number of links to node 98 * @link_cnt: number of links to node
67 * @signature: node instance identifier 99 * @signature: node instance identifier
68 * @bclink: broadcast-related info 100 * @nsub: list of "node down" subscriptions monitoring node
69 * @rcu: rcu struct for tipc_node 101 * @rcu: rcu struct for tipc_node
70 * @acked: sequence # of last outbound b'cast message acknowledged by node
71 * @last_in: sequence # of last in-sequence b'cast message received from node
72 * @last_sent: sequence # of last b'cast message sent by node
73 * @oos_state: state tracker for handling OOS b'cast messages
74 * @deferred_size: number of OOS b'cast messages in deferred queue
75 * @deferred_head: oldest OOS b'cast message received from node
76 * @deferred_tail: newest OOS b'cast message received from node
77 * @reasm_head: broadcast reassembly queue head from node
78 * @reasm_tail: last broadcast fragment received from node
79 * @recv_permitted: true if node is allowed to receive b'cast messages
80 */ 102 */
81struct tipc_node { 103struct tipc_node {
82 u32 addr; 104 u32 addr;
83 spinlock_t lock; 105 spinlock_t lock;
84 struct hlist_node hash; 106 struct hlist_node hash;
85 struct list_head list;
86 struct list_head nsub;
87 struct tipc_link *active_links[2]; 107 struct tipc_link *active_links[2];
88 struct tipc_link *links[MAX_BEARERS]; 108 struct tipc_link *links[MAX_BEARERS];
109 unsigned int action_flags;
110 struct tipc_node_bclink bclink;
111 struct list_head list;
89 int link_cnt; 112 int link_cnt;
90 int working_links; 113 int working_links;
91 int block_setup;
92 u32 signature; 114 u32 signature;
115 struct list_head nsub;
93 struct rcu_head rcu; 116 struct rcu_head rcu;
94 struct {
95 u32 acked;
96 u32 last_in;
97 u32 last_sent;
98 u32 oos_state;
99 u32 deferred_size;
100 struct sk_buff *deferred_head;
101 struct sk_buff *deferred_tail;
102 struct sk_buff *reasm_head;
103 struct sk_buff *reasm_tail;
104 bool recv_permitted;
105 } bclink;
106}; 117};
107 118
108extern struct list_head tipc_node_list; 119extern struct list_head tipc_node_list;
@@ -118,15 +129,18 @@ int tipc_node_active_links(struct tipc_node *n_ptr);
118int tipc_node_is_up(struct tipc_node *n_ptr); 129int tipc_node_is_up(struct tipc_node *n_ptr);
119struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 130struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
120struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 131struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
132int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
133void tipc_node_unlock(struct tipc_node *node);
121 134
122static inline void tipc_node_lock(struct tipc_node *n_ptr) 135static inline void tipc_node_lock(struct tipc_node *node)
123{ 136{
124 spin_lock_bh(&n_ptr->lock); 137 spin_lock_bh(&node->lock);
125} 138}
126 139
127static inline void tipc_node_unlock(struct tipc_node *n_ptr) 140static inline bool tipc_node_blocked(struct tipc_node *node)
128{ 141{
129 spin_unlock_bh(&n_ptr->lock); 142 return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
143 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
130} 144}
131 145
132#endif 146#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 8a7384c04add..7c59ab1d6ecb 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -81,14 +81,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
81 * 81 *
82 * Note: node is locked by caller 82 * Note: node is locked by caller
83 */ 83 */
84void tipc_nodesub_notify(struct tipc_node *node) 84void tipc_nodesub_notify(struct list_head *nsub_list)
85{ 85{
86 struct tipc_node_subscr *ns; 86 struct tipc_node_subscr *ns, *safe;
87 87
88 list_for_each_entry(ns, &node->nsub, nodesub_list) { 88 list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
89 if (ns->handle_node_down) { 89 if (ns->handle_node_down) {
90 tipc_k_signal((Handler)ns->handle_node_down, 90 ns->handle_node_down(ns->usr_handle);
91 (unsigned long)ns->usr_handle);
92 ns->handle_node_down = NULL; 91 ns->handle_node_down = NULL;
93 } 92 }
94 } 93 }
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index c95d20727ded..d91b8cc81e3d 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -58,6 +58,6 @@ struct tipc_node_subscr {
58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, 58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
59 void *usr_handle, net_ev_handler handle_down); 59 void *usr_handle, net_ev_handler handle_down);
60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); 60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
61void tipc_nodesub_notify(struct tipc_node *node); 61void tipc_nodesub_notify(struct list_head *nsub_list);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 5c14c7801ee6..5fd7acce01ea 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -165,7 +165,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
165 msg_set_destnode(msg, tipc_own_addr); 165 msg_set_destnode(msg, tipc_own_addr);
166 if (dp->count == 1) { 166 if (dp->count == 1) {
167 msg_set_destport(msg, dp->ports[0]); 167 msg_set_destport(msg, dp->ports[0]);
168 tipc_port_rcv(buf); 168 tipc_sk_rcv(buf);
169 tipc_port_list_free(dp); 169 tipc_port_list_free(dp);
170 return; 170 return;
171 } 171 }
@@ -180,7 +180,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
180 if ((index == 0) && (cnt != 0)) 180 if ((index == 0) && (cnt != 0))
181 item = item->next; 181 item = item->next;
182 msg_set_destport(buf_msg(b), item->ports[index]); 182 msg_set_destport(buf_msg(b), item->ports[index]);
183 tipc_port_rcv(b); 183 tipc_sk_rcv(b);
184 } 184 }
185 } 185 }
186exit: 186exit:
@@ -343,7 +343,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
343 /* send returned message & dispose of rejected message */ 343 /* send returned message & dispose of rejected message */
344 src_node = msg_prevnode(msg); 344 src_node = msg_prevnode(msg);
345 if (in_own_node(src_node)) 345 if (in_own_node(src_node))
346 tipc_port_rcv(rbuf); 346 tipc_sk_rcv(rbuf);
347 else 347 else
348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg)); 348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
349exit: 349exit:
@@ -754,37 +754,6 @@ int tipc_port_shutdown(u32 ref)
754 return tipc_port_disconnect(ref); 754 return tipc_port_disconnect(ref);
755} 755}
756 756
757/**
758 * tipc_port_rcv - receive message from lower layer and deliver to port user
759 */
760int tipc_port_rcv(struct sk_buff *buf)
761{
762 struct tipc_port *p_ptr;
763 struct tipc_msg *msg = buf_msg(buf);
764 u32 destport = msg_destport(msg);
765 u32 dsz = msg_data_sz(msg);
766 u32 err;
767
768 /* forward unresolved named message */
769 if (unlikely(!destport)) {
770 tipc_net_route_msg(buf);
771 return dsz;
772 }
773
774 /* validate destination & pass to port, otherwise reject message */
775 p_ptr = tipc_port_lock(destport);
776 if (likely(p_ptr)) {
777 err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
778 tipc_port_unlock(p_ptr);
779 if (likely(!err))
780 return dsz;
781 } else {
782 err = TIPC_ERR_NO_PORT;
783 }
784
785 return tipc_reject_msg(buf, err);
786}
787
788/* 757/*
789 * tipc_port_iovec_rcv: Concatenate and deliver sectioned 758 * tipc_port_iovec_rcv: Concatenate and deliver sectioned
790 * message for this node. 759 * message for this node.
@@ -798,7 +767,7 @@ static int tipc_port_iovec_rcv(struct tipc_port *sender,
798 767
799 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf); 768 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
800 if (likely(buf)) 769 if (likely(buf))
801 tipc_port_rcv(buf); 770 tipc_sk_rcv(buf);
802 return res; 771 return res;
803} 772}
804 773
diff --git a/net/tipc/port.h b/net/tipc/port.h
index a00397393bd1..cf4ca5b1d9a4 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -42,9 +42,10 @@
42#include "msg.h" 42#include "msg.h"
43#include "node_subscr.h" 43#include "node_subscr.h"
44 44
45#define TIPC_FLOW_CONTROL_WIN 512 45#define TIPC_CONNACK_INTV 256
46#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \ 46#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
47 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) 47#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
48 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
48 49
49/** 50/**
50 * struct tipc_port - TIPC port structure 51 * struct tipc_port - TIPC port structure
@@ -134,7 +135,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
134/* 135/*
135 * TIPC messaging routines 136 * TIPC messaging routines
136 */ 137 */
137int tipc_port_rcv(struct sk_buff *buf);
138 138
139int tipc_send(struct tipc_port *port, 139int tipc_send(struct tipc_port *port,
140 struct iovec const *msg_sect, 140 struct iovec const *msg_sect,
@@ -187,7 +187,7 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
187 187
188static inline int tipc_port_congested(struct tipc_port *p_ptr) 188static inline int tipc_port_congested(struct tipc_port *p_ptr)
189{ 189{
190 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 190 return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
191} 191}
192 192
193 193
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3c0256962f7d..ef0475568f9e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -36,6 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "port.h" 38#include "port.h"
39#include "node.h"
39 40
40#include <linux/export.h> 41#include <linux/export.h>
41 42
@@ -44,7 +45,7 @@
44 45
45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46 47
47static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
48static void tipc_data_ready(struct sock *sk); 49static void tipc_data_ready(struct sock *sk);
49static void tipc_write_space(struct sock *sk); 50static void tipc_write_space(struct sock *sk);
50static int tipc_release(struct socket *sock); 51static int tipc_release(struct socket *sock);
@@ -195,11 +196,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
195 sock->state = state; 196 sock->state = state;
196 197
197 sock_init_data(sock, sk); 198 sock_init_data(sock, sk);
198 sk->sk_backlog_rcv = backlog_rcv; 199 sk->sk_backlog_rcv = tipc_backlog_rcv;
199 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 200 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
200 sk->sk_data_ready = tipc_data_ready; 201 sk->sk_data_ready = tipc_data_ready;
201 sk->sk_write_space = tipc_write_space; 202 sk->sk_write_space = tipc_write_space;
202 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; 203 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
204 atomic_set(&tsk->dupl_rcvcnt, 0);
203 tipc_port_unlock(port); 205 tipc_port_unlock(port);
204 206
205 if (sock->state == SS_READY) { 207 if (sock->state == SS_READY) {
@@ -983,10 +985,11 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
983 return 0; 985 return 0;
984} 986}
985 987
986static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) 988static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
987{ 989{
988 struct sock *sk = sock->sk; 990 struct sock *sk = sock->sk;
989 DEFINE_WAIT(wait); 991 DEFINE_WAIT(wait);
992 long timeo = *timeop;
990 int err; 993 int err;
991 994
992 for (;;) { 995 for (;;) {
@@ -1011,6 +1014,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
1011 break; 1014 break;
1012 } 1015 }
1013 finish_wait(sk_sleep(sk), &wait); 1016 finish_wait(sk_sleep(sk), &wait);
1017 *timeop = timeo;
1014 return err; 1018 return err;
1015} 1019}
1016 1020
@@ -1054,7 +1058,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1054restart: 1058restart:
1055 1059
1056 /* Look for a message in receive queue; wait if necessary */ 1060 /* Look for a message in receive queue; wait if necessary */
1057 res = tipc_wait_for_rcvmsg(sock, timeo); 1061 res = tipc_wait_for_rcvmsg(sock, &timeo);
1058 if (res) 1062 if (res)
1059 goto exit; 1063 goto exit;
1060 1064
@@ -1100,7 +1104,7 @@ restart:
1100 /* Consume received message (optional) */ 1104 /* Consume received message (optional) */
1101 if (likely(!(flags & MSG_PEEK))) { 1105 if (likely(!(flags & MSG_PEEK))) {
1102 if ((sock->state != SS_READY) && 1106 if ((sock->state != SS_READY) &&
1103 (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1107 (++port->conn_unacked >= TIPC_CONNACK_INTV))
1104 tipc_acknowledge(port->ref, port->conn_unacked); 1108 tipc_acknowledge(port->ref, port->conn_unacked);
1105 advance_rx_queue(sk); 1109 advance_rx_queue(sk);
1106 } 1110 }
@@ -1152,7 +1156,7 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1152 1156
1153restart: 1157restart:
1154 /* Look for a message in receive queue; wait if necessary */ 1158 /* Look for a message in receive queue; wait if necessary */
1155 res = tipc_wait_for_rcvmsg(sock, timeo); 1159 res = tipc_wait_for_rcvmsg(sock, &timeo);
1156 if (res) 1160 if (res)
1157 goto exit; 1161 goto exit;
1158 1162
@@ -1209,7 +1213,7 @@ restart:
1209 1213
1210 /* Consume received message (optional) */ 1214 /* Consume received message (optional) */
1211 if (likely(!(flags & MSG_PEEK))) { 1215 if (likely(!(flags & MSG_PEEK))) {
1212 if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1216 if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
1213 tipc_acknowledge(port->ref, port->conn_unacked); 1217 tipc_acknowledge(port->ref, port->conn_unacked);
1214 advance_rx_queue(sk); 1218 advance_rx_queue(sk);
1215 } 1219 }
@@ -1415,7 +1419,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1415} 1419}
1416 1420
1417/** 1421/**
1418 * backlog_rcv - handle incoming message from backlog queue 1422 * tipc_backlog_rcv - handle incoming message from backlog queue
1419 * @sk: socket 1423 * @sk: socket
1420 * @buf: message 1424 * @buf: message
1421 * 1425 *
@@ -1423,47 +1427,74 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1423 * 1427 *
1424 * Returns 0 1428 * Returns 0
1425 */ 1429 */
1426static int backlog_rcv(struct sock *sk, struct sk_buff *buf) 1430static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1427{ 1431{
1428 u32 res; 1432 u32 res;
1433 struct tipc_sock *tsk = tipc_sk(sk);
1434 uint truesize = buf->truesize;
1429 1435
1430 res = filter_rcv(sk, buf); 1436 res = filter_rcv(sk, buf);
1431 if (res) 1437 if (unlikely(res))
1432 tipc_reject_msg(buf, res); 1438 tipc_reject_msg(buf, res);
1439
1440 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
1441 atomic_add(truesize, &tsk->dupl_rcvcnt);
1442
1433 return 0; 1443 return 0;
1434} 1444}
1435 1445
1436/** 1446/**
1437 * tipc_sk_rcv - handle incoming message 1447 * tipc_sk_rcv - handle incoming message
1438 * @sk: socket receiving message 1448 * @buf: buffer containing arriving message
1439 * @buf: message 1449 * Consumes buffer
 1440 * 1450 * Returns 0 on success, or -EHOSTUNREACH on failure
1441 * Called with port lock already taken.
1442 *
1443 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1444 */ 1451 */
1445u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf) 1452int tipc_sk_rcv(struct sk_buff *buf)
1446{ 1453{
1447 u32 res; 1454 struct tipc_sock *tsk;
1455 struct tipc_port *port;
1456 struct sock *sk;
1457 u32 dport = msg_destport(buf_msg(buf));
1458 int err = TIPC_OK;
1459 uint limit;
1448 1460
1449 /* 1461 /* Forward unresolved named message */
1450 * Process message if socket is unlocked; otherwise add to backlog queue 1462 if (unlikely(!dport)) {
1451 * 1463 tipc_net_route_msg(buf);
1452 * This code is based on sk_receive_skb(), but must be distinct from it 1464 return 0;
1453 * since a TIPC-specific filter/reject mechanism is utilized 1465 }
1454 */ 1466
1467 /* Validate destination */
1468 port = tipc_port_lock(dport);
1469 if (unlikely(!port)) {
1470 err = TIPC_ERR_NO_PORT;
1471 goto exit;
1472 }
1473
1474 tsk = tipc_port_to_sock(port);
1475 sk = &tsk->sk;
1476
1477 /* Queue message */
1455 bh_lock_sock(sk); 1478 bh_lock_sock(sk);
1479
1456 if (!sock_owned_by_user(sk)) { 1480 if (!sock_owned_by_user(sk)) {
1457 res = filter_rcv(sk, buf); 1481 err = filter_rcv(sk, buf);
1458 } else { 1482 } else {
1459 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf))) 1483 if (sk->sk_backlog.len == 0)
1460 res = TIPC_ERR_OVERLOAD; 1484 atomic_set(&tsk->dupl_rcvcnt, 0);
1461 else 1485 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
1462 res = TIPC_OK; 1486 if (sk_add_backlog(sk, buf, limit))
1487 err = TIPC_ERR_OVERLOAD;
1463 } 1488 }
1489
1464 bh_unlock_sock(sk); 1490 bh_unlock_sock(sk);
1491 tipc_port_unlock(port);
1465 1492
1466 return res; 1493 if (likely(!err))
1494 return 0;
1495exit:
1496 tipc_reject_msg(buf, err);
1497 return -EHOSTUNREACH;
1467} 1498}
1468 1499
1469static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 1500static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1905,6 +1936,28 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
1905 return put_user(sizeof(value), ol); 1936 return put_user(sizeof(value), ol);
1906} 1937}
1907 1938
1939int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
1940{
1941 struct tipc_sioc_ln_req lnr;
1942 void __user *argp = (void __user *)arg;
1943
1944 switch (cmd) {
1945 case SIOCGETLINKNAME:
1946 if (copy_from_user(&lnr, argp, sizeof(lnr)))
1947 return -EFAULT;
1948 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
1949 lnr.linkname, TIPC_MAX_LINK_NAME)) {
1950 if (copy_to_user(argp, &lnr, sizeof(lnr)))
1951 return -EFAULT;
1952 return 0;
1953 }
1954 return -EADDRNOTAVAIL;
1955 break;
1956 default:
1957 return -ENOIOCTLCMD;
1958 }
1959}
1960
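For reference, a hypothetical user-space caller of the new SIOCGETLINKNAME ioctl could look as follows; the layout of struct tipc_sioc_ln_req (peer, bearer_id, linkname) is assumed from the hunk above:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/tipc.h>

int get_link_name(int sd, __u32 peer, __u32 bearer_id,
		  char *name, size_t len)
{
	struct tipc_sioc_ln_req lnr;

	memset(&lnr, 0, sizeof(lnr));
	lnr.peer = peer;
	lnr.bearer_id = bearer_id;
	if (ioctl(sd, SIOCGETLINKNAME, &lnr) < 0)
		return -1;	/* e.g. EADDRNOTAVAIL if no such link */
	strncpy(name, lnr.linkname, len - 1);
	name[len - 1] = '\0';
	return 0;
}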
1908/* Protocol switches for the various types of TIPC sockets */ 1961/* Protocol switches for the various types of TIPC sockets */
1909 1962
1910static const struct proto_ops msg_ops = { 1963static const struct proto_ops msg_ops = {
@@ -1917,7 +1970,7 @@ static const struct proto_ops msg_ops = {
1917 .accept = sock_no_accept, 1970 .accept = sock_no_accept,
1918 .getname = tipc_getname, 1971 .getname = tipc_getname,
1919 .poll = tipc_poll, 1972 .poll = tipc_poll,
1920 .ioctl = sock_no_ioctl, 1973 .ioctl = tipc_ioctl,
1921 .listen = sock_no_listen, 1974 .listen = sock_no_listen,
1922 .shutdown = tipc_shutdown, 1975 .shutdown = tipc_shutdown,
1923 .setsockopt = tipc_setsockopt, 1976 .setsockopt = tipc_setsockopt,
@@ -1938,7 +1991,7 @@ static const struct proto_ops packet_ops = {
1938 .accept = tipc_accept, 1991 .accept = tipc_accept,
1939 .getname = tipc_getname, 1992 .getname = tipc_getname,
1940 .poll = tipc_poll, 1993 .poll = tipc_poll,
1941 .ioctl = sock_no_ioctl, 1994 .ioctl = tipc_ioctl,
1942 .listen = tipc_listen, 1995 .listen = tipc_listen,
1943 .shutdown = tipc_shutdown, 1996 .shutdown = tipc_shutdown,
1944 .setsockopt = tipc_setsockopt, 1997 .setsockopt = tipc_setsockopt,
@@ -1959,7 +2012,7 @@ static const struct proto_ops stream_ops = {
1959 .accept = tipc_accept, 2012 .accept = tipc_accept,
1960 .getname = tipc_getname, 2013 .getname = tipc_getname,
1961 .poll = tipc_poll, 2014 .poll = tipc_poll,
1962 .ioctl = sock_no_ioctl, 2015 .ioctl = tipc_ioctl,
1963 .listen = tipc_listen, 2016 .listen = tipc_listen,
1964 .shutdown = tipc_shutdown, 2017 .shutdown = tipc_shutdown,
1965 .setsockopt = tipc_setsockopt, 2018 .setsockopt = tipc_setsockopt,
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 74e5c7f195a6..3afcd2a70b31 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,12 +44,14 @@
44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack 44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack
45 * @peer_name: the peer of the connection, if any 45 * @peer_name: the peer of the connection, if any
46 * @conn_timeout: the time we can wait for an unresponded setup request 46 * @conn_timeout: the time we can wait for an unresponded setup request
47 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
47 */ 48 */
48 49
49struct tipc_sock { 50struct tipc_sock {
50 struct sock sk; 51 struct sock sk;
51 struct tipc_port port; 52 struct tipc_port port;
52 unsigned int conn_timeout; 53 unsigned int conn_timeout;
54 atomic_t dupl_rcvcnt;
53}; 55};
54 56
55static inline struct tipc_sock *tipc_sk(const struct sock *sk) 57static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -67,6 +69,6 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
67 tsk->sk.sk_write_space(&tsk->sk); 69 tsk->sk.sk_write_space(&tsk->sk);
68} 70}
69 71
70u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf); 72int tipc_sk_rcv(struct sk_buff *buf);
71 73
 72#endif 74#endif
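The dupl_rcvcnt field documented above compensates for double accounting: while buffers sit in the backlog their truesize is charged once by sk_add_backlog(), and tipc_backlog_rcv() charges it again when the data moves to the receive queue. The counter tracks those doubly counted bytes so tipc_sk_rcv() can widen the backlog limit by the same amount. Condensed from the two socket.c hunks above:

/* in tipc_sk_rcv(): size the backlog limit */
if (sk->sk_backlog.len == 0)
	atomic_set(&tsk->dupl_rcvcnt, 0);
limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
if (sk_add_backlog(sk, buf, limit))
	err = TIPC_ERR_OVERLOAD;

/* in tipc_backlog_rcv(): record the bytes now counted twice */
if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
	atomic_add(truesize, &tsk->dupl_rcvcnt);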
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 749f80c21e22..e96884380732 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1492,10 +1492,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1492 if (len > sk->sk_sndbuf - 32) 1492 if (len > sk->sk_sndbuf - 32)
1493 goto out; 1493 goto out;
1494 1494
1495 if (len > SKB_MAX_ALLOC) 1495 if (len > SKB_MAX_ALLOC) {
1496 data_len = min_t(size_t, 1496 data_len = min_t(size_t,
1497 len - SKB_MAX_ALLOC, 1497 len - SKB_MAX_ALLOC,
1498 MAX_SKB_FRAGS * PAGE_SIZE); 1498 MAX_SKB_FRAGS * PAGE_SIZE);
1499 data_len = PAGE_ALIGN(data_len);
1500
1501 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1502 }
1499 1503
1500 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, 1504 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1501 msg->msg_flags & MSG_DONTWAIT, &err, 1505 msg->msg_flags & MSG_DONTWAIT, &err,
@@ -1670,6 +1674,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1670 1674
1671 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); 1675 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1672 1676
1677 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1678
1673 skb = sock_alloc_send_pskb(sk, size - data_len, data_len, 1679 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1674 msg->msg_flags & MSG_DONTWAIT, &err, 1680 msg->msg_flags & MSG_DONTWAIT, &err,
1675 get_order(UNIX_SKB_FRAGS_SZ)); 1681 get_order(UNIX_SKB_FRAGS_SZ));
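Both af_unix hunks above page-align data_len, the part of the message handed to sock_alloc_send_pskb() as page fragments, so fragments always hold whole pages and the remainder stays in the linear head. A standalone restatement of the datagram-path computation:

static size_t unix_linear_len_sketch(size_t len)
{
	size_t data_len = 0;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t, len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);	/* whole pages only */
	}
	return len - data_len;	/* bytes kept in the linear head */
}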
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 16d08b399210..405f3c4cf70c 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,6 +95,43 @@ config CFG80211_CERTIFICATION_ONUS
95 you are a wireless researcher and are working in a controlled 95 you are a wireless researcher and are working in a controlled
96 and approved environment by your local regulatory agency. 96 and approved environment by your local regulatory agency.
97 97
98config CFG80211_REG_CELLULAR_HINTS
99 bool "cfg80211 regulatory support for cellular base station hints"
100 depends on CFG80211_CERTIFICATION_ONUS
101 ---help---
102 This option enables support for parsing regulatory hints
103 from cellular base stations. If enabled and at least one driver
 104 claims support for parsing cellular base station hints, the
105 regulatory core will allow and parse these regulatory hints.
106 The regulatory core will only apply these regulatory hints on
107 drivers that support this feature. You should only enable this
108 feature if you have tested and validated this feature on your
109 systems.
110
111config CFG80211_REG_RELAX_NO_IR
112 bool "cfg80211 support for NO_IR relaxation"
113 depends on CFG80211_CERTIFICATION_ONUS
114 ---help---
 115 This option enables support for relaxation of the NO_IR flag in
 116 situations where certain regulatory bodies have provided clarifications
 117 on how the relaxation can occur. This feature has an inherent dependency
 118 on userspace features which must have been properly tested and as such
 119 is not enabled by default.
120
121 A relaxation feature example is allowing the operation of a P2P group
122 owner (GO) on channels marked with NO_IR if there is an additional BSS
 123 interface which is associated to an AP which userspace assumes or
 124 confirms to be an authorized master, i.e., with radar detection support
 125 and DFS capabilities. However, note that in order not to create daisy
 126 chain scenarios, this relaxation is not allowed in cases where the BSS
 127 client is associated to a P2P GO, and in addition a P2P GO instantiated
 128 on a channel due to this relaxation should not allow connections from
 129 non-P2P clients.
130
131 The regulatory core will apply these relaxations only for drivers that
132 support this feature by declaring the appropriate channel flags and
133 capabilities in their registration flow.
134
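As the last paragraph notes, the relaxation is opt-in per driver. A hypothetical driver-side registration snippet; the flag name is taken from the chan.c hunk later in this patch:

/* during wiphy setup, before wiphy_register() */
wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;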
98config CFG80211_DEFAULT_PS 135config CFG80211_DEFAULT_PS
99 bool "enable powersave by default" 136 bool "enable powersave by default"
100 depends on CFG80211 137 depends on CFG80211
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 3e02ade508d8..bdad1f951561 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -6,8 +6,8 @@
6#include "rdev-ops.h" 6#include "rdev-ops.h"
7 7
8 8
9static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 9int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
10 struct net_device *dev, bool notify) 10 struct net_device *dev, bool notify)
11{ 11{
12 struct wireless_dev *wdev = dev->ieee80211_ptr; 12 struct wireless_dev *wdev = dev->ieee80211_ptr;
13 int err; 13 int err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 9c9501a35fb5..992b34070bcb 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -326,28 +326,57 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
326 326
327 327
328int cfg80211_chandef_dfs_required(struct wiphy *wiphy, 328int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
329 const struct cfg80211_chan_def *chandef) 329 const struct cfg80211_chan_def *chandef,
330 enum nl80211_iftype iftype)
330{ 331{
331 int width; 332 int width;
332 int r; 333 int ret;
333 334
334 if (WARN_ON(!cfg80211_chandef_valid(chandef))) 335 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
335 return -EINVAL; 336 return -EINVAL;
336 337
337 width = cfg80211_chandef_get_width(chandef); 338 switch (iftype) {
338 if (width < 0) 339 case NL80211_IFTYPE_ADHOC:
339 return -EINVAL; 340 case NL80211_IFTYPE_AP:
341 case NL80211_IFTYPE_P2P_GO:
342 case NL80211_IFTYPE_MESH_POINT:
343 width = cfg80211_chandef_get_width(chandef);
344 if (width < 0)
345 return -EINVAL;
340 346
341 r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1, 347 ret = cfg80211_get_chans_dfs_required(wiphy,
342 width); 348 chandef->center_freq1,
343 if (r) 349 width);
344 return r; 350 if (ret < 0)
351 return ret;
352 else if (ret > 0)
353 return BIT(chandef->width);
345 354
346 if (!chandef->center_freq2) 355 if (!chandef->center_freq2)
347 return 0; 356 return 0;
357
358 ret = cfg80211_get_chans_dfs_required(wiphy,
359 chandef->center_freq2,
360 width);
361 if (ret < 0)
362 return ret;
363 else if (ret > 0)
364 return BIT(chandef->width);
348 365
349 return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2, 366 break;
350 width); 367 case NL80211_IFTYPE_STATION:
368 case NL80211_IFTYPE_P2P_CLIENT:
369 case NL80211_IFTYPE_MONITOR:
370 case NL80211_IFTYPE_AP_VLAN:
371 case NL80211_IFTYPE_WDS:
372 case NL80211_IFTYPE_P2P_DEVICE:
373 break;
374 case NL80211_IFTYPE_UNSPECIFIED:
375 case NUM_NL80211_IFTYPES:
376 WARN_ON(1);
377 }
378
379 return 0;
351} 380}
352EXPORT_SYMBOL(cfg80211_chandef_dfs_required); 381EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
353 382
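With the iftype argument added above, the function now returns a width bitmap instead of a boolean: negative values are errors, zero means no radar detection is required, and a positive value is BIT(chandef->width), which is how the cfg80211_get_chan_state() hunks below consume it. In sketch form:

static int radar_detect_bits_sketch(struct wiphy *wiphy,
				    const struct cfg80211_chan_def *chandef,
				    enum nl80211_iftype iftype,
				    u8 *radar_detect)
{
	int ret = cfg80211_chandef_dfs_required(wiphy, chandef, iftype);

	if (ret < 0)
		return ret;		/* e.g. invalid chandef */
	if (ret > 0)
		*radar_detect |= ret;	/* ret == BIT(chandef->width) */
	return 0;
}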
@@ -587,12 +616,14 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
587 width = 5; 616 width = 5;
588 break; 617 break;
589 case NL80211_CHAN_WIDTH_10: 618 case NL80211_CHAN_WIDTH_10:
619 prohibited_flags |= IEEE80211_CHAN_NO_10MHZ;
590 width = 10; 620 width = 10;
591 break; 621 break;
592 case NL80211_CHAN_WIDTH_20: 622 case NL80211_CHAN_WIDTH_20:
593 if (!ht_cap->ht_supported) 623 if (!ht_cap->ht_supported)
594 return false; 624 return false;
595 case NL80211_CHAN_WIDTH_20_NOHT: 625 case NL80211_CHAN_WIDTH_20_NOHT:
626 prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
596 width = 20; 627 width = 20;
597 break; 628 break;
598 case NL80211_CHAN_WIDTH_40: 629 case NL80211_CHAN_WIDTH_40:
@@ -661,17 +692,111 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
661} 692}
662EXPORT_SYMBOL(cfg80211_chandef_usable); 693EXPORT_SYMBOL(cfg80211_chandef_usable);
663 694
695/*
696 * For GO only, check if the channel can be used under permissive conditions
 697 * mandated by some regulatory bodies, i.e., the channel is marked with
698 * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
699 * associated to an AP on the same channel or on the same UNII band
700 * (assuming that the AP is an authorized master).
 701 * In addition, allow the GO to operate on a channel on which indoor operation is
702 * allowed, iff we are currently operating in an indoor environment.
703 */
704static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
705 struct ieee80211_channel *chan)
706{
707 struct wireless_dev *wdev_iter;
708 struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
709
710 ASSERT_RTNL();
711
712 if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
713 !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
714 return false;
715
716 if (regulatory_indoor_allowed() &&
717 (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
718 return true;
719
720 if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
721 return false;
722
723 /*
724 * Generally, it is possible to rely on another device/driver to allow
725 * the GO concurrent relaxation, however, since the device can further
726 * enforce the relaxation (by doing a similar verifications as this),
727 * and thus fail the GO instantiation, consider only the interfaces of
728 * the current registered device.
729 */
730 list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
731 struct ieee80211_channel *other_chan = NULL;
732 int r1, r2;
733
734 if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
735 !netif_running(wdev_iter->netdev))
736 continue;
737
738 wdev_lock(wdev_iter);
739 if (wdev_iter->current_bss)
740 other_chan = wdev_iter->current_bss->pub.channel;
741 wdev_unlock(wdev_iter);
742
743 if (!other_chan)
744 continue;
745
746 if (chan == other_chan)
747 return true;
748
749 if (chan->band != IEEE80211_BAND_5GHZ)
750 continue;
751
752 r1 = cfg80211_get_unii(chan->center_freq);
753 r2 = cfg80211_get_unii(other_chan->center_freq);
754
755 if (r1 != -EINVAL && r1 == r2) {
756 /*
757 * At some locations channels 149-165 are considered a
758 * bundle, but at other locations, e.g., Indonesia,
759 * channels 149-161 are considered a bundle while
760 * channel 165 is left out and considered to be in a
761 * different bundle. Thus, in case that there is a
762 * station interface connected to an AP on channel 165,
763 * it is assumed that channels 149-161 are allowed for
764 * GO operations. However, having a station interface
 765 * connected to an AP on channels 149-161 does not
766 * allow GO operation on channel 165.
767 */
768 if (chan->center_freq == 5825 &&
769 other_chan->center_freq != 5825)
770 continue;
771 return true;
772 }
773 }
774
775 return false;
776}
777
664bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 778bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
665 struct cfg80211_chan_def *chandef) 779 struct cfg80211_chan_def *chandef,
780 enum nl80211_iftype iftype)
666{ 781{
782 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
667 bool res; 783 bool res;
668 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 784 u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
669 IEEE80211_CHAN_NO_IR |
670 IEEE80211_CHAN_RADAR; 785 IEEE80211_CHAN_RADAR;
671 786
672 trace_cfg80211_reg_can_beacon(wiphy, chandef); 787 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
673 788
674 if (cfg80211_chandef_dfs_required(wiphy, chandef) > 0 && 789 /*
 790 * Under certain conditions suggested by some regulatory bodies,
 791 * a GO can operate on channels marked with IEEE80211_CHAN_NO_IR,
 792 * so set this flag only if such relaxations are not enabled and
 793 * the conditions are not met.
794 */
795 if (iftype != NL80211_IFTYPE_P2P_GO ||
796 !cfg80211_go_permissive_chan(rdev, chandef->chan))
797 prohibited_flags |= IEEE80211_CHAN_NO_IR;
798
799 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
675 cfg80211_chandef_dfs_available(wiphy, chandef)) { 800 cfg80211_chandef_dfs_available(wiphy, chandef)) {
676 /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */ 801 /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
677 prohibited_flags = IEEE80211_CHAN_DISABLED; 802 prohibited_flags = IEEE80211_CHAN_DISABLED;
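
A compact model of the reworked prohibited-flags computation: NO_IR joins the prohibited set unless the interface is a P2P GO that qualifies for the relaxation. The flag values below are illustrative, not the real enum ieee80211_channel_flags:

#include <stdio.h>

#define CHAN_DISABLED 0x01 /* invented values for illustration */
#define CHAN_NO_IR    0x02
#define CHAN_RADAR    0x08

/* Mirrors the gating added to cfg80211_reg_can_beacon(): NO_IR is
 * prohibited unless the interface is a permissive P2P GO. */
static unsigned prohibited(int is_go, int go_permissive)
{
	unsigned flags = CHAN_DISABLED | CHAN_RADAR;

	if (!is_go || !go_permissive)
		flags |= CHAN_NO_IR;
	return flags;
}

int main(void)
{
	printf("GO, permissive: 0x%x\n", prohibited(1, 1)); /* NO_IR allowed */
	printf("STA:            0x%x\n", prohibited(0, 0)); /* NO_IR blocked */
	return 0;
}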
@@ -701,6 +826,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
701 enum cfg80211_chan_mode *chanmode, 826 enum cfg80211_chan_mode *chanmode,
702 u8 *radar_detect) 827 u8 *radar_detect)
703{ 828{
829 int ret;
830
704 *chan = NULL; 831 *chan = NULL;
705 *chanmode = CHAN_MODE_UNDEFINED; 832 *chanmode = CHAN_MODE_UNDEFINED;
706 833
@@ -743,8 +870,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
743 *chan = wdev->chandef.chan; 870 *chan = wdev->chandef.chan;
744 *chanmode = CHAN_MODE_SHARED; 871 *chanmode = CHAN_MODE_SHARED;
745 872
746 if (cfg80211_chandef_dfs_required(wdev->wiphy, 873 ret = cfg80211_chandef_dfs_required(wdev->wiphy,
747 &wdev->chandef)) 874 &wdev->chandef,
875 wdev->iftype);
876 WARN_ON(ret < 0);
877 if (ret > 0)
748 *radar_detect |= BIT(wdev->chandef.width); 878 *radar_detect |= BIT(wdev->chandef.width);
749 } 879 }
750 return; 880 return;
@@ -753,8 +883,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
753 *chan = wdev->chandef.chan; 883 *chan = wdev->chandef.chan;
754 *chanmode = CHAN_MODE_SHARED; 884 *chanmode = CHAN_MODE_SHARED;
755 885
756 if (cfg80211_chandef_dfs_required(wdev->wiphy, 886 ret = cfg80211_chandef_dfs_required(wdev->wiphy,
757 &wdev->chandef)) 887 &wdev->chandef,
888 wdev->iftype);
889 WARN_ON(ret < 0);
890 if (ret > 0)
758 *radar_detect |= BIT(wdev->chandef.width); 891 *radar_detect |= BIT(wdev->chandef.width);
759 } 892 }
760 return; 893 return;
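
The callers above now treat cfg80211_chandef_dfs_required() as a tri-state: negative for an error (warned on), zero when no radar detection is needed, positive when it is. A small sketch of that contract, with invented helpers and illustrative width values:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Illustrative stand-ins for nl80211 channel-width values. */
enum width { W_20 = 1, W_40 = 2, W_80 = 3 };

/* Models the tri-state contract: < 0 on error, 0 when no DFS is
 * required, > 0 when it is. */
static int dfs_required(int radar_chan)
{
	if (radar_chan < 0)
		return -22; /* like -EINVAL */
	return radar_chan ? 1 : 0;
}

int main(void)
{
	unsigned radar_detect = 0;
	int ret = dfs_required(1);

	if (ret < 0)
		fprintf(stderr, "error %d\n", ret); /* WARN_ON() in the patch */
	else if (ret > 0)
		radar_detect |= BIT(W_80); /* as in cfg80211_get_chan_state() */

	printf("radar_detect=0x%x\n", radar_detect);
	return 0;
}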
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 086cddd03ba6..a1c40654dd9b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -69,7 +69,7 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
69 69
70int get_wiphy_idx(struct wiphy *wiphy) 70int get_wiphy_idx(struct wiphy *wiphy)
71{ 71{
72 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 72 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
73 73
74 return rdev->wiphy_idx; 74 return rdev->wiphy_idx;
75} 75}
@@ -130,7 +130,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
130 newname)) 130 newname))
131 pr_err("failed to rename debugfs dir to %s!\n", newname); 131 pr_err("failed to rename debugfs dir to %s!\n", newname);
132 132
133 nl80211_notify_dev_rename(rdev); 133 nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
134 134
135 return 0; 135 return 0;
136} 136}
@@ -210,15 +210,12 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
210 } 210 }
211} 211}
212 212
213static int cfg80211_rfkill_set_block(void *data, bool blocked) 213void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
214{ 214{
215 struct cfg80211_registered_device *rdev = data; 215 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
216 struct wireless_dev *wdev; 216 struct wireless_dev *wdev;
217 217
218 if (!blocked) 218 ASSERT_RTNL();
219 return 0;
220
221 rtnl_lock();
222 219
223 list_for_each_entry(wdev, &rdev->wdev_list, list) { 220 list_for_each_entry(wdev, &rdev->wdev_list, list) {
224 if (wdev->netdev) { 221 if (wdev->netdev) {
@@ -234,7 +231,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
234 break; 231 break;
235 } 232 }
236 } 233 }
234}
235EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces);
236
237static int cfg80211_rfkill_set_block(void *data, bool blocked)
238{
239 struct cfg80211_registered_device *rdev = data;
240
241 if (!blocked)
242 return 0;
237 243
244 rtnl_lock();
245 cfg80211_shutdown_all_interfaces(&rdev->wiphy);
238 rtnl_unlock(); 246 rtnl_unlock();
239 247
240 return 0; 248 return 0;
@@ -260,6 +268,45 @@ static void cfg80211_event_work(struct work_struct *work)
260 rtnl_unlock(); 268 rtnl_unlock();
261} 269}
262 270
271void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
272{
273 struct cfg80211_iface_destroy *item;
274
275 ASSERT_RTNL();
276
277 spin_lock_irq(&rdev->destroy_list_lock);
278 while ((item = list_first_entry_or_null(&rdev->destroy_list,
279 struct cfg80211_iface_destroy,
280 list))) {
281 struct wireless_dev *wdev, *tmp;
282 u32 nlportid = item->nlportid;
283
284 list_del(&item->list);
285 kfree(item);
286 spin_unlock_irq(&rdev->destroy_list_lock);
287
288 list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
289 if (nlportid == wdev->owner_nlportid)
290 rdev_del_virtual_intf(rdev, wdev);
291 }
292
293 spin_lock_irq(&rdev->destroy_list_lock);
294 }
295 spin_unlock_irq(&rdev->destroy_list_lock);
296}
297
298static void cfg80211_destroy_iface_wk(struct work_struct *work)
299{
300 struct cfg80211_registered_device *rdev;
301
302 rdev = container_of(work, struct cfg80211_registered_device,
303 destroy_work);
304
305 rtnl_lock();
306 cfg80211_destroy_ifaces(rdev);
307 rtnl_unlock();
308}
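
The drain loop in cfg80211_destroy_ifaces() pops one entry at a time so the spinlock is not held across rdev_del_virtual_intf(), which can sleep. A userspace model of the same pop-unlock-process-relock pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item { struct item *next; unsigned port; };

static struct item *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct item *it = head;
		head = it->next;
		pthread_mutex_unlock(&lock); /* drop lock for sleepable work */

		printf("destroying ifaces owned by port %u\n", it->port);
		free(it);

		pthread_mutex_lock(&lock); /* retake before the next pop */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (unsigned p = 1; p <= 3; p++) {
		struct item *it = malloc(sizeof(*it));
		it->port = p;
		it->next = head;
		head = it;
	}
	drain();
	return 0;
}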
309
263/* exported functions */ 310/* exported functions */
264 311
265struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) 312struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
@@ -318,6 +365,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
318 rdev->wiphy.dev.class = &ieee80211_class; 365 rdev->wiphy.dev.class = &ieee80211_class;
319 rdev->wiphy.dev.platform_data = rdev; 366 rdev->wiphy.dev.platform_data = rdev;
320 367
368 INIT_LIST_HEAD(&rdev->destroy_list);
369 spin_lock_init(&rdev->destroy_list_lock);
370 INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
371
321#ifdef CONFIG_CFG80211_DEFAULT_PS 372#ifdef CONFIG_CFG80211_DEFAULT_PS
322 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 373 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
323#endif 374#endif
@@ -351,6 +402,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
351 rdev->wiphy.rts_threshold = (u32) -1; 402 rdev->wiphy.rts_threshold = (u32) -1;
352 rdev->wiphy.coverage_class = 0; 403 rdev->wiphy.coverage_class = 0;
353 404
405 rdev->wiphy.max_num_csa_counters = 1;
406
354 return &rdev->wiphy; 407 return &rdev->wiphy;
355} 408}
356EXPORT_SYMBOL(wiphy_new); 409EXPORT_SYMBOL(wiphy_new);
@@ -396,10 +449,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
396 for (j = 0; j < c->n_limits; j++) { 449 for (j = 0; j < c->n_limits; j++) {
397 u16 types = c->limits[j].types; 450 u16 types = c->limits[j].types;
398 451
399 /* 452 /* interface types shouldn't overlap */
400 * interface types shouldn't overlap, this is
401 * used in cfg80211_can_change_interface()
402 */
403 if (WARN_ON(types & all_iftypes)) 453 if (WARN_ON(types & all_iftypes))
404 return -EINVAL; 454 return -EINVAL;
405 all_iftypes |= types; 455 all_iftypes |= types;
@@ -435,7 +485,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
435 485
436int wiphy_register(struct wiphy *wiphy) 486int wiphy_register(struct wiphy *wiphy)
437{ 487{
438 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 488 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
439 int res; 489 int res;
440 enum ieee80211_band band; 490 enum ieee80211_band band;
441 struct ieee80211_supported_band *sband; 491 struct ieee80211_supported_band *sband;
@@ -610,13 +660,15 @@ int wiphy_register(struct wiphy *wiphy)
610 return res; 660 return res;
611 } 661 }
612 662
663 nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
664
613 return 0; 665 return 0;
614} 666}
615EXPORT_SYMBOL(wiphy_register); 667EXPORT_SYMBOL(wiphy_register);
616 668
617void wiphy_rfkill_start_polling(struct wiphy *wiphy) 669void wiphy_rfkill_start_polling(struct wiphy *wiphy)
618{ 670{
619 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 671 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
620 672
621 if (!rdev->ops->rfkill_poll) 673 if (!rdev->ops->rfkill_poll)
622 return; 674 return;
@@ -627,7 +679,7 @@ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
627 679
628void wiphy_rfkill_stop_polling(struct wiphy *wiphy) 680void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
629{ 681{
630 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 682 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
631 683
632 rfkill_pause_polling(rdev->rfkill); 684 rfkill_pause_polling(rdev->rfkill);
633} 685}
@@ -635,7 +687,7 @@ EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
635 687
636void wiphy_unregister(struct wiphy *wiphy) 688void wiphy_unregister(struct wiphy *wiphy)
637{ 689{
638 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 690 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
639 691
640 wait_event(rdev->dev_wait, ({ 692 wait_event(rdev->dev_wait, ({
641 int __count; 693 int __count;
@@ -648,9 +700,10 @@ void wiphy_unregister(struct wiphy *wiphy)
648 rfkill_unregister(rdev->rfkill); 700 rfkill_unregister(rdev->rfkill);
649 701
650 rtnl_lock(); 702 rtnl_lock();
703 nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
651 rdev->wiphy.registered = false; 704 rdev->wiphy.registered = false;
652 705
653 BUG_ON(!list_empty(&rdev->wdev_list)); 706 WARN_ON(!list_empty(&rdev->wdev_list));
654 707
655 /* 708 /*
656 * First remove the hardware from everywhere, this makes 709 * First remove the hardware from everywhere, this makes
@@ -675,6 +728,7 @@ void wiphy_unregister(struct wiphy *wiphy)
675 cancel_work_sync(&rdev->conn_work); 728 cancel_work_sync(&rdev->conn_work);
676 flush_work(&rdev->event_work); 729 flush_work(&rdev->event_work);
677 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); 730 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
731 flush_work(&rdev->destroy_work);
678 732
679#ifdef CONFIG_PM 733#ifdef CONFIG_PM
680 if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) 734 if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -707,7 +761,7 @@ EXPORT_SYMBOL(wiphy_free);
707 761
708void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) 762void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
709{ 763{
710 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 764 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
711 765
712 if (rfkill_set_hw_state(rdev->rfkill, blocked)) 766 if (rfkill_set_hw_state(rdev->rfkill, blocked))
713 schedule_work(&rdev->rfkill_sync); 767 schedule_work(&rdev->rfkill_sync);
@@ -716,7 +770,7 @@ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
716 770
717void cfg80211_unregister_wdev(struct wireless_dev *wdev) 771void cfg80211_unregister_wdev(struct wireless_dev *wdev)
718{ 772{
719 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 773 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
720 774
721 ASSERT_RTNL(); 775 ASSERT_RTNL();
722 776
@@ -751,23 +805,23 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
751 rdev->num_running_monitor_ifaces += num; 805 rdev->num_running_monitor_ifaces += num;
752} 806}
753 807
754void cfg80211_leave(struct cfg80211_registered_device *rdev, 808void __cfg80211_leave(struct cfg80211_registered_device *rdev,
755 struct wireless_dev *wdev) 809 struct wireless_dev *wdev)
756{ 810{
757 struct net_device *dev = wdev->netdev; 811 struct net_device *dev = wdev->netdev;
758 812
759 ASSERT_RTNL(); 813 ASSERT_RTNL();
814 ASSERT_WDEV_LOCK(wdev);
760 815
761 switch (wdev->iftype) { 816 switch (wdev->iftype) {
762 case NL80211_IFTYPE_ADHOC: 817 case NL80211_IFTYPE_ADHOC:
763 cfg80211_leave_ibss(rdev, dev, true); 818 __cfg80211_leave_ibss(rdev, dev, true);
764 break; 819 break;
765 case NL80211_IFTYPE_P2P_CLIENT: 820 case NL80211_IFTYPE_P2P_CLIENT:
766 case NL80211_IFTYPE_STATION: 821 case NL80211_IFTYPE_STATION:
767 if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev) 822 if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev)
768 __cfg80211_stop_sched_scan(rdev, false); 823 __cfg80211_stop_sched_scan(rdev, false);
769 824
770 wdev_lock(wdev);
771#ifdef CONFIG_CFG80211_WEXT 825#ifdef CONFIG_CFG80211_WEXT
772 kfree(wdev->wext.ie); 826 kfree(wdev->wext.ie);
773 wdev->wext.ie = NULL; 827 wdev->wext.ie = NULL;
@@ -776,32 +830,60 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
776#endif 830#endif
777 cfg80211_disconnect(rdev, dev, 831 cfg80211_disconnect(rdev, dev,
778 WLAN_REASON_DEAUTH_LEAVING, true); 832 WLAN_REASON_DEAUTH_LEAVING, true);
779 wdev_unlock(wdev);
780 break; 833 break;
781 case NL80211_IFTYPE_MESH_POINT: 834 case NL80211_IFTYPE_MESH_POINT:
782 cfg80211_leave_mesh(rdev, dev); 835 __cfg80211_leave_mesh(rdev, dev);
783 break; 836 break;
784 case NL80211_IFTYPE_AP: 837 case NL80211_IFTYPE_AP:
785 case NL80211_IFTYPE_P2P_GO: 838 case NL80211_IFTYPE_P2P_GO:
786 cfg80211_stop_ap(rdev, dev, true); 839 __cfg80211_stop_ap(rdev, dev, true);
787 break; 840 break;
788 default: 841 default:
789 break; 842 break;
790 } 843 }
791} 844}
792 845
846void cfg80211_leave(struct cfg80211_registered_device *rdev,
847 struct wireless_dev *wdev)
848{
849 wdev_lock(wdev);
850 __cfg80211_leave(rdev, wdev);
851 wdev_unlock(wdev);
852}
853
854void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
855 gfp_t gfp)
856{
857 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
858 struct cfg80211_event *ev;
859 unsigned long flags;
860
861 trace_cfg80211_stop_iface(wiphy, wdev);
862
863 ev = kzalloc(sizeof(*ev), gfp);
864 if (!ev)
865 return;
866
867 ev->type = EVENT_STOPPED;
868
869 spin_lock_irqsave(&wdev->event_lock, flags);
870 list_add_tail(&ev->list, &wdev->event_list);
871 spin_unlock_irqrestore(&wdev->event_lock, flags);
872 queue_work(cfg80211_wq, &rdev->event_work);
873}
874EXPORT_SYMBOL(cfg80211_stop_iface);
875
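A userspace model of the deferral pattern used by cfg80211_stop_iface(): allocate the event (the gfp hint is dropped in this model), link it in under the event lock, then let a worker handle it later in process context. All names below are invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum ev_type { EVENT_STOPPED = 1 };

struct event { struct event *next; enum ev_type type; };

static struct event *events;
static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_stop_event(void)
{
	struct event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return; /* like the kernel code: silently drop on OOM */
	ev->type = EVENT_STOPPED;

	pthread_mutex_lock(&ev_lock);
	ev->next = events; /* list_add_tail() in the real code */
	events = ev;
	pthread_mutex_unlock(&ev_lock);
	/* queue_work(cfg80211_wq, &rdev->event_work) would go here */
}

int main(void)
{
	queue_stop_event();
	if (events)
		printf("queued event type=%d\n", events->type);
	free(events);
	return 0;
}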
793static int cfg80211_netdev_notifier_call(struct notifier_block *nb, 876static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
794 unsigned long state, void *ptr) 877 unsigned long state, void *ptr)
795{ 878{
796 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 879 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
797 struct wireless_dev *wdev = dev->ieee80211_ptr; 880 struct wireless_dev *wdev = dev->ieee80211_ptr;
798 struct cfg80211_registered_device *rdev; 881 struct cfg80211_registered_device *rdev;
799 int ret;
800 882
801 if (!wdev) 883 if (!wdev)
802 return NOTIFY_DONE; 884 return NOTIFY_DONE;
803 885
804 rdev = wiphy_to_dev(wdev->wiphy); 886 rdev = wiphy_to_rdev(wdev->wiphy);
805 887
806 WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); 888 WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED);
807 889
@@ -959,13 +1041,14 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
959 case NETDEV_PRE_UP: 1041 case NETDEV_PRE_UP:
960 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 1042 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
961 return notifier_from_errno(-EOPNOTSUPP); 1043 return notifier_from_errno(-EOPNOTSUPP);
962 ret = cfg80211_can_add_interface(rdev, wdev->iftype); 1044 if (rfkill_blocked(rdev->rfkill))
963 if (ret) 1045 return notifier_from_errno(-ERFKILL);
964 return notifier_from_errno(ret);
965 break; 1046 break;
1047 default:
1048 return NOTIFY_DONE;
966 } 1049 }
967 1050
968 return NOTIFY_DONE; 1051 return NOTIFY_OK;
969} 1052}
970 1053
971static struct notifier_block cfg80211_netdev_notifier = { 1054static struct notifier_block cfg80211_netdev_notifier = {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5b1fdcadd469..e9afbf10e756 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -80,13 +80,17 @@ struct cfg80211_registered_device {
80 80
81 struct cfg80211_coalesce *coalesce; 81 struct cfg80211_coalesce *coalesce;
82 82
83 spinlock_t destroy_list_lock;
84 struct list_head destroy_list;
85 struct work_struct destroy_work;
86
83 /* must be last because of the way we do wiphy_priv(), 87 /* must be last because of the way we do wiphy_priv(),
84 * and it should at least be aligned to NETDEV_ALIGN */ 88 * and it should at least be aligned to NETDEV_ALIGN */
85 struct wiphy wiphy __aligned(NETDEV_ALIGN); 89 struct wiphy wiphy __aligned(NETDEV_ALIGN);
86}; 90};
87 91
88static inline 92static inline
89struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) 93struct cfg80211_registered_device *wiphy_to_rdev(struct wiphy *wiphy)
90{ 94{
91 BUG_ON(!wiphy); 95 BUG_ON(!wiphy);
92 return container_of(wiphy, struct cfg80211_registered_device, wiphy); 96 return container_of(wiphy, struct cfg80211_registered_device, wiphy);
@@ -181,6 +185,7 @@ enum cfg80211_event_type {
181 EVENT_ROAMED, 185 EVENT_ROAMED,
182 EVENT_DISCONNECTED, 186 EVENT_DISCONNECTED,
183 EVENT_IBSS_JOINED, 187 EVENT_IBSS_JOINED,
188 EVENT_STOPPED,
184}; 189};
185 190
186struct cfg80211_event { 191struct cfg80211_event {
@@ -232,6 +237,13 @@ struct cfg80211_beacon_registration {
232 u32 nlportid; 237 u32 nlportid;
233}; 238};
234 239
240struct cfg80211_iface_destroy {
241 struct list_head list;
242 u32 nlportid;
243};
244
245void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
246
235/* free object */ 247/* free object */
236void cfg80211_dev_free(struct cfg80211_registered_device *rdev); 248void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
237 249
@@ -240,8 +252,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
240 252
241void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 253void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
242 254
243void cfg80211_bss_expire(struct cfg80211_registered_device *dev); 255void cfg80211_bss_expire(struct cfg80211_registered_device *rdev);
244void cfg80211_bss_age(struct cfg80211_registered_device *dev, 256void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
245 unsigned long age_secs); 257 unsigned long age_secs);
246 258
247/* IBSS */ 259/* IBSS */
@@ -270,6 +282,8 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
270 struct net_device *dev, 282 struct net_device *dev,
271 struct mesh_setup *setup, 283 struct mesh_setup *setup,
272 const struct mesh_config *conf); 284 const struct mesh_config *conf);
285int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
286 struct net_device *dev);
273int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 287int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
274 struct net_device *dev); 288 struct net_device *dev);
275int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, 289int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
@@ -277,6 +291,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
277 struct cfg80211_chan_def *chandef); 291 struct cfg80211_chan_def *chandef);
278 292
279/* AP */ 293/* AP */
294int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
295 struct net_device *dev, bool notify);
280int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 296int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
281 struct net_device *dev, bool notify); 297 struct net_device *dev, bool notify);
282 298
@@ -401,35 +417,6 @@ unsigned int
401cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, 417cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
402 const struct cfg80211_chan_def *chandef); 418 const struct cfg80211_chan_def *chandef);
403 419
404static inline int
405cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
406 struct wireless_dev *wdev,
407 enum nl80211_iftype iftype)
408{
409 return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
410 CHAN_MODE_UNDEFINED, 0);
411}
412
413static inline int
414cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
415 enum nl80211_iftype iftype)
416{
417 if (rfkill_blocked(rdev->rfkill))
418 return -ERFKILL;
419
420 return cfg80211_can_change_interface(rdev, NULL, iftype);
421}
422
423static inline int
424cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
425 struct wireless_dev *wdev,
426 struct ieee80211_channel *chan,
427 enum cfg80211_chan_mode chanmode)
428{
429 return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
430 chan, chanmode, 0);
431}
432
433static inline unsigned int elapsed_jiffies_msecs(unsigned long start) 420static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
434{ 421{
435 unsigned long end = jiffies; 422 unsigned long end = jiffies;
@@ -459,6 +446,8 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
459void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 446void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
460 enum nl80211_iftype iftype, int num); 447 enum nl80211_iftype iftype, int num);
461 448
449void __cfg80211_leave(struct cfg80211_registered_device *rdev,
450 struct wireless_dev *wdev);
462void cfg80211_leave(struct cfg80211_registered_device *rdev, 451void cfg80211_leave(struct cfg80211_registered_device *rdev,
463 struct wireless_dev *wdev); 452 struct wireless_dev *wdev);
464 453
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index e37862f1b127..d4860bfc020e 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -43,7 +43,7 @@ static void cfg80211_get_ringparam(struct net_device *dev,
43 struct ethtool_ringparam *rp) 43 struct ethtool_ringparam *rp)
44{ 44{
45 struct wireless_dev *wdev = dev->ieee80211_ptr; 45 struct wireless_dev *wdev = dev->ieee80211_ptr;
46 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 46 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
47 47
48 memset(rp, 0, sizeof(*rp)); 48 memset(rp, 0, sizeof(*rp));
49 49
@@ -56,7 +56,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
56 struct ethtool_ringparam *rp) 56 struct ethtool_ringparam *rp)
57{ 57{
58 struct wireless_dev *wdev = dev->ieee80211_ptr; 58 struct wireless_dev *wdev = dev->ieee80211_ptr;
59 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 59 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
60 60
61 if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) 61 if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
62 return -EINVAL; 62 return -EINVAL;
@@ -70,7 +70,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
70static int cfg80211_get_sset_count(struct net_device *dev, int sset) 70static int cfg80211_get_sset_count(struct net_device *dev, int sset)
71{ 71{
72 struct wireless_dev *wdev = dev->ieee80211_ptr; 72 struct wireless_dev *wdev = dev->ieee80211_ptr;
73 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 73 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
74 if (rdev->ops->get_et_sset_count) 74 if (rdev->ops->get_et_sset_count)
75 return rdev_get_et_sset_count(rdev, dev, sset); 75 return rdev_get_et_sset_count(rdev, dev, sset);
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
@@ -80,7 +80,7 @@ static void cfg80211_get_stats(struct net_device *dev,
80 struct ethtool_stats *stats, u64 *data) 80 struct ethtool_stats *stats, u64 *data)
81{ 81{
82 struct wireless_dev *wdev = dev->ieee80211_ptr; 82 struct wireless_dev *wdev = dev->ieee80211_ptr;
83 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 83 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
84 if (rdev->ops->get_et_stats) 84 if (rdev->ops->get_et_stats)
85 rdev_get_et_stats(rdev, dev, stats, data); 85 rdev_get_et_stats(rdev, dev, stats, data);
86} 86}
@@ -88,7 +88,7 @@ static void cfg80211_get_stats(struct net_device *dev,
88static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) 88static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
89{ 89{
90 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
91 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 91 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
92 if (rdev->ops->get_et_strings) 92 if (rdev->ops->get_et_strings)
93 rdev_get_et_strings(rdev, dev, sset, data); 93 rdev_get_et_strings(rdev, dev, sset, data);
94} 94}
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index b35da8dc85de..40c37fc5b67c 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -68,17 +68,7 @@ function parse_reg_rule()
68 sub(/,/, "", units) 68 sub(/,/, "", units)
69 dfs_cac = $9 69 dfs_cac = $9
70 if (units == "mW") { 70 if (units == "mW") {
71 if (power == 100) { 71 power = 10 * log(power)/log(10)
72 power = 20
73 } else if (power == 200) {
74 power = 23
75 } else if (power == 500) {
76 power = 27
77 } else if (power == 1000) {
78 power = 30
79 } else {
80 print "Unknown power value in database!"
81 }
82 } else { 72 } else {
83 dfs_cac = $8 73 dfs_cac = $8
84 } 74 }
@@ -117,7 +107,7 @@ function parse_reg_rule()
117 107
118 } 108 }
119 flags = flags "0" 109 flags = flags "0"
120 printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags 110 printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %.0f, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags
121 rules++ 111 rules++
122} 112}
123 113
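The replaced lookup table was just dBm = 10 * log10(mW) for the four listed values; the awk change computes that directly, and the new %.0f format rounds to the nearest integer. A quick check (compile with -lm):

#include <math.h>
#include <stdio.h>

/* Verifies that the generic conversion reproduces the hard-coded
 * table it replaces: 100->20, 200->23, 500->27, 1000->30 dBm. */
int main(void)
{
	const double mw[] = { 100, 200, 500, 1000 };

	for (int i = 0; i < 4; i++)
		printf("%4.0f mW -> %.0f dBm\n", mw[i], 10.0 * log10(mw[i]));
	return 0;
}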
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index a6b5bdad039c..8f345da3ea5f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -45,7 +45,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
45 45
46 cfg80211_upload_connect_keys(wdev); 46 cfg80211_upload_connect_keys(wdev);
47 47
48 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, 48 nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
49 GFP_KERNEL); 49 GFP_KERNEL);
50#ifdef CONFIG_CFG80211_WEXT 50#ifdef CONFIG_CFG80211_WEXT
51 memset(&wrqu, 0, sizeof(wrqu)); 51 memset(&wrqu, 0, sizeof(wrqu));
@@ -58,7 +58,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
58 struct ieee80211_channel *channel, gfp_t gfp) 58 struct ieee80211_channel *channel, gfp_t gfp)
59{ 59{
60 struct wireless_dev *wdev = dev->ieee80211_ptr; 60 struct wireless_dev *wdev = dev->ieee80211_ptr;
61 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 61 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
62 struct cfg80211_event *ev; 62 struct cfg80211_event *ev;
63 unsigned long flags; 63 unsigned long flags;
64 64
@@ -88,8 +88,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
88 struct cfg80211_cached_keys *connkeys) 88 struct cfg80211_cached_keys *connkeys)
89{ 89{
90 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
91 struct ieee80211_channel *check_chan;
92 u8 radar_detect_width = 0;
93 int err; 91 int err;
94 92
95 ASSERT_WDEV_LOCK(wdev); 93 ASSERT_WDEV_LOCK(wdev);
@@ -126,28 +124,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
126#ifdef CONFIG_CFG80211_WEXT 124#ifdef CONFIG_CFG80211_WEXT
127 wdev->wext.ibss.chandef = params->chandef; 125 wdev->wext.ibss.chandef = params->chandef;
128#endif 126#endif
129 check_chan = params->chandef.chan;
130 if (params->userspace_handles_dfs) {
131 /* Check for radar even if the current channel is not
132 * a radar channel - it might decide to change to DFS
133 * channel later.
134 */
135 radar_detect_width = BIT(params->chandef.width);
136 }
137
138 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
139 check_chan,
140 (params->channel_fixed &&
141 !radar_detect_width)
142 ? CHAN_MODE_SHARED
143 : CHAN_MODE_EXCLUSIVE,
144 radar_detect_width);
145
146 if (err) {
147 wdev->connect_keys = NULL;
148 return err;
149 }
150
151 err = rdev_join_ibss(rdev, dev, params); 127 err = rdev_join_ibss(rdev, dev, params);
152 if (err) { 128 if (err) {
153 wdev->connect_keys = NULL; 129 wdev->connect_keys = NULL;
@@ -180,7 +156,7 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
180static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) 156static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
181{ 157{
182 struct wireless_dev *wdev = dev->ieee80211_ptr; 158 struct wireless_dev *wdev = dev->ieee80211_ptr;
183 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 159 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
184 int i; 160 int i;
185 161
186 ASSERT_WDEV_LOCK(wdev); 162 ASSERT_WDEV_LOCK(wdev);
@@ -335,7 +311,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
335 struct iw_freq *wextfreq, char *extra) 311 struct iw_freq *wextfreq, char *extra)
336{ 312{
337 struct wireless_dev *wdev = dev->ieee80211_ptr; 313 struct wireless_dev *wdev = dev->ieee80211_ptr;
338 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 314 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
339 struct ieee80211_channel *chan = NULL; 315 struct ieee80211_channel *chan = NULL;
340 int err, freq; 316 int err, freq;
341 317
@@ -346,7 +322,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
346 if (!rdev->ops->join_ibss) 322 if (!rdev->ops->join_ibss)
347 return -EOPNOTSUPP; 323 return -EOPNOTSUPP;
348 324
349 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 325 freq = cfg80211_wext_freq(wextfreq);
350 if (freq < 0) 326 if (freq < 0)
351 return freq; 327 return freq;
352 328
@@ -420,7 +396,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
420 struct iw_point *data, char *ssid) 396 struct iw_point *data, char *ssid)
421{ 397{
422 struct wireless_dev *wdev = dev->ieee80211_ptr; 398 struct wireless_dev *wdev = dev->ieee80211_ptr;
423 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 399 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
424 size_t len = data->length; 400 size_t len = data->length;
425 int err; 401 int err;
426 402
@@ -444,8 +420,8 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
444 if (len > 0 && ssid[len - 1] == '\0') 420 if (len > 0 && ssid[len - 1] == '\0')
445 len--; 421 len--;
446 422
423 memcpy(wdev->ssid, ssid, len);
447 wdev->wext.ibss.ssid = wdev->ssid; 424 wdev->wext.ibss.ssid = wdev->ssid;
448 memcpy(wdev->wext.ibss.ssid, ssid, len);
449 wdev->wext.ibss.ssid_len = len; 425 wdev->wext.ibss.ssid_len = len;
450 426
451 wdev_lock(wdev); 427 wdev_lock(wdev);
@@ -487,7 +463,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
487 struct sockaddr *ap_addr, char *extra) 463 struct sockaddr *ap_addr, char *extra)
488{ 464{
489 struct wireless_dev *wdev = dev->ieee80211_ptr; 465 struct wireless_dev *wdev = dev->ieee80211_ptr;
490 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 466 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
491 u8 *bssid = ap_addr->sa_data; 467 u8 *bssid = ap_addr->sa_data;
492 int err; 468 int err;
493 469
@@ -505,6 +481,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
505 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) 481 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
506 bssid = NULL; 482 bssid = NULL;
507 483
484 if (bssid && !is_valid_ether_addr(bssid))
485 return -EINVAL;
486
508 /* both automatic */ 487 /* both automatic */
509 if (!bssid && !wdev->wext.ibss.bssid) 488 if (!bssid && !wdev->wext.ibss.bssid)
510 return 0; 489 return 0;
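
The new check closes a gap: zero and broadcast addresses already meant "automatic", but a multicast BSSID previously passed through unrejected. A userspace rendering of the three predicates, modeled after etherdevice.h but simplified, not the kernel implementations:

#include <stdbool.h>
#include <stdio.h>

static bool is_zero(const unsigned char *a)
{
	return !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
}

static bool is_broadcast(const unsigned char *a)
{
	return (a[0] & a[1] & a[2] & a[3] & a[4] & a[5]) == 0xff;
}

static bool is_valid(const unsigned char *a)
{
	/* unicast (LSB of first octet clear) and not all-zero */
	return !(a[0] & 1) && !is_zero(a);
}

int main(void)
{
	unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* multicast is neither zero nor broadcast, so before this patch
	 * it slipped through; now it is rejected with -EINVAL */
	printf("zero=%d bcast=%d valid=%d\n",
	       is_zero(mcast), is_broadcast(mcast), is_valid(mcast));
	return 0;
}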
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5af5cc6b2c4c..092300b30c37 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -99,7 +99,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
99 const struct mesh_config *conf) 99 const struct mesh_config *conf)
100{ 100{
101 struct wireless_dev *wdev = dev->ieee80211_ptr; 101 struct wireless_dev *wdev = dev->ieee80211_ptr;
102 u8 radar_detect_width = 0;
103 int err; 102 int err;
104 103
105 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); 104 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
@@ -175,22 +174,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
175 scan_width); 174 scan_width);
176 } 175 }
177 176
178 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) 177 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef,
178 NL80211_IFTYPE_MESH_POINT))
179 return -EINVAL; 179 return -EINVAL;
180 180
181 err = cfg80211_chandef_dfs_required(wdev->wiphy, &setup->chandef);
182 if (err < 0)
183 return err;
184 if (err)
185 radar_detect_width = BIT(setup->chandef.width);
186
187 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
188 setup->chandef.chan,
189 CHAN_MODE_SHARED,
190 radar_detect_width);
191 if (err)
192 return err;
193
194 err = rdev_join_mesh(rdev, dev, conf, setup); 181 err = rdev_join_mesh(rdev, dev, conf, setup);
195 if (!err) { 182 if (!err) {
196 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); 183 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
@@ -236,17 +223,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
236 if (!netif_running(wdev->netdev)) 223 if (!netif_running(wdev->netdev))
237 return -ENETDOWN; 224 return -ENETDOWN;
238 225
239 /* cfg80211_can_use_chan() calls
240 * cfg80211_can_use_iftype_chan() with no radar
241 * detection, so if we're trying to use a radar
242 * channel here, something is wrong.
243 */
244 WARN_ON_ONCE(chandef->chan->flags & IEEE80211_CHAN_RADAR);
245 err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
246 CHAN_MODE_SHARED);
247 if (err)
248 return err;
249
250 err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev, 226 err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
251 chandef->chan); 227 chandef->chan);
252 if (!err) 228 if (!err)
@@ -262,8 +238,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
262 return 0; 238 return 0;
263} 239}
264 240
265static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 241int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
266 struct net_device *dev) 242 struct net_device *dev)
267{ 243{
268 struct wireless_dev *wdev = dev->ieee80211_ptr; 244 struct wireless_dev *wdev = dev->ieee80211_ptr;
269 int err; 245 int err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c52ff59a3e96..266766b8d80b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -23,7 +23,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
23{ 23{
24 struct wireless_dev *wdev = dev->ieee80211_ptr; 24 struct wireless_dev *wdev = dev->ieee80211_ptr;
25 struct wiphy *wiphy = wdev->wiphy; 25 struct wiphy *wiphy = wdev->wiphy;
26 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 26 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
27 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 27 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
28 u8 *ie = mgmt->u.assoc_resp.variable; 28 u8 *ie = mgmt->u.assoc_resp.variable;
29 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); 29 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
54static void cfg80211_process_auth(struct wireless_dev *wdev, 54static void cfg80211_process_auth(struct wireless_dev *wdev,
55 const u8 *buf, size_t len) 55 const u8 *buf, size_t len)
56{ 56{
57 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 57 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
58 58
59 nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); 59 nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
60 cfg80211_sme_rx_auth(wdev, buf, len); 60 cfg80211_sme_rx_auth(wdev, buf, len);
@@ -63,7 +63,7 @@ static void cfg80211_process_auth(struct wireless_dev *wdev,
63static void cfg80211_process_deauth(struct wireless_dev *wdev, 63static void cfg80211_process_deauth(struct wireless_dev *wdev,
64 const u8 *buf, size_t len) 64 const u8 *buf, size_t len)
65{ 65{
66 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 66 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
67 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 67 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
68 const u8 *bssid = mgmt->bssid; 68 const u8 *bssid = mgmt->bssid;
69 u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 69 u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
@@ -82,7 +82,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
82static void cfg80211_process_disassoc(struct wireless_dev *wdev, 82static void cfg80211_process_disassoc(struct wireless_dev *wdev,
83 const u8 *buf, size_t len) 83 const u8 *buf, size_t len)
84{ 84{
85 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 85 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
86 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 86 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
87 const u8 *bssid = mgmt->bssid; 87 const u8 *bssid = mgmt->bssid;
88 u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 88 u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -123,7 +123,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr)
123{ 123{
124 struct wireless_dev *wdev = dev->ieee80211_ptr; 124 struct wireless_dev *wdev = dev->ieee80211_ptr;
125 struct wiphy *wiphy = wdev->wiphy; 125 struct wiphy *wiphy = wdev->wiphy;
126 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 126 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
127 127
128 trace_cfg80211_send_auth_timeout(dev, addr); 128 trace_cfg80211_send_auth_timeout(dev, addr);
129 129
@@ -136,7 +136,7 @@ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
136{ 136{
137 struct wireless_dev *wdev = dev->ieee80211_ptr; 137 struct wireless_dev *wdev = dev->ieee80211_ptr;
138 struct wiphy *wiphy = wdev->wiphy; 138 struct wiphy *wiphy = wdev->wiphy;
139 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 139 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
140 140
141 trace_cfg80211_send_assoc_timeout(dev, bss->bssid); 141 trace_cfg80211_send_assoc_timeout(dev, bss->bssid);
142 142
@@ -172,7 +172,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
172 const u8 *tsc, gfp_t gfp) 172 const u8 *tsc, gfp_t gfp)
173{ 173{
174 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 174 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
175 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 175 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
176#ifdef CONFIG_CFG80211_WEXT 176#ifdef CONFIG_CFG80211_WEXT
177 union iwreq_data wrqu; 177 union iwreq_data wrqu;
178 char *buf = kmalloc(128, gfp); 178 char *buf = kmalloc(128, gfp);
@@ -233,14 +233,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
233 if (!req.bss) 233 if (!req.bss)
234 return -ENOENT; 234 return -ENOENT;
235 235
236 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
237 CHAN_MODE_SHARED);
238 if (err)
239 goto out;
240
241 err = rdev_auth(rdev, dev, &req); 236 err = rdev_auth(rdev, dev, &req);
242 237
243out:
244 cfg80211_put_bss(&rdev->wiphy, req.bss); 238 cfg80211_put_bss(&rdev->wiphy, req.bss);
245 return err; 239 return err;
246} 240}
@@ -306,16 +300,10 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
306 if (!req->bss) 300 if (!req->bss)
307 return -ENOENT; 301 return -ENOENT;
308 302
309 err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
310 if (err)
311 goto out;
312
313 err = rdev_assoc(rdev, dev, req); 303 err = rdev_assoc(rdev, dev, req);
314 if (!err) 304 if (!err)
315 cfg80211_hold_bss(bss_from_pub(req->bss)); 305 cfg80211_hold_bss(bss_from_pub(req->bss));
316 306 else
317out:
318 if (err)
319 cfg80211_put_bss(&rdev->wiphy, req->bss); 307 cfg80211_put_bss(&rdev->wiphy, req->bss);
320 308
321 return err; 309 return err;
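
After this hunk, the BSS reference obtained by the lookup is either converted into a held association reference on success or dropped on failure, with no separate out label. A toy model of that ownership rule (all names invented):

#include <stdio.h>

struct bss { int refs; };

static void hold(struct bss *b) { b->refs++; }
static void put(struct bss *b)  { b->refs--; }

/* Mirrors cfg80211_mlme_assoc(): keep the reference when the driver
 * call succeeds, release the lookup reference when it fails. */
static int assoc(struct bss *b, int rdev_ret)
{
	if (!rdev_ret)
		hold(b);	/* association now also holds the BSS */
	else
		put(b);		/* drop the lookup reference on error */
	return rdev_ret;
}

int main(void)
{
	struct bss b = { .refs = 1 };	/* reference from the lookup */

	assoc(&b, 0);
	printf("after success: refs=%d\n", b.refs);	/* 2 */
	return 0;
}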
@@ -414,7 +402,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
414 int match_len) 402 int match_len)
415{ 403{
416 struct wiphy *wiphy = wdev->wiphy; 404 struct wiphy *wiphy = wdev->wiphy;
417 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 405 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
418 struct cfg80211_mgmt_registration *reg, *nreg; 406 struct cfg80211_mgmt_registration *reg, *nreg;
419 int err = 0; 407 int err = 0;
420 u16 mgmt_type; 408 u16 mgmt_type;
@@ -473,7 +461,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
473void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) 461void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
474{ 462{
475 struct wiphy *wiphy = wdev->wiphy; 463 struct wiphy *wiphy = wdev->wiphy;
476 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 464 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
477 struct cfg80211_mgmt_registration *reg, *tmp; 465 struct cfg80211_mgmt_registration *reg, *tmp;
478 466
479 spin_lock_bh(&wdev->mgmt_registrations_lock); 467 spin_lock_bh(&wdev->mgmt_registrations_lock);
@@ -620,7 +608,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
620 const u8 *buf, size_t len, u32 flags, gfp_t gfp) 608 const u8 *buf, size_t len, u32 flags, gfp_t gfp)
621{ 609{
622 struct wiphy *wiphy = wdev->wiphy; 610 struct wiphy *wiphy = wdev->wiphy;
623 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 611 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
624 struct cfg80211_mgmt_registration *reg; 612 struct cfg80211_mgmt_registration *reg;
625 const struct ieee80211_txrx_stypes *stypes = 613 const struct ieee80211_txrx_stypes *stypes =
626 &wiphy->mgmt_stypes[wdev->iftype]; 614 &wiphy->mgmt_stypes[wdev->iftype];
@@ -739,7 +727,7 @@ void cfg80211_radar_event(struct wiphy *wiphy,
739 struct cfg80211_chan_def *chandef, 727 struct cfg80211_chan_def *chandef,
740 gfp_t gfp) 728 gfp_t gfp)
741{ 729{
742 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 730 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
743 unsigned long timeout; 731 unsigned long timeout;
744 732
745 trace_cfg80211_radar_event(wiphy, chandef); 733 trace_cfg80211_radar_event(wiphy, chandef);
@@ -764,7 +752,7 @@ void cfg80211_cac_event(struct net_device *netdev,
764{ 752{
765 struct wireless_dev *wdev = netdev->ieee80211_ptr; 753 struct wireless_dev *wdev = netdev->ieee80211_ptr;
766 struct wiphy *wiphy = wdev->wiphy; 754 struct wiphy *wiphy = wdev->wiphy;
767 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 755 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
768 unsigned long timeout; 756 unsigned long timeout;
769 757
770 trace_cfg80211_cac_event(netdev, event); 758 trace_cfg80211_cac_event(netdev, event);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 052c1bf8ffac..ba4f1723c83a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -168,8 +168,8 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
168 netdev = __dev_get_by_index(netns, ifindex); 168 netdev = __dev_get_by_index(netns, ifindex);
169 if (netdev) { 169 if (netdev) {
170 if (netdev->ieee80211_ptr) 170 if (netdev->ieee80211_ptr)
171 tmp = wiphy_to_dev( 171 tmp = wiphy_to_rdev(
172 netdev->ieee80211_ptr->wiphy); 172 netdev->ieee80211_ptr->wiphy);
173 else 173 else
174 tmp = NULL; 174 tmp = NULL;
175 175
@@ -371,8 +371,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
371 [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, 371 [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
372 [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, 372 [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
373 [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, 373 [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
374 [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 }, 374 [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY },
375 [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 }, 375 [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY },
376 [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY }, 376 [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
377 [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY }, 377 [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
378 [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, 378 [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG },
@@ -385,6 +385,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
385 [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN }, 385 [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
386 [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, 386 [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
387 [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, 387 [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
388 [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
389 [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
388}; 390};
389 391
390/* policy for the key attributes */ 392/* policy for the key attributes */
@@ -484,7 +486,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
484 err = PTR_ERR(*wdev); 486 err = PTR_ERR(*wdev);
485 goto out_unlock; 487 goto out_unlock;
486 } 488 }
487 *rdev = wiphy_to_dev((*wdev)->wiphy); 489 *rdev = wiphy_to_rdev((*wdev)->wiphy);
488 /* 0 is the first index - add 1 to parse only once */ 490 /* 0 is the first index - add 1 to parse only once */
489 cb->args[0] = (*rdev)->wiphy_idx + 1; 491 cb->args[0] = (*rdev)->wiphy_idx + 1;
490 cb->args[1] = (*wdev)->identifier; 492 cb->args[1] = (*wdev)->identifier;
@@ -497,7 +499,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
497 err = -ENODEV; 499 err = -ENODEV;
498 goto out_unlock; 500 goto out_unlock;
499 } 501 }
500 *rdev = wiphy_to_dev(wiphy); 502 *rdev = wiphy_to_rdev(wiphy);
501 *wdev = NULL; 503 *wdev = NULL;
502 504
503 list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { 505 list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
@@ -566,6 +568,13 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
566 struct ieee80211_channel *chan, 568 struct ieee80211_channel *chan,
567 bool large) 569 bool large)
568{ 570{
571 /* Some channels must be completely excluded from the
572 * list to keep old user-space tools from breaking
573 */
574 if (!large && chan->flags &
575 (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ))
576 return 0;
577
569 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, 578 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
570 chan->center_freq)) 579 chan->center_freq))
571 goto nla_put_failure; 580 goto nla_put_failure;
@@ -613,6 +622,18 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
613 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && 622 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
614 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) 623 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
615 goto nla_put_failure; 624 goto nla_put_failure;
625 if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
626 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
627 goto nla_put_failure;
628 if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
629 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
630 goto nla_put_failure;
631 if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
632 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
633 goto nla_put_failure;
634 if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) &&
635 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ))
636 goto nla_put_failure;
616 } 637 }
617 638
618 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 639 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -950,8 +971,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
950 c->max_interfaces)) 971 c->max_interfaces))
951 goto nla_put_failure; 972 goto nla_put_failure;
952 if (large && 973 if (large &&
953 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, 974 (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
954 c->radar_detect_widths)) 975 c->radar_detect_widths) ||
976 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
977 c->radar_detect_regions)))
955 goto nla_put_failure; 978 goto nla_put_failure;
956 979
957 nla_nest_end(msg, nl_combi); 980 nla_nest_end(msg, nl_combi);
@@ -1006,42 +1029,42 @@ static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
1006} 1029}
1007 1030
1008static int nl80211_send_wowlan(struct sk_buff *msg, 1031static int nl80211_send_wowlan(struct sk_buff *msg,
1009 struct cfg80211_registered_device *dev, 1032 struct cfg80211_registered_device *rdev,
1010 bool large) 1033 bool large)
1011{ 1034{
1012 struct nlattr *nl_wowlan; 1035 struct nlattr *nl_wowlan;
1013 1036
1014 if (!dev->wiphy.wowlan) 1037 if (!rdev->wiphy.wowlan)
1015 return 0; 1038 return 0;
1016 1039
1017 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); 1040 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
1018 if (!nl_wowlan) 1041 if (!nl_wowlan)
1019 return -ENOBUFS; 1042 return -ENOBUFS;
1020 1043
1021 if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && 1044 if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
1022 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || 1045 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
1023 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && 1046 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
1024 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || 1047 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1025 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && 1048 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1026 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || 1049 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1027 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && 1050 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1028 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || 1051 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1029 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && 1052 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1030 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || 1053 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1031 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && 1054 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1032 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || 1055 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1033 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && 1056 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1034 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || 1057 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1035 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && 1058 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1036 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) 1059 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1037 return -ENOBUFS; 1060 return -ENOBUFS;
1038 1061
1039 if (dev->wiphy.wowlan->n_patterns) { 1062 if (rdev->wiphy.wowlan->n_patterns) {
1040 struct nl80211_pattern_support pat = { 1063 struct nl80211_pattern_support pat = {
1041 .max_patterns = dev->wiphy.wowlan->n_patterns, 1064 .max_patterns = rdev->wiphy.wowlan->n_patterns,
1042 .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, 1065 .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len,
1043 .max_pattern_len = dev->wiphy.wowlan->pattern_max_len, 1066 .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len,
1044 .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset, 1067 .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset,
1045 }; 1068 };
1046 1069
1047 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1070 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
@@ -1049,7 +1072,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
1049 return -ENOBUFS; 1072 return -ENOBUFS;
1050 } 1073 }
1051 1074
1052 if (large && nl80211_send_wowlan_tcp_caps(dev, msg)) 1075 if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
1053 return -ENOBUFS; 1076 return -ENOBUFS;
1054 1077
1055 nla_nest_end(msg, nl_wowlan); 1078 nla_nest_end(msg, nl_wowlan);
@@ -1059,19 +1082,19 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
1059#endif 1082#endif
1060 1083
1061static int nl80211_send_coalesce(struct sk_buff *msg, 1084static int nl80211_send_coalesce(struct sk_buff *msg,
1062 struct cfg80211_registered_device *dev) 1085 struct cfg80211_registered_device *rdev)
1063{ 1086{
1064 struct nl80211_coalesce_rule_support rule; 1087 struct nl80211_coalesce_rule_support rule;
1065 1088
1066 if (!dev->wiphy.coalesce) 1089 if (!rdev->wiphy.coalesce)
1067 return 0; 1090 return 0;
1068 1091
1069 rule.max_rules = dev->wiphy.coalesce->n_rules; 1092 rule.max_rules = rdev->wiphy.coalesce->n_rules;
1070 rule.max_delay = dev->wiphy.coalesce->max_delay; 1093 rule.max_delay = rdev->wiphy.coalesce->max_delay;
1071 rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns; 1094 rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns;
1072 rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len; 1095 rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len;
1073 rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len; 1096 rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len;
1074 rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset; 1097 rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset;
1075 1098
1076 if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) 1099 if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
1077 return -ENOBUFS; 1100 return -ENOBUFS;
@@ -1202,7 +1225,8 @@ struct nl80211_dump_wiphy_state {
1202 bool split; 1225 bool split;
1203}; 1226};
1204 1227
1205static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, 1228static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1229 enum nl80211_commands cmd,
1206 struct sk_buff *msg, u32 portid, u32 seq, 1230 struct sk_buff *msg, u32 portid, u32 seq,
1207 int flags, struct nl80211_dump_wiphy_state *state) 1231 int flags, struct nl80211_dump_wiphy_state *state)
1208{ 1232{
@@ -1214,63 +1238,66 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1214 struct ieee80211_channel *chan; 1238 struct ieee80211_channel *chan;
1215 int i; 1239 int i;
1216 const struct ieee80211_txrx_stypes *mgmt_stypes = 1240 const struct ieee80211_txrx_stypes *mgmt_stypes =
1217 dev->wiphy.mgmt_stypes; 1241 rdev->wiphy.mgmt_stypes;
1218 u32 features; 1242 u32 features;
1219 1243
1220 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY); 1244 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
1221 if (!hdr) 1245 if (!hdr)
1222 return -ENOBUFS; 1246 return -ENOBUFS;
1223 1247
1224 if (WARN_ON(!state)) 1248 if (WARN_ON(!state))
1225 return -EINVAL; 1249 return -EINVAL;
1226 1250
1227 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || 1251 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1228 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, 1252 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
1229 wiphy_name(&dev->wiphy)) || 1253 wiphy_name(&rdev->wiphy)) ||
1230 nla_put_u32(msg, NL80211_ATTR_GENERATION, 1254 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1231 cfg80211_rdev_list_generation)) 1255 cfg80211_rdev_list_generation))
1232 goto nla_put_failure; 1256 goto nla_put_failure;
1233 1257
1258 if (cmd != NL80211_CMD_NEW_WIPHY)
1259 goto finish;
1260
1234 switch (state->split_start) { 1261 switch (state->split_start) {
1235 case 0: 1262 case 0:
1236 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 1263 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
1237 dev->wiphy.retry_short) || 1264 rdev->wiphy.retry_short) ||
1238 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, 1265 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
1239 dev->wiphy.retry_long) || 1266 rdev->wiphy.retry_long) ||
1240 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, 1267 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
1241 dev->wiphy.frag_threshold) || 1268 rdev->wiphy.frag_threshold) ||
1242 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 1269 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
1243 dev->wiphy.rts_threshold) || 1270 rdev->wiphy.rts_threshold) ||
1244 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, 1271 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
1245 dev->wiphy.coverage_class) || 1272 rdev->wiphy.coverage_class) ||
1246 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 1273 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
1247 dev->wiphy.max_scan_ssids) || 1274 rdev->wiphy.max_scan_ssids) ||
1248 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, 1275 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
1249 dev->wiphy.max_sched_scan_ssids) || 1276 rdev->wiphy.max_sched_scan_ssids) ||
1250 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, 1277 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
1251 dev->wiphy.max_scan_ie_len) || 1278 rdev->wiphy.max_scan_ie_len) ||
1252 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, 1279 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
1253 dev->wiphy.max_sched_scan_ie_len) || 1280 rdev->wiphy.max_sched_scan_ie_len) ||
1254 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, 1281 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
1255 dev->wiphy.max_match_sets)) 1282 rdev->wiphy.max_match_sets))
1256 goto nla_put_failure; 1283 goto nla_put_failure;
1257 1284
1258 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && 1285 if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
1259 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) 1286 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
1260 goto nla_put_failure; 1287 goto nla_put_failure;
1261 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && 1288 if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
1262 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) 1289 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
1263 goto nla_put_failure; 1290 goto nla_put_failure;
1264 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && 1291 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
1265 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) 1292 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
1266 goto nla_put_failure; 1293 goto nla_put_failure;
1267 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && 1294 if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
1268 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) 1295 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
1269 goto nla_put_failure; 1296 goto nla_put_failure;
1270 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && 1297 if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
1271 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) 1298 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
1272 goto nla_put_failure; 1299 goto nla_put_failure;
1273 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && 1300 if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
1274 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) 1301 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
1275 goto nla_put_failure; 1302 goto nla_put_failure;
1276 state->split_start++; 1303 state->split_start++;
@@ -1278,35 +1305,35 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1278 break; 1305 break;
1279 case 1: 1306 case 1:
1280 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, 1307 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
1281 sizeof(u32) * dev->wiphy.n_cipher_suites, 1308 sizeof(u32) * rdev->wiphy.n_cipher_suites,
1282 dev->wiphy.cipher_suites)) 1309 rdev->wiphy.cipher_suites))
1283 goto nla_put_failure; 1310 goto nla_put_failure;
1284 1311
1285 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 1312 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
1286 dev->wiphy.max_num_pmkids)) 1313 rdev->wiphy.max_num_pmkids))
1287 goto nla_put_failure; 1314 goto nla_put_failure;
1288 1315
1289 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && 1316 if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
1290 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) 1317 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
1291 goto nla_put_failure; 1318 goto nla_put_failure;
1292 1319
1293 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 1320 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
1294 dev->wiphy.available_antennas_tx) || 1321 rdev->wiphy.available_antennas_tx) ||
1295 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 1322 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
1296 dev->wiphy.available_antennas_rx)) 1323 rdev->wiphy.available_antennas_rx))
1297 goto nla_put_failure; 1324 goto nla_put_failure;
1298 1325
1299 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && 1326 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
1300 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 1327 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
1301 dev->wiphy.probe_resp_offload)) 1328 rdev->wiphy.probe_resp_offload))
1302 goto nla_put_failure; 1329 goto nla_put_failure;
1303 1330
1304 if ((dev->wiphy.available_antennas_tx || 1331 if ((rdev->wiphy.available_antennas_tx ||
1305 dev->wiphy.available_antennas_rx) && 1332 rdev->wiphy.available_antennas_rx) &&
1306 dev->ops->get_antenna) { 1333 rdev->ops->get_antenna) {
1307 u32 tx_ant = 0, rx_ant = 0; 1334 u32 tx_ant = 0, rx_ant = 0;
1308 int res; 1335 int res;
1309 res = rdev_get_antenna(dev, &tx_ant, &rx_ant); 1336 res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
1310 if (!res) { 1337 if (!res) {
1311 if (nla_put_u32(msg, 1338 if (nla_put_u32(msg,
1312 NL80211_ATTR_WIPHY_ANTENNA_TX, 1339 NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -1323,7 +1350,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1323 break; 1350 break;
1324 case 2: 1351 case 2:
1325 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, 1352 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
1326 dev->wiphy.interface_modes)) 1353 rdev->wiphy.interface_modes))
1327 goto nla_put_failure; 1354 goto nla_put_failure;
1328 state->split_start++; 1355 state->split_start++;
1329 if (state->split) 1356 if (state->split)
@@ -1337,7 +1364,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1337 band < IEEE80211_NUM_BANDS; band++) { 1364 band < IEEE80211_NUM_BANDS; band++) {
1338 struct ieee80211_supported_band *sband; 1365 struct ieee80211_supported_band *sband;
1339 1366
1340 sband = dev->wiphy.bands[band]; 1367 sband = rdev->wiphy.bands[band];
1341 1368
1342 if (!sband) 1369 if (!sband)
1343 continue; 1370 continue;
@@ -1414,7 +1441,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1414 i = 0; 1441 i = 0;
1415#define CMD(op, n) \ 1442#define CMD(op, n) \
1416 do { \ 1443 do { \
1417 if (dev->ops->op) { \ 1444 if (rdev->ops->op) { \
1418 i++; \ 1445 i++; \
1419 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ 1446 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
1420 goto nla_put_failure; \ 1447 goto nla_put_failure; \
@@ -1438,32 +1465,32 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1438 CMD(set_pmksa, SET_PMKSA); 1465 CMD(set_pmksa, SET_PMKSA);
1439 CMD(del_pmksa, DEL_PMKSA); 1466 CMD(del_pmksa, DEL_PMKSA);
1440 CMD(flush_pmksa, FLUSH_PMKSA); 1467 CMD(flush_pmksa, FLUSH_PMKSA);
1441 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) 1468 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
1442 CMD(remain_on_channel, REMAIN_ON_CHANNEL); 1469 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
1443 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); 1470 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
1444 CMD(mgmt_tx, FRAME); 1471 CMD(mgmt_tx, FRAME);
1445 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); 1472 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
1446 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 1473 if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
1447 i++; 1474 i++;
1448 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) 1475 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
1449 goto nla_put_failure; 1476 goto nla_put_failure;
1450 } 1477 }
1451 if (dev->ops->set_monitor_channel || dev->ops->start_ap || 1478 if (rdev->ops->set_monitor_channel || rdev->ops->start_ap ||
1452 dev->ops->join_mesh) { 1479 rdev->ops->join_mesh) {
1453 i++; 1480 i++;
1454 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) 1481 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
1455 goto nla_put_failure; 1482 goto nla_put_failure;
1456 } 1483 }
1457 CMD(set_wds_peer, SET_WDS_PEER); 1484 CMD(set_wds_peer, SET_WDS_PEER);
1458 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { 1485 if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1459 CMD(tdls_mgmt, TDLS_MGMT); 1486 CMD(tdls_mgmt, TDLS_MGMT);
1460 CMD(tdls_oper, TDLS_OPER); 1487 CMD(tdls_oper, TDLS_OPER);
1461 } 1488 }
1462 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) 1489 if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
1463 CMD(sched_scan_start, START_SCHED_SCAN); 1490 CMD(sched_scan_start, START_SCHED_SCAN);
1464 CMD(probe_client, PROBE_CLIENT); 1491 CMD(probe_client, PROBE_CLIENT);
1465 CMD(set_noack_map, SET_NOACK_MAP); 1492 CMD(set_noack_map, SET_NOACK_MAP);
1466 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { 1493 if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
1467 i++; 1494 i++;
1468 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) 1495 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
1469 goto nla_put_failure; 1496 goto nla_put_failure;
@@ -1473,7 +1500,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1473 if (state->split) { 1500 if (state->split) {
1474 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1501 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1475 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1502 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1476 if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1503 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1477 CMD(channel_switch, CHANNEL_SWITCH); 1504 CMD(channel_switch, CHANNEL_SWITCH);
1478 } 1505 }
1479 CMD(set_qos_map, SET_QOS_MAP); 1506 CMD(set_qos_map, SET_QOS_MAP);
@@ -1484,13 +1511,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1484 1511
1485#undef CMD 1512#undef CMD
1486 1513
1487 if (dev->ops->connect || dev->ops->auth) { 1514 if (rdev->ops->connect || rdev->ops->auth) {
1488 i++; 1515 i++;
1489 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) 1516 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
1490 goto nla_put_failure; 1517 goto nla_put_failure;
1491 } 1518 }
1492 1519
1493 if (dev->ops->disconnect || dev->ops->deauth) { 1520 if (rdev->ops->disconnect || rdev->ops->deauth) {
1494 i++; 1521 i++;
1495 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) 1522 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
1496 goto nla_put_failure; 1523 goto nla_put_failure;
@@ -1501,14 +1528,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1501 if (state->split) 1528 if (state->split)
1502 break; 1529 break;
1503 case 5: 1530 case 5:
1504 if (dev->ops->remain_on_channel && 1531 if (rdev->ops->remain_on_channel &&
1505 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && 1532 (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
1506 nla_put_u32(msg, 1533 nla_put_u32(msg,
1507 NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 1534 NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
1508 dev->wiphy.max_remain_on_channel_duration)) 1535 rdev->wiphy.max_remain_on_channel_duration))
1509 goto nla_put_failure; 1536 goto nla_put_failure;
1510 1537
1511 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && 1538 if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
1512 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) 1539 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
1513 goto nla_put_failure; 1540 goto nla_put_failure;
1514 1541
@@ -1519,7 +1546,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1519 break; 1546 break;
1520 case 6: 1547 case 6:
1521#ifdef CONFIG_PM 1548#ifdef CONFIG_PM
1522 if (nl80211_send_wowlan(msg, dev, state->split)) 1549 if (nl80211_send_wowlan(msg, rdev, state->split))
1523 goto nla_put_failure; 1550 goto nla_put_failure;
1524 state->split_start++; 1551 state->split_start++;
1525 if (state->split) 1552 if (state->split)
@@ -1529,10 +1556,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1529#endif 1556#endif
1530 case 7: 1557 case 7:
1531 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, 1558 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
1532 dev->wiphy.software_iftypes)) 1559 rdev->wiphy.software_iftypes))
1533 goto nla_put_failure; 1560 goto nla_put_failure;
1534 1561
1535 if (nl80211_put_iface_combinations(&dev->wiphy, msg, 1562 if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
1536 state->split)) 1563 state->split))
1537 goto nla_put_failure; 1564 goto nla_put_failure;
1538 1565
@@ -1540,12 +1567,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1540 if (state->split) 1567 if (state->split)
1541 break; 1568 break;
1542 case 8: 1569 case 8:
1543 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && 1570 if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
1544 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, 1571 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
1545 dev->wiphy.ap_sme_capa)) 1572 rdev->wiphy.ap_sme_capa))
1546 goto nla_put_failure; 1573 goto nla_put_failure;
1547 1574
1548 features = dev->wiphy.features; 1575 features = rdev->wiphy.features;
1549 /* 1576 /*
1550 * We can only add the per-channel limit information if the 1577 * We can only add the per-channel limit information if the
1551 * dump is split, otherwise it makes it too big. Therefore 1578 * dump is split, otherwise it makes it too big. Therefore
@@ -1556,16 +1583,16 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1556 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features)) 1583 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
1557 goto nla_put_failure; 1584 goto nla_put_failure;
1558 1585
1559 if (dev->wiphy.ht_capa_mod_mask && 1586 if (rdev->wiphy.ht_capa_mod_mask &&
1560 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, 1587 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1561 sizeof(*dev->wiphy.ht_capa_mod_mask), 1588 sizeof(*rdev->wiphy.ht_capa_mod_mask),
1562 dev->wiphy.ht_capa_mod_mask)) 1589 rdev->wiphy.ht_capa_mod_mask))
1563 goto nla_put_failure; 1590 goto nla_put_failure;
1564 1591
1565 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME && 1592 if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
1566 dev->wiphy.max_acl_mac_addrs && 1593 rdev->wiphy.max_acl_mac_addrs &&
1567 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, 1594 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
1568 dev->wiphy.max_acl_mac_addrs)) 1595 rdev->wiphy.max_acl_mac_addrs))
1569 goto nla_put_failure; 1596 goto nla_put_failure;
1570 1597
1571 /* 1598 /*
@@ -1581,41 +1608,41 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1581 state->split_start++; 1608 state->split_start++;
1582 break; 1609 break;
1583 case 9: 1610 case 9:
1584 if (dev->wiphy.extended_capabilities && 1611 if (rdev->wiphy.extended_capabilities &&
1585 (nla_put(msg, NL80211_ATTR_EXT_CAPA, 1612 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1586 dev->wiphy.extended_capabilities_len, 1613 rdev->wiphy.extended_capabilities_len,
1587 dev->wiphy.extended_capabilities) || 1614 rdev->wiphy.extended_capabilities) ||
1588 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, 1615 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1589 dev->wiphy.extended_capabilities_len, 1616 rdev->wiphy.extended_capabilities_len,
1590 dev->wiphy.extended_capabilities_mask))) 1617 rdev->wiphy.extended_capabilities_mask)))
1591 goto nla_put_failure; 1618 goto nla_put_failure;
1592 1619
1593 if (dev->wiphy.vht_capa_mod_mask && 1620 if (rdev->wiphy.vht_capa_mod_mask &&
1594 nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK, 1621 nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
1595 sizeof(*dev->wiphy.vht_capa_mod_mask), 1622 sizeof(*rdev->wiphy.vht_capa_mod_mask),
1596 dev->wiphy.vht_capa_mod_mask)) 1623 rdev->wiphy.vht_capa_mod_mask))
1597 goto nla_put_failure; 1624 goto nla_put_failure;
1598 1625
1599 state->split_start++; 1626 state->split_start++;
1600 break; 1627 break;
1601 case 10: 1628 case 10:
1602 if (nl80211_send_coalesce(msg, dev)) 1629 if (nl80211_send_coalesce(msg, rdev))
1603 goto nla_put_failure; 1630 goto nla_put_failure;
1604 1631
1605 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) && 1632 if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
1606 (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) || 1633 (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
1607 nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ))) 1634 nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
1608 goto nla_put_failure; 1635 goto nla_put_failure;
1609 1636
1610 if (dev->wiphy.max_ap_assoc_sta && 1637 if (rdev->wiphy.max_ap_assoc_sta &&
1611 nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA, 1638 nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
1612 dev->wiphy.max_ap_assoc_sta)) 1639 rdev->wiphy.max_ap_assoc_sta))
1613 goto nla_put_failure; 1640 goto nla_put_failure;
1614 1641
1615 state->split_start++; 1642 state->split_start++;
1616 break; 1643 break;
1617 case 11: 1644 case 11:
1618 if (dev->wiphy.n_vendor_commands) { 1645 if (rdev->wiphy.n_vendor_commands) {
1619 const struct nl80211_vendor_cmd_info *info; 1646 const struct nl80211_vendor_cmd_info *info;
1620 struct nlattr *nested; 1647 struct nlattr *nested;
1621 1648
@@ -1623,15 +1650,15 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1623 if (!nested) 1650 if (!nested)
1624 goto nla_put_failure; 1651 goto nla_put_failure;
1625 1652
1626 for (i = 0; i < dev->wiphy.n_vendor_commands; i++) { 1653 for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
1627 info = &dev->wiphy.vendor_commands[i].info; 1654 info = &rdev->wiphy.vendor_commands[i].info;
1628 if (nla_put(msg, i + 1, sizeof(*info), info)) 1655 if (nla_put(msg, i + 1, sizeof(*info), info))
1629 goto nla_put_failure; 1656 goto nla_put_failure;
1630 } 1657 }
1631 nla_nest_end(msg, nested); 1658 nla_nest_end(msg, nested);
1632 } 1659 }
1633 1660
1634 if (dev->wiphy.n_vendor_events) { 1661 if (rdev->wiphy.n_vendor_events) {
1635 const struct nl80211_vendor_cmd_info *info; 1662 const struct nl80211_vendor_cmd_info *info;
1636 struct nlattr *nested; 1663 struct nlattr *nested;
1637 1664
@@ -1640,18 +1667,26 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1640 if (!nested) 1667 if (!nested)
1641 goto nla_put_failure; 1668 goto nla_put_failure;
1642 1669
1643 for (i = 0; i < dev->wiphy.n_vendor_events; i++) { 1670 for (i = 0; i < rdev->wiphy.n_vendor_events; i++) {
1644 info = &dev->wiphy.vendor_events[i]; 1671 info = &rdev->wiphy.vendor_events[i];
1645 if (nla_put(msg, i + 1, sizeof(*info), info)) 1672 if (nla_put(msg, i + 1, sizeof(*info), info))
1646 goto nla_put_failure; 1673 goto nla_put_failure;
1647 } 1674 }
1648 nla_nest_end(msg, nested); 1675 nla_nest_end(msg, nested);
1649 } 1676 }
1677 state->split_start++;
1678 break;
1679 case 12:
1680 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH &&
1681 nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS,
1682 rdev->wiphy.max_num_csa_counters))
1683 goto nla_put_failure;
1650 1684
1651 /* done */ 1685 /* done */
1652 state->split_start = 0; 1686 state->split_start = 0;
1653 break; 1687 break;
1654 } 1688 }
1689 finish:
1655 return genlmsg_end(msg, hdr); 1690 return genlmsg_end(msg, hdr);
1656 1691
1657 nla_put_failure: 1692 nla_put_failure:
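
The cmd parameter and the finish early-out above let the same writer serve full NL80211_CMD_NEW_WIPHY dumps and minimal DEL_WIPHY notifications, while state->split_start makes the dump resumable: each case emits one chunk, advances the cursor, and breaks out when the caller asked for a split dump. A minimal userspace sketch of that resumable-writer idea, assuming made-up chunk names (nothing here is kernel API):

/*
 * Emit numbered chunks until the output buffer is full, remember how
 * far we got, and resume from there on the next call, mirroring the
 * split_start handling in nl80211_send_wiphy().
 */
#include <stdio.h>
#include <string.h>

struct dump_state { int split_start; };

/* Returns 0 when everything fit, -1 when the caller must retry with a
 * fresh buffer; state->split_start records the next chunk to emit. */
static int send_chunks(char *buf, size_t len, struct dump_state *state)
{
	static const char *chunk[] = { "caps ", "bands ", "cmds ", "wowlan " };
	size_t used = strlen(buf);
	int i;

	for (i = state->split_start; i < 4; i++) {
		if (used + strlen(chunk[i]) + 1 > len)
			return -1;		/* buffer full: resume later */
		strcat(buf, chunk[i]);
		used += strlen(chunk[i]);
		state->split_start = i + 1;
	}
	state->split_start = 0;			/* done */
	return 0;
}

int main(void)
{
	struct dump_state state = { 0 };
	char buf[16];

	do {
		buf[0] = '\0';
		send_chunks(buf, sizeof(buf), &state);
		printf("message: %s\n", buf);
	} while (state.split_start);
	return 0;
}
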
@@ -1684,7 +1719,7 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
1684 if (!netdev) 1719 if (!netdev)
1685 return -ENODEV; 1720 return -ENODEV;
1686 if (netdev->ieee80211_ptr) { 1721 if (netdev->ieee80211_ptr) {
1687 rdev = wiphy_to_dev( 1722 rdev = wiphy_to_rdev(
1688 netdev->ieee80211_ptr->wiphy); 1723 netdev->ieee80211_ptr->wiphy);
1689 state->filter_wiphy = rdev->wiphy_idx; 1724 state->filter_wiphy = rdev->wiphy_idx;
1690 } 1725 }
@@ -1697,7 +1732,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1697{ 1732{
1698 int idx = 0, ret; 1733 int idx = 0, ret;
1699 struct nl80211_dump_wiphy_state *state = (void *)cb->args[0]; 1734 struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
1700 struct cfg80211_registered_device *dev; 1735 struct cfg80211_registered_device *rdev;
1701 1736
1702 rtnl_lock(); 1737 rtnl_lock();
1703 if (!state) { 1738 if (!state) {
@@ -1716,17 +1751,18 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1716 cb->args[0] = (long)state; 1751 cb->args[0] = (long)state;
1717 } 1752 }
1718 1753
1719 list_for_each_entry(dev, &cfg80211_rdev_list, list) { 1754 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
1720 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) 1755 if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
1721 continue; 1756 continue;
1722 if (++idx <= state->start) 1757 if (++idx <= state->start)
1723 continue; 1758 continue;
1724 if (state->filter_wiphy != -1 && 1759 if (state->filter_wiphy != -1 &&
1725 state->filter_wiphy != dev->wiphy_idx) 1760 state->filter_wiphy != rdev->wiphy_idx)
1726 continue; 1761 continue;
1727 /* attempt to fit multiple wiphy data chunks into the skb */ 1762 /* attempt to fit multiple wiphy data chunks into the skb */
1728 do { 1763 do {
1729 ret = nl80211_send_wiphy(dev, skb, 1764 ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY,
1765 skb,
1730 NETLINK_CB(cb->skb).portid, 1766 NETLINK_CB(cb->skb).portid,
1731 cb->nlh->nlmsg_seq, 1767 cb->nlh->nlmsg_seq,
1732 NLM_F_MULTI, state); 1768 NLM_F_MULTI, state);
@@ -1774,14 +1810,15 @@ static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
1774static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) 1810static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
1775{ 1811{
1776 struct sk_buff *msg; 1812 struct sk_buff *msg;
1777 struct cfg80211_registered_device *dev = info->user_ptr[0]; 1813 struct cfg80211_registered_device *rdev = info->user_ptr[0];
1778 struct nl80211_dump_wiphy_state state = {}; 1814 struct nl80211_dump_wiphy_state state = {};
1779 1815
1780 msg = nlmsg_new(4096, GFP_KERNEL); 1816 msg = nlmsg_new(4096, GFP_KERNEL);
1781 if (!msg) 1817 if (!msg)
1782 return -ENOMEM; 1818 return -ENOMEM;
1783 1819
1784 if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0, 1820 if (nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, msg,
1821 info->snd_portid, info->snd_seq, 0,
1785 &state) < 0) { 1822 &state) < 0) {
1786 nlmsg_free(msg); 1823 nlmsg_free(msg);
1787 return -ENOBUFS; 1824 return -ENOBUFS;
@@ -1908,18 +1945,20 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
1908} 1945}
1909 1946
1910static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, 1947static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1911 struct wireless_dev *wdev, 1948 struct net_device *dev,
1912 struct genl_info *info) 1949 struct genl_info *info)
1913{ 1950{
1914 struct cfg80211_chan_def chandef; 1951 struct cfg80211_chan_def chandef;
1915 int result; 1952 int result;
1916 enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; 1953 enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
1954 struct wireless_dev *wdev = NULL;
1917 1955
1918 if (wdev) 1956 if (dev)
1919 iftype = wdev->iftype; 1957 wdev = dev->ieee80211_ptr;
1920
1921 if (!nl80211_can_set_dev_channel(wdev)) 1958 if (!nl80211_can_set_dev_channel(wdev))
1922 return -EOPNOTSUPP; 1959 return -EOPNOTSUPP;
1960 if (wdev)
1961 iftype = wdev->iftype;
1923 1962
1924 result = nl80211_parse_chandef(rdev, info, &chandef); 1963 result = nl80211_parse_chandef(rdev, info, &chandef);
1925 if (result) 1964 if (result)
@@ -1928,14 +1967,27 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1928 switch (iftype) { 1967 switch (iftype) {
1929 case NL80211_IFTYPE_AP: 1968 case NL80211_IFTYPE_AP:
1930 case NL80211_IFTYPE_P2P_GO: 1969 case NL80211_IFTYPE_P2P_GO:
1931 if (wdev->beacon_interval) { 1970 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
1932 result = -EBUSY;
1933 break;
1934 }
1935 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
1936 result = -EINVAL; 1971 result = -EINVAL;
1937 break; 1972 break;
1938 } 1973 }
1974 if (wdev->beacon_interval) {
1975 if (!dev || !rdev->ops->set_ap_chanwidth ||
1976 !(rdev->wiphy.features &
1977 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) {
1978 result = -EBUSY;
1979 break;
1980 }
1981
1982 /* Only allow dynamic channel width changes */
1983 if (chandef.chan != wdev->preset_chandef.chan) {
1984 result = -EBUSY;
1985 break;
1986 }
1987 result = rdev_set_ap_chanwidth(rdev, dev, &chandef);
1988 if (result)
1989 break;
1990 }
1939 wdev->preset_chandef = chandef; 1991 wdev->preset_chandef = chandef;
1940 result = 0; 1992 result = 0;
1941 break; 1993 break;
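
The rewritten branch allows a channel command on a beaconing AP/GO only as a bandwidth change on the same control channel, and only when the driver implements set_ap_chanwidth and advertises NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; everything else still fails with -EBUSY. A standalone sketch of that gating, with simplified stand-in types rather than the cfg80211 structures:

#include <stdbool.h>
#include <stdio.h>

#define FEAT_AP_CHAN_WIDTH_CHANGE  0x1

struct fake_dev {
	bool beaconing;          /* wdev->beacon_interval != 0 */
	bool have_netdev;        /* called with a net_device */
	bool has_set_chanwidth;  /* rdev->ops->set_ap_chanwidth */
	unsigned int features;   /* rdev->wiphy.features */
	int cur_chan, new_chan;  /* control channel identity */
};

static int check_ap_channel_change(const struct fake_dev *d)
{
	if (!d->beaconing)
		return 0;			/* not started: just preset */
	if (!d->have_netdev || !d->has_set_chanwidth ||
	    !(d->features & FEAT_AP_CHAN_WIDTH_CHANGE))
		return -16;			/* -EBUSY */
	if (d->new_chan != d->cur_chan)
		return -16;			/* only the width may change */
	return 1;				/* call set_ap_chanwidth() */
}

int main(void)
{
	struct fake_dev d = {
		.beaconing = true, .have_netdev = true,
		.has_set_chanwidth = true,
		.features = FEAT_AP_CHAN_WIDTH_CHANGE,
		.cur_chan = 36, .new_chan = 36,
	};

	printf("width change allowed: %d\n", check_ap_channel_change(&d));
	return 0;
}
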
@@ -1957,7 +2009,7 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
1957 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2009 struct cfg80211_registered_device *rdev = info->user_ptr[0];
1958 struct net_device *netdev = info->user_ptr[1]; 2010 struct net_device *netdev = info->user_ptr[1];
1959 2011
1960 return __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info); 2012 return __nl80211_set_channel(rdev, netdev, info);
1961} 2013}
1962 2014
1963static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info) 2015static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
@@ -2013,7 +2065,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
2013 2065
2014 netdev = __dev_get_by_index(genl_info_net(info), ifindex); 2066 netdev = __dev_get_by_index(genl_info_net(info), ifindex);
2015 if (netdev && netdev->ieee80211_ptr) 2067 if (netdev && netdev->ieee80211_ptr)
2016 rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); 2068 rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy);
2017 else 2069 else
2018 netdev = NULL; 2070 netdev = NULL;
2019 } 2071 }
@@ -2079,9 +2131,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
2079 } 2131 }
2080 2132
2081 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 2133 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
2082 result = __nl80211_set_channel(rdev, 2134 result = __nl80211_set_channel(
2083 nl80211_can_set_dev_channel(wdev) ? wdev : NULL, 2135 rdev,
2084 info); 2136 nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
2137 info);
2085 if (result) 2138 if (result)
2086 return result; 2139 return result;
2087 } 2140 }
@@ -2229,7 +2282,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
2229static inline u64 wdev_id(struct wireless_dev *wdev) 2282static inline u64 wdev_id(struct wireless_dev *wdev)
2230{ 2283{
2231 return (u64)wdev->identifier | 2284 return (u64)wdev->identifier |
2232 ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32); 2285 ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
2233} 2286}
2234 2287
2235static int nl80211_send_chandef(struct sk_buff *msg, 2288static int nl80211_send_chandef(struct sk_buff *msg,
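
wdev_id() above builds a globally unique 64-bit identifier by keeping the per-device counter in the low 32 bits and the owning wiphy index in the high 32 bits, so either half can be recovered by shift and truncation. A runnable pack/unpack sketch:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Low 32 bits carry the per-device identifier, high 32 bits the
 * wiphy index, matching the wdev_id() layout. */
static uint64_t pack_wdev_id(uint32_t wiphy_idx, uint32_t identifier)
{
	return (uint64_t)identifier | ((uint64_t)wiphy_idx << 32);
}

int main(void)
{
	uint64_t id = pack_wdev_id(3, 7);

	printf("id=%#" PRIx64 " wiphy=%u wdev=%u\n",
	       id, (uint32_t)(id >> 32), (uint32_t)id);
	return 0;
}
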
@@ -2355,7 +2408,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
2355static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) 2408static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
2356{ 2409{
2357 struct sk_buff *msg; 2410 struct sk_buff *msg;
2358 struct cfg80211_registered_device *dev = info->user_ptr[0]; 2411 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2359 struct wireless_dev *wdev = info->user_ptr[1]; 2412 struct wireless_dev *wdev = info->user_ptr[1];
2360 2413
2361 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2414 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -2363,7 +2416,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
2363 return -ENOMEM; 2416 return -ENOMEM;
2364 2417
2365 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, 2418 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
2366 dev, wdev) < 0) { 2419 rdev, wdev) < 0) {
2367 nlmsg_free(msg); 2420 nlmsg_free(msg);
2368 return -ENOBUFS; 2421 return -ENOBUFS;
2369 } 2422 }
@@ -2514,6 +2567,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2514 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; 2567 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
2515 u32 flags; 2568 u32 flags;
2516 2569
2570 /* to avoid failing a new interface creation due to pending removal */
2571 cfg80211_destroy_ifaces(rdev);
2572
2517 memset(&params, 0, sizeof(params)); 2573 memset(&params, 0, sizeof(params));
2518 2574
2519 if (!info->attrs[NL80211_ATTR_IFNAME]) 2575 if (!info->attrs[NL80211_ATTR_IFNAME])
@@ -2563,6 +2619,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2563 return PTR_ERR(wdev); 2619 return PTR_ERR(wdev);
2564 } 2620 }
2565 2621
2622 if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
2623 wdev->owner_nlportid = info->snd_portid;
2624
2566 switch (type) { 2625 switch (type) {
2567 case NL80211_IFTYPE_MESH_POINT: 2626 case NL80211_IFTYPE_MESH_POINT:
2568 if (!info->attrs[NL80211_ATTR_MESH_ID]) 2627 if (!info->attrs[NL80211_ATTR_MESH_ID])
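
The two insertions tie interface lifetime to the requesting netlink socket: pending removals are flushed before creating a new interface, and when NL80211_ATTR_IFACE_SOCKET_OWNER is given the requester's portid is recorded so the interface can be reaped when that socket closes. A toy model of the ownership bookkeeping (table and names are illustrative only):

#include <stdio.h>
#include <stdint.h>

struct iface { uint32_t owner_portid; int alive; };

static struct iface ifaces[4];

static void create_iface(int idx, uint32_t portid, int owned)
{
	ifaces[idx].alive = 1;
	ifaces[idx].owner_portid = owned ? portid : 0;
}

/* Called when a netlink socket with this portid is released. */
static void socket_released(uint32_t portid)
{
	for (int i = 0; i < 4; i++)
		if (ifaces[i].alive && ifaces[i].owner_portid == portid)
			ifaces[i].alive = 0;
}

int main(void)
{
	create_iface(0, 100, 1);	/* owned by portid 100 */
	create_iface(1, 100, 0);	/* not socket-owned */
	socket_released(100);
	printf("iface0 alive=%d iface1 alive=%d\n",
	       ifaces[0].alive, ifaces[1].alive);
	return 0;
}
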
@@ -3142,7 +3201,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
3142 struct wireless_dev *wdev = dev->ieee80211_ptr; 3201 struct wireless_dev *wdev = dev->ieee80211_ptr;
3143 struct cfg80211_ap_settings params; 3202 struct cfg80211_ap_settings params;
3144 int err; 3203 int err;
3145 u8 radar_detect_width = 0;
3146 3204
3147 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 3205 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
3148 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 3206 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -3258,24 +3316,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
3258 } else if (!nl80211_get_ap_channel(rdev, &params)) 3316 } else if (!nl80211_get_ap_channel(rdev, &params))
3259 return -EINVAL; 3317 return -EINVAL;
3260 3318
3261 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef)) 3319 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
3320 wdev->iftype))
3262 return -EINVAL; 3321 return -EINVAL;
3263 3322
3264 err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
3265 if (err < 0)
3266 return err;
3267 if (err) {
3268 radar_detect_width = BIT(params.chandef.width);
3269 params.radar_required = true;
3270 }
3271
3272 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
3273 params.chandef.chan,
3274 CHAN_MODE_SHARED,
3275 radar_detect_width);
3276 if (err)
3277 return err;
3278
3279 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { 3323 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
3280 params.acl = parse_acl_data(&rdev->wiphy, info); 3324 params.acl = parse_acl_data(&rdev->wiphy, info);
3281 if (IS_ERR(params.acl)) 3325 if (IS_ERR(params.acl))
@@ -3613,6 +3657,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
3613 nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED, 3657 nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
3614 sinfo->tx_failed)) 3658 sinfo->tx_failed))
3615 goto nla_put_failure; 3659 goto nla_put_failure;
3660 if ((sinfo->filled & STATION_INFO_EXPECTED_THROUGHPUT) &&
3661 nla_put_u32(msg, NL80211_STA_INFO_EXPECTED_THROUGHPUT,
3662 sinfo->expected_throughput))
3663 goto nla_put_failure;
3616 if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) && 3664 if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
3617 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS, 3665 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
3618 sinfo->beacon_loss_count)) 3666 sinfo->beacon_loss_count))
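
The new expected-throughput attribute follows the existing convention: each optional statistic is serialized only when the driver set its bit in sinfo->filled. A small sketch of that bitmask-guarded emission, with made-up flag names:

#include <stdint.h>
#include <stdio.h>

#define FILLED_TX_FAILED   (1u << 0)
#define FILLED_EXP_TPUT    (1u << 1)

struct sta_stats {
	uint32_t filled;		/* which fields the driver set */
	uint32_t tx_failed;
	uint32_t expected_throughput;	/* kbps */
};

static void emit(const struct sta_stats *s)
{
	if (s->filled & FILLED_TX_FAILED)
		printf("tx-failed: %u\n", s->tx_failed);
	if (s->filled & FILLED_EXP_TPUT)
		printf("expected-throughput: %u kbps\n",
		       s->expected_throughput);
}

int main(void)
{
	struct sta_stats s = {
		.filled = FILLED_EXP_TPUT,
		.expected_throughput = 54000,
	};

	emit(&s);
	return 0;
}
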
@@ -3675,13 +3723,13 @@ static int nl80211_dump_station(struct sk_buff *skb,
3675 struct netlink_callback *cb) 3723 struct netlink_callback *cb)
3676{ 3724{
3677 struct station_info sinfo; 3725 struct station_info sinfo;
3678 struct cfg80211_registered_device *dev; 3726 struct cfg80211_registered_device *rdev;
3679 struct wireless_dev *wdev; 3727 struct wireless_dev *wdev;
3680 u8 mac_addr[ETH_ALEN]; 3728 u8 mac_addr[ETH_ALEN];
3681 int sta_idx = cb->args[2]; 3729 int sta_idx = cb->args[2];
3682 int err; 3730 int err;
3683 3731
3684 err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev); 3732 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
3685 if (err) 3733 if (err)
3686 return err; 3734 return err;
3687 3735
@@ -3690,14 +3738,14 @@ static int nl80211_dump_station(struct sk_buff *skb,
3690 goto out_err; 3738 goto out_err;
3691 } 3739 }
3692 3740
3693 if (!dev->ops->dump_station) { 3741 if (!rdev->ops->dump_station) {
3694 err = -EOPNOTSUPP; 3742 err = -EOPNOTSUPP;
3695 goto out_err; 3743 goto out_err;
3696 } 3744 }
3697 3745
3698 while (1) { 3746 while (1) {
3699 memset(&sinfo, 0, sizeof(sinfo)); 3747 memset(&sinfo, 0, sizeof(sinfo));
3700 err = rdev_dump_station(dev, wdev->netdev, sta_idx, 3748 err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
3701 mac_addr, &sinfo); 3749 mac_addr, &sinfo);
3702 if (err == -ENOENT) 3750 if (err == -ENOENT)
3703 break; 3751 break;
@@ -3707,7 +3755,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
3707 if (nl80211_send_station(skb, 3755 if (nl80211_send_station(skb,
3708 NETLINK_CB(cb->skb).portid, 3756 NETLINK_CB(cb->skb).portid,
3709 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3757 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3710 dev, wdev->netdev, mac_addr, 3758 rdev, wdev->netdev, mac_addr,
3711 &sinfo) < 0) 3759 &sinfo) < 0)
3712 goto out; 3760 goto out;
3713 3761
@@ -3719,7 +3767,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
3719 cb->args[2] = sta_idx; 3767 cb->args[2] = sta_idx;
3720 err = skb->len; 3768 err = skb->len;
3721 out_err: 3769 out_err:
3722 nl80211_finish_wdev_dump(dev); 3770 nl80211_finish_wdev_dump(rdev);
3723 3771
3724 return err; 3772 return err;
3725} 3773}
@@ -4380,18 +4428,18 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4380 struct netlink_callback *cb) 4428 struct netlink_callback *cb)
4381{ 4429{
4382 struct mpath_info pinfo; 4430 struct mpath_info pinfo;
4383 struct cfg80211_registered_device *dev; 4431 struct cfg80211_registered_device *rdev;
4384 struct wireless_dev *wdev; 4432 struct wireless_dev *wdev;
4385 u8 dst[ETH_ALEN]; 4433 u8 dst[ETH_ALEN];
4386 u8 next_hop[ETH_ALEN]; 4434 u8 next_hop[ETH_ALEN];
4387 int path_idx = cb->args[2]; 4435 int path_idx = cb->args[2];
4388 int err; 4436 int err;
4389 4437
4390 err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev); 4438 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
4391 if (err) 4439 if (err)
4392 return err; 4440 return err;
4393 4441
4394 if (!dev->ops->dump_mpath) { 4442 if (!rdev->ops->dump_mpath) {
4395 err = -EOPNOTSUPP; 4443 err = -EOPNOTSUPP;
4396 goto out_err; 4444 goto out_err;
4397 } 4445 }
@@ -4402,7 +4450,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4402 } 4450 }
4403 4451
4404 while (1) { 4452 while (1) {
4405 err = rdev_dump_mpath(dev, wdev->netdev, path_idx, dst, 4453 err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst,
4406 next_hop, &pinfo); 4454 next_hop, &pinfo);
4407 if (err == -ENOENT) 4455 if (err == -ENOENT)
4408 break; 4456 break;
@@ -4423,7 +4471,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4423 cb->args[2] = path_idx; 4471 cb->args[2] = path_idx;
4424 err = skb->len; 4472 err = skb->len;
4425 out_err: 4473 out_err:
4426 nl80211_finish_wdev_dump(dev); 4474 nl80211_finish_wdev_dump(rdev);
4427 return err; 4475 return err;
4428} 4476}
4429 4477
@@ -4663,7 +4711,6 @@ static int parse_reg_rule(struct nlattr *tb[],
4663 4711
4664static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) 4712static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
4665{ 4713{
4666 int r;
4667 char *data = NULL; 4714 char *data = NULL;
4668 enum nl80211_user_reg_hint_type user_reg_hint_type; 4715 enum nl80211_user_reg_hint_type user_reg_hint_type;
4669 4716
@@ -4676,11 +4723,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
4676 if (unlikely(!rcu_access_pointer(cfg80211_regdomain))) 4723 if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
4677 return -EINPROGRESS; 4724 return -EINPROGRESS;
4678 4725
4679 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
4680 return -EINVAL;
4681
4682 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
4683
4684 if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]) 4726 if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
4685 user_reg_hint_type = 4727 user_reg_hint_type =
4686 nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]); 4728 nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
@@ -4690,14 +4732,16 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
4690 switch (user_reg_hint_type) { 4732 switch (user_reg_hint_type) {
4691 case NL80211_USER_REG_HINT_USER: 4733 case NL80211_USER_REG_HINT_USER:
4692 case NL80211_USER_REG_HINT_CELL_BASE: 4734 case NL80211_USER_REG_HINT_CELL_BASE:
4693 break; 4735 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
4736 return -EINVAL;
4737
4738 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
4739 return regulatory_hint_user(data, user_reg_hint_type);
4740 case NL80211_USER_REG_HINT_INDOOR:
4741 return regulatory_hint_indoor_user();
4694 default: 4742 default:
4695 return -EINVAL; 4743 return -EINVAL;
4696 } 4744 }
4697
4698 r = regulatory_hint_user(data, user_reg_hint_type);
4699
4700 return r;
4701} 4745}
4702 4746
4703static int nl80211_get_mesh_config(struct sk_buff *skb, 4747static int nl80211_get_mesh_config(struct sk_buff *skb,
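
After the rework, NL80211_ATTR_REG_ALPHA2 is required only by the hint types that carry a country code, and each case returns directly; the new indoor hint takes no alpha2 at all. A compact sketch of per-variant argument validation (enum values are illustrative):

#include <stdio.h>

enum hint { HINT_USER, HINT_CELL_BASE, HINT_INDOOR };

static int set_reg(enum hint type, const char *alpha2)
{
	switch (type) {
	case HINT_USER:
	case HINT_CELL_BASE:
		if (!alpha2)
			return -22;	/* -EINVAL: alpha2 required */
		printf("hint %d for %s\n", type, alpha2);
		return 0;
	case HINT_INDOOR:
		printf("indoor hint, no alpha2 needed\n");
		return 0;
	default:
		return -22;
	}
}

int main(void)
{
	set_reg(HINT_USER, "DE");
	set_reg(HINT_INDOOR, NULL);
	printf("missing alpha2 -> %d\n", set_reg(HINT_USER, NULL));
	return 0;
}
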
@@ -5796,7 +5840,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
5796 if (wdev->cac_started) 5840 if (wdev->cac_started)
5797 return -EBUSY; 5841 return -EBUSY;
5798 5842
5799 err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef); 5843 err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef,
5844 wdev->iftype);
5800 if (err < 0) 5845 if (err < 0)
5801 return err; 5846 return err;
5802 5847
@@ -5809,12 +5854,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
5809 if (!rdev->ops->start_radar_detection) 5854 if (!rdev->ops->start_radar_detection)
5810 return -EOPNOTSUPP; 5855 return -EOPNOTSUPP;
5811 5856
5812 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
5813 chandef.chan, CHAN_MODE_SHARED,
5814 BIT(chandef.width));
5815 if (err)
5816 return err;
5817
5818 cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef); 5857 cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
5819 if (WARN_ON(!cac_time_ms)) 5858 if (WARN_ON(!cac_time_ms))
5820 cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; 5859 cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
@@ -5843,6 +5882,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
5843 u8 radar_detect_width = 0; 5882 u8 radar_detect_width = 0;
5844 int err; 5883 int err;
5845 bool need_new_beacon = false; 5884 bool need_new_beacon = false;
5885 int len, i;
5846 5886
5847 if (!rdev->ops->channel_switch || 5887 if (!rdev->ops->channel_switch ||
5848 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) 5888 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5901,26 +5941,55 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
5901 if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) 5941 if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
5902 return -EINVAL; 5942 return -EINVAL;
5903 5943
5904 params.counter_offset_beacon = 5944 len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
5905 nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); 5945 if (!len || (len % sizeof(u16)))
5906 if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
5907 return -EINVAL; 5946 return -EINVAL;
5908 5947
5909 /* sanity check - counters should be the same */ 5948 params.n_counter_offsets_beacon = len / sizeof(u16);
5910 if (params.beacon_csa.tail[params.counter_offset_beacon] != 5949 if (rdev->wiphy.max_num_csa_counters &&
5911 params.count) 5950 (params.n_counter_offsets_beacon >
5951 rdev->wiphy.max_num_csa_counters))
5912 return -EINVAL; 5952 return -EINVAL;
5913 5953
5954 params.counter_offsets_beacon =
5955 nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
5956
5957 /* sanity checks - counters should fit and be the same */
5958 for (i = 0; i < params.n_counter_offsets_beacon; i++) {
5959 u16 offset = params.counter_offsets_beacon[i];
5960
5961 if (offset >= params.beacon_csa.tail_len)
5962 return -EINVAL;
5963
5964 if (params.beacon_csa.tail[offset] != params.count)
5965 return -EINVAL;
5966 }
5967
5914 if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) { 5968 if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
5915 params.counter_offset_presp = 5969 len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
5916 nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); 5970 if (!len || (len % sizeof(u16)))
5917 if (params.counter_offset_presp >=
5918 params.beacon_csa.probe_resp_len)
5919 return -EINVAL; 5971 return -EINVAL;
5920 5972
5921 if (params.beacon_csa.probe_resp[params.counter_offset_presp] != 5973 params.n_counter_offsets_presp = len / sizeof(u16);
5922 params.count) 5974 if (rdev->wiphy.max_num_csa_counters &&
5975 (params.n_counter_offsets_beacon >
5976 rdev->wiphy.max_num_csa_counters))
5923 return -EINVAL; 5977 return -EINVAL;
5978
5979 params.counter_offsets_presp =
5980 nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
5981
5982 /* sanity checks - counters should fit and be the same */
5983 for (i = 0; i < params.n_counter_offsets_presp; i++) {
5984 u16 offset = params.counter_offsets_presp[i];
5985
5986 if (offset >= params.beacon_csa.probe_resp_len)
5987 return -EINVAL;
5988
5989 if (params.beacon_csa.probe_resp[offset] !=
5990 params.count)
5991 return -EINVAL;
5992 }
5924 } 5993 }
5925 5994
5926skip_beacons: 5995skip_beacons:
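
The counter attributes now carry whole arrays of u16 offsets: the parser derives the count from nla_len, caps it at wiphy.max_num_csa_counters when the driver declares one, and verifies that every offset lands on a byte of the template that already holds the announced count. A runnable userspace sketch of those length, bounds, and content checks:

#include <stdint.h>
#include <stdio.h>

static int check_offsets(const uint8_t *tail, size_t tail_len,
			 const void *attr, size_t attr_len,
			 unsigned int max_counters, uint8_t count)
{
	const uint16_t *off = attr;
	size_t i, n;

	if (!attr_len || (attr_len % sizeof(uint16_t)))
		return -22;			/* -EINVAL: bad length */
	n = attr_len / sizeof(uint16_t);
	if (max_counters && n > max_counters)
		return -22;			/* too many counters */
	for (i = 0; i < n; i++) {
		if (off[i] >= tail_len)
			return -22;		/* offset out of bounds */
		if (tail[off[i]] != count)
			return -22;		/* counter byte mismatch */
	}
	return 0;
}

int main(void)
{
	uint8_t tail[8] = { 0, 0, 5, 0, 0, 5, 0, 0 };
	uint16_t offsets[] = { 2, 5 };

	printf("ok=%d\n", check_offsets(tail, sizeof(tail), offsets,
					sizeof(offsets), 2, 5));
	return 0;
}
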
@@ -5928,27 +5997,25 @@ skip_beacons:
5928 if (err) 5997 if (err)
5929 return err; 5998 return err;
5930 5999
5931 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef)) 6000 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
6001 wdev->iftype))
5932 return -EINVAL; 6002 return -EINVAL;
5933 6003
5934 switch (dev->ieee80211_ptr->iftype) { 6004 err = cfg80211_chandef_dfs_required(wdev->wiphy,
5935 case NL80211_IFTYPE_AP: 6005 &params.chandef,
5936 case NL80211_IFTYPE_P2P_GO: 6006 wdev->iftype);
5937 case NL80211_IFTYPE_ADHOC: 6007 if (err < 0)
5938 case NL80211_IFTYPE_MESH_POINT: 6008 return err;
5939 err = cfg80211_chandef_dfs_required(wdev->wiphy, 6009
5940 &params.chandef); 6010 if (err > 0) {
5941 if (err < 0) 6011 radar_detect_width = BIT(params.chandef.width);
5942 return err; 6012 params.radar_required = true;
5943 if (err) {
5944 radar_detect_width = BIT(params.chandef.width);
5945 params.radar_required = true;
5946 }
5947 break;
5948 default:
5949 break;
5950 } 6013 }
5951 6014
6015 /* TODO: I left this here for now. With channel switch, the
6016 * verification is a bit more complicated, because we only do
6017 * it later when the channel switch really happens.
6018 */
5952 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, 6019 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
5953 params.chandef.chan, 6020 params.chandef.chan,
5954 CHAN_MODE_SHARED, 6021 CHAN_MODE_SHARED,
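
cfg80211_chandef_dfs_required() now takes the interface type, so the per-iftype switch collapses into one call with a tri-state result: negative is an error, zero means no radar detection, positive means DFS applies and the caller sets radar_required and the detection width. A sketch of consuming such a tri-state helper (dfs_required() here is a stub, not the cfg80211 function):

#include <stdio.h>

static int dfs_required(int chan) { return chan >= 52 && chan <= 144; }

static int prepare_switch(int chan)
{
	int radar_required = 0, err;

	err = dfs_required(chan);
	if (err < 0)
		return err;		/* propagate real errors */
	if (err > 0)
		radar_required = 1;	/* >0: radar detection needed */

	printf("chan %d radar_required=%d\n", chan, radar_required);
	return 0;
}

int main(void)
{
	prepare_switch(36);
	prepare_switch(100);
	return 0;
}
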
@@ -6175,12 +6242,12 @@ static int nl80211_dump_survey(struct sk_buff *skb,
6175 struct netlink_callback *cb) 6242 struct netlink_callback *cb)
6176{ 6243{
6177 struct survey_info survey; 6244 struct survey_info survey;
6178 struct cfg80211_registered_device *dev; 6245 struct cfg80211_registered_device *rdev;
6179 struct wireless_dev *wdev; 6246 struct wireless_dev *wdev;
6180 int survey_idx = cb->args[2]; 6247 int survey_idx = cb->args[2];
6181 int res; 6248 int res;
6182 6249
6183 res = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev); 6250 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
6184 if (res) 6251 if (res)
6185 return res; 6252 return res;
6186 6253
@@ -6189,7 +6256,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
6189 goto out_err; 6256 goto out_err;
6190 } 6257 }
6191 6258
6192 if (!dev->ops->dump_survey) { 6259 if (!rdev->ops->dump_survey) {
6193 res = -EOPNOTSUPP; 6260 res = -EOPNOTSUPP;
6194 goto out_err; 6261 goto out_err;
6195 } 6262 }
@@ -6197,7 +6264,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
6197 while (1) { 6264 while (1) {
6198 struct ieee80211_channel *chan; 6265 struct ieee80211_channel *chan;
6199 6266
6200 res = rdev_dump_survey(dev, wdev->netdev, survey_idx, &survey); 6267 res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
6201 if (res == -ENOENT) 6268 if (res == -ENOENT)
6202 break; 6269 break;
6203 if (res) 6270 if (res)
@@ -6209,7 +6276,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
6209 goto out; 6276 goto out;
6210 } 6277 }
6211 6278
6212 chan = ieee80211_get_channel(&dev->wiphy, 6279 chan = ieee80211_get_channel(&rdev->wiphy,
6213 survey.channel->center_freq); 6280 survey.channel->center_freq);
6214 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { 6281 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
6215 survey_idx++; 6282 survey_idx++;
@@ -6228,7 +6295,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
6228 cb->args[2] = survey_idx; 6295 cb->args[2] = survey_idx;
6229 res = skb->len; 6296 res = skb->len;
6230 out_err: 6297 out_err:
6231 nl80211_finish_wdev_dump(dev); 6298 nl80211_finish_wdev_dump(rdev);
6232 return res; 6299 return res;
6233} 6300}
6234 6301
@@ -6704,7 +6771,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
6704 if (err) 6771 if (err)
6705 return err; 6772 return err;
6706 6773
6707 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef)) 6774 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef,
6775 NL80211_IFTYPE_ADHOC))
6708 return -EINVAL; 6776 return -EINVAL;
6709 6777
6710 switch (ibss.chandef.width) { 6778 switch (ibss.chandef.width) {
@@ -6879,7 +6947,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
6879 int vendor_event_idx, 6947 int vendor_event_idx,
6880 int approxlen, gfp_t gfp) 6948 int approxlen, gfp_t gfp)
6881{ 6949{
6882 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 6950 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
6883 const struct nl80211_vendor_cmd_info *info; 6951 const struct nl80211_vendor_cmd_info *info;
6884 6952
6885 switch (cmd) { 6953 switch (cmd) {
@@ -7767,6 +7835,27 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7767 if (!chandef.chan && params.offchan) 7835 if (!chandef.chan && params.offchan)
7768 return -EINVAL; 7836 return -EINVAL;
7769 7837
7838 params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
7839 params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
7840
7841 if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
7842 int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
7843 int i;
7844
7845 if (len % sizeof(u16))
7846 return -EINVAL;
7847
7848 params.n_csa_offsets = len / sizeof(u16);
7849 params.csa_offsets =
7850 nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
7851
7852 /* check that all the offsets fit the frame */
7853 for (i = 0; i < params.n_csa_offsets; i++) {
7854 if (params.csa_offsets[i] >= params.len)
7855 return -EINVAL;
7856 }
7857 }
7858
7770 if (!params.dont_wait_for_ack) { 7859 if (!params.dont_wait_for_ack) {
7771 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 7860 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7772 if (!msg) 7861 if (!msg)
@@ -7780,8 +7869,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7780 } 7869 }
7781 } 7870 }
7782 7871
7783 params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
7784 params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
7785 params.chan = chandef.chan; 7872 params.chan = chandef.chan;
7786 err = cfg80211_mlme_mgmt_tx(rdev, wdev, &params, &cookie); 7873 err = cfg80211_mlme_mgmt_tx(rdev, wdev, &params, &cookie);
7787 if (err) 7874 if (err)
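
Moving the frame pointer and length up lets the new NL80211_ATTR_CSA_C_OFFSETS_TX checks run before the reply skb is allocated, so a malformed offset array is rejected with nothing to clean up; each offset must fall inside the frame. A sketch of that validate-then-allocate ordering, using plain C stand-ins rather than the kernel API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int tx_frame(const uint8_t *frame, size_t frame_len,
		    const uint16_t *offsets, size_t n_offsets)
{
	void *reply;
	size_t i;

	for (i = 0; i < n_offsets; i++)	/* validate first ... */
		if (offsets[i] >= frame_len)
			return -22;	/* -EINVAL, nothing to free */

	reply = malloc(64);		/* ... allocate afterwards */
	if (!reply)
		return -12;		/* -ENOMEM */
	printf("sending %zu bytes, %zu CSA offsets\n", frame_len, n_offsets);
	free(reply);
	return 0;
}

int main(void)
{
	uint8_t frame[32] = { 0 };
	uint16_t offs[] = { 10, 20 };

	return tx_frame(frame, sizeof(frame), offs, 2) ? 1 : 0;
}
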
@@ -8478,6 +8565,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
8478 8565
8479 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], 8566 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
8480 rem) { 8567 rem) {
8568 u8 *mask_pat;
8569
8481 nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat), 8570 nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
8482 nla_len(pat), NULL); 8571 nla_len(pat), NULL);
8483 err = -EINVAL; 8572 err = -EINVAL;
@@ -8501,19 +8590,18 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
8501 goto error; 8590 goto error;
8502 new_triggers.patterns[i].pkt_offset = pkt_offset; 8591 new_triggers.patterns[i].pkt_offset = pkt_offset;
8503 8592
8504 new_triggers.patterns[i].mask = 8593 mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
8505 kmalloc(mask_len + pat_len, GFP_KERNEL); 8594 if (!mask_pat) {
8506 if (!new_triggers.patterns[i].mask) {
8507 err = -ENOMEM; 8595 err = -ENOMEM;
8508 goto error; 8596 goto error;
8509 } 8597 }
8510 new_triggers.patterns[i].pattern = 8598 new_triggers.patterns[i].mask = mask_pat;
8511 new_triggers.patterns[i].mask + mask_len; 8599 memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
8512 memcpy(new_triggers.patterns[i].mask,
8513 nla_data(pat_tb[NL80211_PKTPAT_MASK]),
8514 mask_len); 8600 mask_len);
8601 mask_pat += mask_len;
8602 new_triggers.patterns[i].pattern = mask_pat;
8515 new_triggers.patterns[i].pattern_len = pat_len; 8603 new_triggers.patterns[i].pattern_len = pat_len;
8516 memcpy(new_triggers.patterns[i].pattern, 8604 memcpy(mask_pat,
8517 nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), 8605 nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
8518 pat_len); 8606 pat_len);
8519 i++; 8607 i++;
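
This hunk keeps the single-allocation layout, mask followed by pattern in one kmalloc, but builds it through a local mask_pat cursor instead of poking the struct fields piecemeal; the coalesce-rule parser below gets the identical treatment, and one kfree of patterns[i].mask still releases both halves. A userspace sketch of the layout and cursor style:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt_pattern {
	unsigned char *mask;	/* start of the single allocation */
	unsigned char *pattern;	/* points mask_len bytes further in */
	size_t pattern_len;
};

static int fill_pattern(struct pkt_pattern *p,
			const unsigned char *mask, size_t mask_len,
			const unsigned char *pat, size_t pat_len)
{
	unsigned char *mask_pat = malloc(mask_len + pat_len);

	if (!mask_pat)
		return -12;		/* -ENOMEM */
	p->mask = mask_pat;
	memcpy(mask_pat, mask, mask_len);
	mask_pat += mask_len;		/* advance cursor to the pattern */
	p->pattern = mask_pat;
	p->pattern_len = pat_len;
	memcpy(mask_pat, pat, pat_len);
	return 0;
}

int main(void)
{
	unsigned char mask[] = { 0xff, 0x0f };
	unsigned char pat[] = { 0xde, 0xad, 0xbe, 0xef };
	struct pkt_pattern p;

	if (fill_pattern(&p, mask, sizeof(mask), pat, sizeof(pat)))
		return 1;
	printf("pattern byte0=%#x\n", p.pattern[0]);
	free(p.mask);			/* frees mask and pattern together */
	return 0;
}
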
@@ -8705,6 +8793,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
8705 8793
8706 nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], 8794 nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
8707 rem) { 8795 rem) {
8796 u8 *mask_pat;
8797
8708 nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat), 8798 nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
8709 nla_len(pat), NULL); 8799 nla_len(pat), NULL);
8710 if (!pat_tb[NL80211_PKTPAT_MASK] || 8800 if (!pat_tb[NL80211_PKTPAT_MASK] ||
@@ -8726,17 +8816,19 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
8726 return -EINVAL; 8816 return -EINVAL;
8727 new_rule->patterns[i].pkt_offset = pkt_offset; 8817 new_rule->patterns[i].pkt_offset = pkt_offset;
8728 8818
8729 new_rule->patterns[i].mask = 8819 mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
8730 kmalloc(mask_len + pat_len, GFP_KERNEL); 8820 if (!mask_pat)
8731 if (!new_rule->patterns[i].mask)
8732 return -ENOMEM; 8821 return -ENOMEM;
8733 new_rule->patterns[i].pattern = 8822
8734 new_rule->patterns[i].mask + mask_len; 8823 new_rule->patterns[i].mask = mask_pat;
8735 memcpy(new_rule->patterns[i].mask, 8824 memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
8736 nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); 8825 mask_len);
8826
8827 mask_pat += mask_len;
8828 new_rule->patterns[i].pattern = mask_pat;
8737 new_rule->patterns[i].pattern_len = pat_len; 8829 new_rule->patterns[i].pattern_len = pat_len;
8738 memcpy(new_rule->patterns[i].pattern, 8830 memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
8739 nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); 8831 pat_len);
8740 i++; 8832 i++;
8741 } 8833 }
8742 8834
@@ -8981,9 +9073,8 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
8981 if (wdev->p2p_started) 9073 if (wdev->p2p_started)
8982 return 0; 9074 return 0;
8983 9075
8984 err = cfg80211_can_add_interface(rdev, wdev->iftype); 9076 if (rfkill_blocked(rdev->rfkill))
8985 if (err) 9077 return -ERFKILL;
8986 return err;
8987 9078
8988 err = rdev_start_p2p_device(rdev, wdev); 9079 err = rdev_start_p2p_device(rdev, wdev);
8989 if (err) 9080 if (err)
@@ -9192,7 +9283,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
9192 enum nl80211_attrs attr, 9283 enum nl80211_attrs attr,
9193 int approxlen) 9284 int approxlen)
9194{ 9285{
9195 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 9286 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
9196 9287
9197 if (WARN_ON(!rdev->cur_cmd_info)) 9288 if (WARN_ON(!rdev->cur_cmd_info))
9198 return NULL; 9289 return NULL;
@@ -9316,7 +9407,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
9316 } 9407 }
9317 9408
9318 dev = wdev->netdev; 9409 dev = wdev->netdev;
9319 rdev = wiphy_to_dev(wdev->wiphy); 9410 rdev = wiphy_to_rdev(wdev->wiphy);
9320 9411
9321 if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { 9412 if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
9322 if (!dev) { 9413 if (!dev) {
@@ -10017,16 +10108,20 @@ static const struct genl_ops nl80211_ops[] = {
10017 10108
10018/* notification functions */ 10109/* notification functions */
10019 10110
10020void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) 10111void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
10112 enum nl80211_commands cmd)
10021{ 10113{
10022 struct sk_buff *msg; 10114 struct sk_buff *msg;
10023 struct nl80211_dump_wiphy_state state = {}; 10115 struct nl80211_dump_wiphy_state state = {};
10024 10116
10117 WARN_ON(cmd != NL80211_CMD_NEW_WIPHY &&
10118 cmd != NL80211_CMD_DEL_WIPHY);
10119
10025 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 10120 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10026 if (!msg) 10121 if (!msg)
10027 return; 10122 return;
10028 10123
10029 if (nl80211_send_wiphy(rdev, msg, 0, 0, 0, &state) < 0) { 10124 if (nl80211_send_wiphy(rdev, cmd, msg, 0, 0, 0, &state) < 0) {
10030 nlmsg_free(msg); 10125 nlmsg_free(msg);
10031 return; 10126 return;
10032 } 10127 }
@@ -10345,7 +10440,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
10345{ 10440{
10346 struct wireless_dev *wdev = dev->ieee80211_ptr; 10441 struct wireless_dev *wdev = dev->ieee80211_ptr;
10347 struct wiphy *wiphy = wdev->wiphy; 10442 struct wiphy *wiphy = wdev->wiphy;
10348 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10443 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10349 const struct ieee80211_mgmt *mgmt = (void *)buf; 10444 const struct ieee80211_mgmt *mgmt = (void *)buf;
10350 u32 cmd; 10445 u32 cmd;
10351 10446
@@ -10567,7 +10662,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
10567 const u8* ie, u8 ie_len, gfp_t gfp) 10662 const u8* ie, u8 ie_len, gfp_t gfp)
10568{ 10663{
10569 struct wireless_dev *wdev = dev->ieee80211_ptr; 10664 struct wireless_dev *wdev = dev->ieee80211_ptr;
10570 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 10665 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
10571 struct sk_buff *msg; 10666 struct sk_buff *msg;
10572 void *hdr; 10667 void *hdr;
10573 10668
@@ -10747,7 +10842,7 @@ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
10747 unsigned int duration, gfp_t gfp) 10842 unsigned int duration, gfp_t gfp)
10748{ 10843{
10749 struct wiphy *wiphy = wdev->wiphy; 10844 struct wiphy *wiphy = wdev->wiphy;
10750 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10845 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10751 10846
10752 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration); 10847 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
10753 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 10848 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
@@ -10761,7 +10856,7 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
10761 gfp_t gfp) 10856 gfp_t gfp)
10762{ 10857{
10763 struct wiphy *wiphy = wdev->wiphy; 10858 struct wiphy *wiphy = wdev->wiphy;
10764 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10859 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10765 10860
10766 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan); 10861 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
10767 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 10862 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
@@ -10773,7 +10868,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
10773 struct station_info *sinfo, gfp_t gfp) 10868 struct station_info *sinfo, gfp_t gfp)
10774{ 10869{
10775 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10870 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10776 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10871 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10777 struct sk_buff *msg; 10872 struct sk_buff *msg;
10778 10873
10779 trace_cfg80211_new_sta(dev, mac_addr, sinfo); 10874 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
@@ -10796,7 +10891,7 @@ EXPORT_SYMBOL(cfg80211_new_sta);
10796void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) 10891void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
10797{ 10892{
10798 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10893 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10799 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10894 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10800 struct sk_buff *msg; 10895 struct sk_buff *msg;
10801 void *hdr; 10896 void *hdr;
10802 10897
@@ -10833,7 +10928,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
10833 gfp_t gfp) 10928 gfp_t gfp)
10834{ 10929{
10835 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10930 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10836 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10837 struct sk_buff *msg; 10932 struct sk_buff *msg;
10838 void *hdr; 10933 void *hdr;
10839 10934
@@ -10868,7 +10963,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
10868 const u8 *addr, gfp_t gfp) 10963 const u8 *addr, gfp_t gfp)
10869{ 10964{
10870 struct wireless_dev *wdev = dev->ieee80211_ptr; 10965 struct wireless_dev *wdev = dev->ieee80211_ptr;
10871 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 10966 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
10872 struct sk_buff *msg; 10967 struct sk_buff *msg;
10873 void *hdr; 10968 void *hdr;
10874 u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); 10969 u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
@@ -10988,7 +11083,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
10988 const u8 *buf, size_t len, bool ack, gfp_t gfp) 11083 const u8 *buf, size_t len, bool ack, gfp_t gfp)
10989{ 11084{
10990 struct wiphy *wiphy = wdev->wiphy; 11085 struct wiphy *wiphy = wdev->wiphy;
10991 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11086 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10992 struct net_device *netdev = wdev->netdev; 11087 struct net_device *netdev = wdev->netdev;
10993 struct sk_buff *msg; 11088 struct sk_buff *msg;
10994 void *hdr; 11089 void *hdr;
@@ -11032,7 +11127,7 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
11032{ 11127{
11033 struct wireless_dev *wdev = dev->ieee80211_ptr; 11128 struct wireless_dev *wdev = dev->ieee80211_ptr;
11034 struct wiphy *wiphy = wdev->wiphy; 11129 struct wiphy *wiphy = wdev->wiphy;
11035 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11130 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11036 struct sk_buff *msg; 11131 struct sk_buff *msg;
11037 struct nlattr *pinfoattr; 11132 struct nlattr *pinfoattr;
11038 void *hdr; 11133 void *hdr;
@@ -11124,7 +11219,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
11124{ 11219{
11125 struct wireless_dev *wdev = dev->ieee80211_ptr; 11220 struct wireless_dev *wdev = dev->ieee80211_ptr;
11126 struct wiphy *wiphy = wdev->wiphy; 11221 struct wiphy *wiphy = wdev->wiphy;
11127 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11222 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11128 11223
11129 trace_cfg80211_gtk_rekey_notify(dev, bssid); 11224 trace_cfg80211_gtk_rekey_notify(dev, bssid);
11130 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); 11225 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
@@ -11182,7 +11277,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
11182{ 11277{
11183 struct wireless_dev *wdev = dev->ieee80211_ptr; 11278 struct wireless_dev *wdev = dev->ieee80211_ptr;
11184 struct wiphy *wiphy = wdev->wiphy; 11279 struct wiphy *wiphy = wdev->wiphy;
11185 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11280 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11186 11281
11187 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth); 11282 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
11188 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); 11283 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
@@ -11229,7 +11324,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
11229{ 11324{
11230 struct wireless_dev *wdev = dev->ieee80211_ptr; 11325 struct wireless_dev *wdev = dev->ieee80211_ptr;
11231 struct wiphy *wiphy = wdev->wiphy; 11326 struct wiphy *wiphy = wdev->wiphy;
11232 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11327 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11233 11328
11234 ASSERT_WDEV_LOCK(wdev); 11329 ASSERT_WDEV_LOCK(wdev);
11235 11330
@@ -11253,7 +11348,7 @@ void cfg80211_cqm_txe_notify(struct net_device *dev,
11253{ 11348{
11254 struct wireless_dev *wdev = dev->ieee80211_ptr; 11349 struct wireless_dev *wdev = dev->ieee80211_ptr;
11255 struct wiphy *wiphy = wdev->wiphy; 11350 struct wiphy *wiphy = wdev->wiphy;
11256 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11351 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11257 struct sk_buff *msg; 11352 struct sk_buff *msg;
11258 struct nlattr *pinfoattr; 11353 struct nlattr *pinfoattr;
11259 void *hdr; 11354 void *hdr;
@@ -11353,7 +11448,7 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
11353{ 11448{
11354 struct wireless_dev *wdev = dev->ieee80211_ptr; 11449 struct wireless_dev *wdev = dev->ieee80211_ptr;
11355 struct wiphy *wiphy = wdev->wiphy; 11450 struct wiphy *wiphy = wdev->wiphy;
11356 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11451 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11357 struct sk_buff *msg; 11452 struct sk_buff *msg;
11358 struct nlattr *pinfoattr; 11453 struct nlattr *pinfoattr;
11359 void *hdr; 11454 void *hdr;
@@ -11400,7 +11495,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
11400 u64 cookie, bool acked, gfp_t gfp) 11495 u64 cookie, bool acked, gfp_t gfp)
11401{ 11496{
11402 struct wireless_dev *wdev = dev->ieee80211_ptr; 11497 struct wireless_dev *wdev = dev->ieee80211_ptr;
11403 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11498 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11404 struct sk_buff *msg; 11499 struct sk_buff *msg;
11405 void *hdr; 11500 void *hdr;
11406 11501
@@ -11440,7 +11535,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
11440 const u8 *frame, size_t len, 11535 const u8 *frame, size_t len,
11441 int freq, int sig_dbm) 11536 int freq, int sig_dbm)
11442{ 11537{
11443 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11538 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11444 struct sk_buff *msg; 11539 struct sk_buff *msg;
11445 void *hdr; 11540 void *hdr;
11446 struct cfg80211_beacon_registration *reg; 11541 struct cfg80211_beacon_registration *reg;
@@ -11487,7 +11582,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11487 struct cfg80211_wowlan_wakeup *wakeup, 11582 struct cfg80211_wowlan_wakeup *wakeup,
11488 gfp_t gfp) 11583 gfp_t gfp)
11489{ 11584{
11490 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11585 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11491 struct sk_buff *msg; 11586 struct sk_buff *msg;
11492 void *hdr; 11587 void *hdr;
11493 int size = 200; 11588 int size = 200;
@@ -11597,7 +11692,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
11597 u16 reason_code, gfp_t gfp) 11692 u16 reason_code, gfp_t gfp)
11598{ 11693{
11599 struct wireless_dev *wdev = dev->ieee80211_ptr; 11694 struct wireless_dev *wdev = dev->ieee80211_ptr;
11600 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11695 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11601 struct sk_buff *msg; 11696 struct sk_buff *msg;
11602 void *hdr; 11697 void *hdr;
11603 11698
@@ -11649,9 +11744,15 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
11649 rcu_read_lock(); 11744 rcu_read_lock();
11650 11745
11651 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 11746 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
11652 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) 11747 bool schedule_destroy_work = false;
11748
11749 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
11653 cfg80211_mlme_unregister_socket(wdev, notify->portid); 11750 cfg80211_mlme_unregister_socket(wdev, notify->portid);
11654 11751
11752 if (wdev->owner_nlportid == notify->portid)
11753 schedule_destroy_work = true;
11754 }
11755
11655 spin_lock_bh(&rdev->beacon_registrations_lock); 11756 spin_lock_bh(&rdev->beacon_registrations_lock);
11656 list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations, 11757 list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
11657 list) { 11758 list) {
@@ -11662,11 +11763,24 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
11662 } 11763 }
11663 } 11764 }
11664 spin_unlock_bh(&rdev->beacon_registrations_lock); 11765 spin_unlock_bh(&rdev->beacon_registrations_lock);
11766
11767 if (schedule_destroy_work) {
11768 struct cfg80211_iface_destroy *destroy;
11769
11770 destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC);
11771 if (destroy) {
11772 destroy->nlportid = notify->portid;
11773 spin_lock(&rdev->destroy_list_lock);
11774 list_add(&destroy->list, &rdev->destroy_list);
11775 spin_unlock(&rdev->destroy_list_lock);
11776 schedule_work(&rdev->destroy_work);
11777 }
11778 }
11665 } 11779 }
11666 11780
11667 rcu_read_unlock(); 11781 rcu_read_unlock();
11668 11782
11669 return NOTIFY_DONE; 11783 return NOTIFY_OK;
11670} 11784}
11671 11785
11672static struct notifier_block nl80211_netlink_notifier = { 11786static struct notifier_block nl80211_netlink_notifier = {
@@ -11677,7 +11791,7 @@ void cfg80211_ft_event(struct net_device *netdev,
11677 struct cfg80211_ft_event_params *ft_event) 11791 struct cfg80211_ft_event_params *ft_event)
11678{ 11792{
11679 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; 11793 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
11680 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11794 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11681 struct sk_buff *msg; 11795 struct sk_buff *msg;
11682 void *hdr; 11796 void *hdr;
11683 11797
@@ -11724,7 +11838,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
11724 void *hdr; 11838 void *hdr;
11725 u32 nlportid; 11839 u32 nlportid;
11726 11840
11727 rdev = wiphy_to_dev(wdev->wiphy); 11841 rdev = wiphy_to_rdev(wdev->wiphy);
11728 if (!rdev->crit_proto_nlportid) 11842 if (!rdev->crit_proto_nlportid)
11729 return; 11843 return;
11730 11844
@@ -11759,7 +11873,7 @@ EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
11759void nl80211_send_ap_stopped(struct wireless_dev *wdev) 11873void nl80211_send_ap_stopped(struct wireless_dev *wdev)
11760{ 11874{
11761 struct wiphy *wiphy = wdev->wiphy; 11875 struct wiphy *wiphy = wdev->wiphy;
11762 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11876 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11763 struct sk_buff *msg; 11877 struct sk_buff *msg;
11764 void *hdr; 11878 void *hdr;
11765 11879
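
The destroy-work hunks above follow a common kernel deferral shape: the netlink notifier runs in atomic context, so interfaces owned by the closing socket are only recorded there (GFP_ATOMIC allocation onto a spinlock-protected list) and the actual teardown is pushed to a workqueue. A minimal sketch of that shape, with hypothetical names standing in for the cfg80211 types:

	struct iface_destroy_item {
		struct list_head list;
		u32 nlportid;
	};

	struct iface_owner_ctx {
		spinlock_t destroy_list_lock;
		struct list_head destroy_list;
		struct work_struct destroy_work;	/* drains destroy_list */
	};

	/* called from an atomic notifier: record the request, never sleep */
	static void record_owner_gone(struct iface_owner_ctx *ctx, u32 portid)
	{
		struct iface_destroy_item *item;

		item = kzalloc(sizeof(*item), GFP_ATOMIC);
		if (!item)
			return;		/* best effort, as in the patch */
		item->nlportid = portid;
		spin_lock(&ctx->destroy_list_lock);
		list_add(&item->list, &ctx->destroy_list);
		spin_unlock(&ctx->destroy_list_lock);
		schedule_work(&ctx->destroy_work);	/* runs in process context */
	}

The NOTIFY_DONE to NOTIFY_OK change at the end of the notifier is cosmetic in effect: both values let the notifier chain continue, NOTIFY_OK merely reports that the event was acted upon.
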
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 1e6df9630f42..49c9a482dd12 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -5,7 +5,8 @@
5 5
6int nl80211_init(void); 6int nl80211_init(void);
7void nl80211_exit(void); 7void nl80211_exit(void);
8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
9 enum nl80211_commands cmd);
9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 10void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10 struct wireless_dev *wdev); 11 struct wireless_dev *wdev);
11struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, 12struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 74d97d33c938..d95bbe348138 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -199,7 +199,7 @@ static inline int rdev_change_station(struct cfg80211_registered_device *rdev,
199} 199}
200 200
201static inline int rdev_get_station(struct cfg80211_registered_device *rdev, 201static inline int rdev_get_station(struct cfg80211_registered_device *rdev,
202 struct net_device *dev, u8 *mac, 202 struct net_device *dev, const u8 *mac,
203 struct station_info *sinfo) 203 struct station_info *sinfo)
204{ 204{
205 int ret; 205 int ret;
@@ -950,4 +950,17 @@ static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
950 return ret; 950 return ret;
951} 951}
952 952
953static inline int
954rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
955 struct net_device *dev, struct cfg80211_chan_def *chandef)
956{
957 int ret;
958
959 trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
960 ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
961 trace_rdev_return_int(&rdev->wiphy, ret);
962
963 return ret;
964}
965
953#endif /* __CFG80211_RDEV_OPS */ 966#endif /* __CFG80211_RDEV_OPS */
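
rdev_set_ap_chanwidth follows the rdev-ops.h convention: each driver callback gets an inline wrapper that emits an "enter" tracepoint, calls the op, and traces the return value, so every driver round-trip shows up in the cfg80211 trace events. Note the wrapper dereferences rdev->ops->set_ap_chanwidth unconditionally; a hypothetical caller would gate on the optional op first, in the usual cfg80211 way:

	static int try_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
					struct net_device *dev,
					struct cfg80211_chan_def *chandef)
	{
		if (!rdev->ops->set_ap_chanwidth)	/* optional driver op */
			return -EOPNOTSUPP;
		return rdev_set_ap_chanwidth(rdev, dev, chandef);
	}
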
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f59aaac586f8..558b0e3a02d8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -65,11 +65,26 @@
65#define REG_DBG_PRINT(args...) 65#define REG_DBG_PRINT(args...)
66#endif 66#endif
67 67
68/**
69 * enum reg_request_treatment - regulatory request treatment
70 *
71 * @REG_REQ_OK: continue processing the regulatory request
72 * @REG_REQ_IGNORE: ignore the regulatory request
73 * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
74 * be intersected with the current one.
75 * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
76 * regulatory settings, and no further processing is required.
77 * @REG_REQ_USER_HINT_HANDLED: a non-alpha2 user hint was handled and no
78 * further processing is required, i.e., no need to update last_request
79 * etc. This should be used for user hints that do not provide an alpha2
80 * but some other type of regulatory hint, e.g., indoor operation.
81 */
68enum reg_request_treatment { 82enum reg_request_treatment {
69 REG_REQ_OK, 83 REG_REQ_OK,
70 REG_REQ_IGNORE, 84 REG_REQ_IGNORE,
71 REG_REQ_INTERSECT, 85 REG_REQ_INTERSECT,
72 REG_REQ_ALREADY_SET, 86 REG_REQ_ALREADY_SET,
87 REG_REQ_USER_HINT_HANDLED,
73}; 88};
74 89
75static struct regulatory_request core_request_world = { 90static struct regulatory_request core_request_world = {
@@ -106,6 +121,14 @@ const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
106 */ 121 */
107static int reg_num_devs_support_basehint; 122static int reg_num_devs_support_basehint;
108 123
124/*
125 * State variable indicating whether the platform to which the devices
126 * are attached is operating in an indoor environment. The state
127 * applies to all registered devices.
128 * (protected by RTNL)
129 */
130static bool reg_is_indoor;
131
109static const struct ieee80211_regdomain *get_cfg80211_regdom(void) 132static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
110{ 133{
111 return rtnl_dereference(cfg80211_regdomain); 134 return rtnl_dereference(cfg80211_regdomain);
@@ -240,8 +263,16 @@ static char user_alpha2[2];
240module_param(ieee80211_regdom, charp, 0444); 263module_param(ieee80211_regdom, charp, 0444);
241MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 264MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
242 265
243static void reg_free_request(struct regulatory_request *lr) 266static void reg_free_request(struct regulatory_request *request)
244{ 267{
268 if (request != get_last_request())
269 kfree(request);
270}
271
272static void reg_free_last_request(void)
273{
274 struct regulatory_request *lr = get_last_request();
275
245 if (lr != &core_request_world && lr) 276 if (lr != &core_request_world && lr)
246 kfree_rcu(lr, rcu_head); 277 kfree_rcu(lr, rcu_head);
247} 278}
@@ -254,7 +285,7 @@ static void reg_update_last_request(struct regulatory_request *request)
254 if (lr == request) 285 if (lr == request)
255 return; 286 return;
256 287
257 reg_free_request(lr); 288 reg_free_last_request();
258 rcu_assign_pointer(last_request, request); 289 rcu_assign_pointer(last_request, request);
259} 290}
260 291
@@ -873,6 +904,8 @@ static u32 map_regdom_flags(u32 rd_flags)
873 channel_flags |= IEEE80211_CHAN_RADAR; 904 channel_flags |= IEEE80211_CHAN_RADAR;
874 if (rd_flags & NL80211_RRF_NO_OFDM) 905 if (rd_flags & NL80211_RRF_NO_OFDM)
875 channel_flags |= IEEE80211_CHAN_NO_OFDM; 906 channel_flags |= IEEE80211_CHAN_NO_OFDM;
907 if (rd_flags & NL80211_RRF_NO_OUTDOOR)
908 channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
876 return channel_flags; 909 return channel_flags;
877} 910}
878 911
@@ -902,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
902 if (!band_rule_found) 935 if (!band_rule_found)
903 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
904 937
905 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
906 939
907 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
908 return rr; 941 return rr;
@@ -986,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
986} 1019}
987#endif 1020#endif
988 1021
989/* 1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
990 * Note that right now we assume the desired channel bandwidth 1023 * chan->center_freq fits there.
991 * is always 20 MHz for each individual channel (HT40 uses 20 MHz 1024 * If there is no such reg_rule, disable the channel, otherwise set the
992 * per channel, the primary and the extension channel). 1025 * flags corresponding to the bandwidths allowed in the particular reg_rule
993 */ 1026 */
994static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
995 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1050,8 +1083,12 @@ static void handle_channel(struct wiphy *wiphy,
1050 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1051 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1052 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1053 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1054 bw_flags = IEEE80211_CHAN_NO_HT40; 1091 bw_flags |= IEEE80211_CHAN_NO_HT40;
1055 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1056 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1057 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
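
The hunk above also changes the 40 MHz test from "=" to "|=", so the new NO_10MHZ/NO_20MHZ bits are no longer clobbered and the restrictions accumulate from the narrowest width upwards. The same cascade recurs in handle_channel_custom() below; collected into one hypothetical helper it reads:

	static u32 reg_rule_to_bw_flags(u32 max_bandwidth_khz)
	{
		u32 bw_flags = 0;

		if (max_bandwidth_khz < MHZ_TO_KHZ(10))
			bw_flags |= IEEE80211_CHAN_NO_10MHZ;
		if (max_bandwidth_khz < MHZ_TO_KHZ(20))
			bw_flags |= IEEE80211_CHAN_NO_20MHZ;
		if (max_bandwidth_khz < MHZ_TO_KHZ(40))
			bw_flags |= IEEE80211_CHAN_NO_HT40;
		if (max_bandwidth_khz < MHZ_TO_KHZ(80))
			bw_flags |= IEEE80211_CHAN_NO_80MHZ;
		if (max_bandwidth_khz < MHZ_TO_KHZ(160))
			bw_flags |= IEEE80211_CHAN_NO_160MHZ;
		return bw_flags;
	}

For example, a rule with max_bandwidth_khz = MHZ_TO_KHZ(20) yields NO_HT40 | NO_80MHZ | NO_160MHZ: 5, 10 and 20 MHz operation remain allowed, anything wider is refused.
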
@@ -1071,6 +1108,13 @@ static void handle_channel(struct wiphy *wiphy,
1071 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1108 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1072 chan->max_reg_power = chan->max_power = chan->orig_mpwr = 1109 chan->max_reg_power = chan->max_power = chan->orig_mpwr =
1073 (int) MBM_TO_DBM(power_rule->max_eirp); 1110 (int) MBM_TO_DBM(power_rule->max_eirp);
1111
1112 if (chan->flags & IEEE80211_CHAN_RADAR) {
1113 chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1114 if (reg_rule->dfs_cac_ms)
1115 chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
1116 }
1117
1074 return; 1118 return;
1075 } 1119 }
1076 1120
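
The DFS hunk gives every radar channel a concrete CAC (channel availability check) time: a per-rule dfs_cac_ms from the regulatory database wins when set, otherwise the IEEE80211_DFS_MIN_CAC_TIME_MS minimum applies. Restated as a standalone helper (illustrative only):

	static unsigned int reg_rule_cac_ms(const struct ieee80211_reg_rule *rule)
	{
		if (rule->dfs_cac_ms)	/* per-rule override from the regdb */
			return rule->dfs_cac_ms;
		return IEEE80211_DFS_MIN_CAC_TIME_MS;	/* global minimum */
	}
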
@@ -1126,12 +1170,19 @@ static bool reg_request_cell_base(struct regulatory_request *request)
1126 return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE; 1170 return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
1127} 1171}
1128 1172
1173static bool reg_request_indoor(struct regulatory_request *request)
1174{
1175 if (request->initiator != NL80211_REGDOM_SET_BY_USER)
1176 return false;
1177 return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
1178}
1179
1129bool reg_last_request_cell_base(void) 1180bool reg_last_request_cell_base(void)
1130{ 1181{
1131 return reg_request_cell_base(get_last_request()); 1182 return reg_request_cell_base(get_last_request());
1132} 1183}
1133 1184
1134#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS 1185#ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS
1135/* Core specific check */ 1186/* Core specific check */
1136static enum reg_request_treatment 1187static enum reg_request_treatment
1137reg_ignore_cell_hint(struct regulatory_request *pending_request) 1188reg_ignore_cell_hint(struct regulatory_request *pending_request)
@@ -1471,8 +1522,12 @@ static void handle_channel_custom(struct wiphy *wiphy,
1471 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1472 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1473 1524
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1474 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1475 bw_flags = IEEE80211_CHAN_NO_HT40; 1530 bw_flags |= IEEE80211_CHAN_NO_HT40;
1476 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1477 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1478 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1568,6 +1623,11 @@ __reg_process_hint_user(struct regulatory_request *user_request)
1568{ 1623{
1569 struct regulatory_request *lr = get_last_request(); 1624 struct regulatory_request *lr = get_last_request();
1570 1625
1626 if (reg_request_indoor(user_request)) {
1627 reg_is_indoor = true;
1628 return REG_REQ_USER_HINT_HANDLED;
1629 }
1630
1571 if (reg_request_cell_base(user_request)) 1631 if (reg_request_cell_base(user_request))
1572 return reg_ignore_cell_hint(user_request); 1632 return reg_ignore_cell_hint(user_request);
1573 1633
@@ -1615,8 +1675,9 @@ reg_process_hint_user(struct regulatory_request *user_request)
1615 1675
1616 treatment = __reg_process_hint_user(user_request); 1676 treatment = __reg_process_hint_user(user_request);
1617 if (treatment == REG_REQ_IGNORE || 1677 if (treatment == REG_REQ_IGNORE ||
1618 treatment == REG_REQ_ALREADY_SET) { 1678 treatment == REG_REQ_ALREADY_SET ||
1619 kfree(user_request); 1679 treatment == REG_REQ_USER_HINT_HANDLED) {
1680 reg_free_request(user_request);
1620 return treatment; 1681 return treatment;
1621 } 1682 }
1622 1683
@@ -1676,14 +1737,15 @@ reg_process_hint_driver(struct wiphy *wiphy,
1676 case REG_REQ_OK: 1737 case REG_REQ_OK:
1677 break; 1738 break;
1678 case REG_REQ_IGNORE: 1739 case REG_REQ_IGNORE:
1679 kfree(driver_request); 1740 case REG_REQ_USER_HINT_HANDLED:
1741 reg_free_request(driver_request);
1680 return treatment; 1742 return treatment;
1681 case REG_REQ_INTERSECT: 1743 case REG_REQ_INTERSECT:
1682 /* fall through */ 1744 /* fall through */
1683 case REG_REQ_ALREADY_SET: 1745 case REG_REQ_ALREADY_SET:
1684 regd = reg_copy_regd(get_cfg80211_regdom()); 1746 regd = reg_copy_regd(get_cfg80211_regdom());
1685 if (IS_ERR(regd)) { 1747 if (IS_ERR(regd)) {
1686 kfree(driver_request); 1748 reg_free_request(driver_request);
1687 return REG_REQ_IGNORE; 1749 return REG_REQ_IGNORE;
1688 } 1750 }
1689 rcu_assign_pointer(wiphy->regd, regd); 1751 rcu_assign_pointer(wiphy->regd, regd);
@@ -1775,12 +1837,13 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
1775 case REG_REQ_OK: 1837 case REG_REQ_OK:
1776 break; 1838 break;
1777 case REG_REQ_IGNORE: 1839 case REG_REQ_IGNORE:
1840 case REG_REQ_USER_HINT_HANDLED:
1778 /* fall through */ 1841 /* fall through */
1779 case REG_REQ_ALREADY_SET: 1842 case REG_REQ_ALREADY_SET:
1780 kfree(country_ie_request); 1843 reg_free_request(country_ie_request);
1781 return treatment; 1844 return treatment;
1782 case REG_REQ_INTERSECT: 1845 case REG_REQ_INTERSECT:
1783 kfree(country_ie_request); 1846 reg_free_request(country_ie_request);
1784 /* 1847 /*
1785 * This doesn't happen yet, not sure we 1848 * This doesn't happen yet, not sure we
1786 * ever want to support it for this case. 1849 * ever want to support it for this case.
@@ -1813,7 +1876,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1813 case NL80211_REGDOM_SET_BY_USER: 1876 case NL80211_REGDOM_SET_BY_USER:
1814 treatment = reg_process_hint_user(reg_request); 1877 treatment = reg_process_hint_user(reg_request);
1815 if (treatment == REG_REQ_IGNORE || 1878 if (treatment == REG_REQ_IGNORE ||
1816 treatment == REG_REQ_ALREADY_SET) 1879 treatment == REG_REQ_ALREADY_SET ||
1880 treatment == REG_REQ_USER_HINT_HANDLED)
1817 return; 1881 return;
1818 queue_delayed_work(system_power_efficient_wq, 1882 queue_delayed_work(system_power_efficient_wq,
1819 &reg_timeout, msecs_to_jiffies(3142)); 1883 &reg_timeout, msecs_to_jiffies(3142));
@@ -1841,7 +1905,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1841 return; 1905 return;
1842 1906
1843out_free: 1907out_free:
1844 kfree(reg_request); 1908 reg_free_request(reg_request);
1845} 1909}
1846 1910
1847/* 1911/*
@@ -1857,7 +1921,7 @@ static void reg_process_pending_hints(void)
1857 1921
1858 /* When last_request->processed becomes true this will be rescheduled */ 1922 /* When last_request->processed becomes true this will be rescheduled */
1859 if (lr && !lr->processed) { 1923 if (lr && !lr->processed) {
1860 REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n"); 1924 reg_process_hint(lr);
1861 return; 1925 return;
1862 } 1926 }
1863 1927
@@ -1967,6 +2031,22 @@ int regulatory_hint_user(const char *alpha2,
1967 return 0; 2031 return 0;
1968} 2032}
1969 2033
2034int regulatory_hint_indoor_user(void)
2035{
2036 struct regulatory_request *request;
2037
2038 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
2039 if (!request)
2040 return -ENOMEM;
2041
2042 request->wiphy_idx = WIPHY_IDX_INVALID;
2043 request->initiator = NL80211_REGDOM_SET_BY_USER;
2044 request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
2045 queue_regulatory_request(request);
2046
2047 return 0;
2048}
2049
1970/* Driver hints */ 2050/* Driver hints */
1971int regulatory_hint(struct wiphy *wiphy, const char *alpha2) 2051int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
1972{ 2052{
@@ -2134,6 +2214,8 @@ static void restore_regulatory_settings(bool reset_user)
2134 2214
2135 ASSERT_RTNL(); 2215 ASSERT_RTNL();
2136 2216
2217 reg_is_indoor = false;
2218
2137 reset_regdomains(true, &world_regdom); 2219 reset_regdomains(true, &world_regdom);
2138 restore_alpha2(alpha2, reset_user); 2220 restore_alpha2(alpha2, reset_user);
2139 2221
@@ -2594,7 +2676,7 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
2594 reg_num_devs_support_basehint--; 2676 reg_num_devs_support_basehint--;
2595 2677
2596 rcu_free_regdom(get_wiphy_regdom(wiphy)); 2678 rcu_free_regdom(get_wiphy_regdom(wiphy));
2597 rcu_assign_pointer(wiphy->regd, NULL); 2679 RCU_INIT_POINTER(wiphy->regd, NULL);
2598 2680
2599 if (lr) 2681 if (lr)
2600 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); 2682 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -2614,6 +2696,40 @@ static void reg_timeout_work(struct work_struct *work)
2614 rtnl_unlock(); 2696 rtnl_unlock();
2615} 2697}
2616 2698
2699/*
2700 * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii for
2701 * UNII band definitions.
2702 */
2703int cfg80211_get_unii(int freq)
2704{
2705 /* UNII-1 */
2706 if (freq >= 5150 && freq <= 5250)
2707 return 0;
2708
2709 /* UNII-2A */
2710 if (freq > 5250 && freq <= 5350)
2711 return 1;
2712
2713 /* UNII-2B */
2714 if (freq > 5350 && freq <= 5470)
2715 return 2;
2716
2717 /* UNII-2C */
2718 if (freq > 5470 && freq <= 5725)
2719 return 3;
2720
2721 /* UNII-3 */
2722 if (freq > 5725 && freq <= 5825)
2723 return 4;
2724
2725 return -EINVAL;
2726}
2727
2728bool regulatory_indoor_allowed(void)
2729{
2730 return reg_is_indoor;
2731}
2732
2617int __init regulatory_init(void) 2733int __init regulatory_init(void)
2618{ 2734{
2619 int err = 0; 2735 int err = 0;
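
cfg80211_get_unii() gives callers a cheap classification of 5 GHz frequencies into U-NII sub-bands: 5180 MHz (channel 36) falls in the 5150-5250 range and returns 0 (UNII-1), while 5500 MHz satisfies freq > 5470 && freq <= 5725 and returns 3 (UNII-2C). A hypothetical consumer applying sub-band-specific policy might wrap it like this:

	/* true for the UNII-2A/2B/2C sub-bands, where DFS rules
	 * typically apply */
	static bool freq_in_unii_2(int center_freq)
	{
		int unii = cfg80211_get_unii(center_freq);

		return unii >= 1 && unii <= 3;	/* e.g. 5500 MHz -> 3 */
	}
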
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 37c180df34b7..5e48031ccb9a 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -25,6 +25,7 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
25 25
26int regulatory_hint_user(const char *alpha2, 26int regulatory_hint_user(const char *alpha2,
27 enum nl80211_user_reg_hint_type user_reg_hint_type); 27 enum nl80211_user_reg_hint_type user_reg_hint_type);
28int regulatory_hint_indoor_user(void);
28 29
29void wiphy_regulatory_register(struct wiphy *wiphy); 30void wiphy_regulatory_register(struct wiphy *wiphy);
30void wiphy_regulatory_deregister(struct wiphy *wiphy); 31void wiphy_regulatory_deregister(struct wiphy *wiphy);
@@ -104,4 +105,21 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
104 */ 105 */
105void regulatory_hint_disconnect(void); 106void regulatory_hint_disconnect(void);
106 107
108/**
109 * cfg80211_get_unii - get the U-NII band for the frequency
110 * @freq: the frequency for which we want to get the UNII band.
111 *
112 * Get a value specifying the U-NII band the given frequency belongs to.
113 * U-NII bands are defined by the FCC in C.F.R. 47 part 15.
114 *
115 * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
116 * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3.
117 */
118int cfg80211_get_unii(int freq);
119
120/**
121 * regulatory_indoor_allowed - is indoor operation allowed
122 */
123bool regulatory_indoor_allowed(void);
124
107#endif /* __NET_WIRELESS_REG_H */ 125#endif /* __NET_WIRELESS_REG_H */
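
Taken together, the reg.c and reg.h changes wire the indoor hint up end to end: a user hint of type NL80211_USER_REG_HINT_INDOOR is queued like any other user hint, __reg_process_hint_user() answers it with REG_REQ_USER_HINT_HANDLED and sets the RTNL-protected reg_is_indoor flag (cleared again in restore_regulatory_settings()), and regulatory_indoor_allowed() exposes the flag. A hypothetical consumer gating operation on channels that map_regdom_flags() marked IEEE80211_CHAN_INDOOR_ONLY:

	static bool chan_allowed_now(const struct ieee80211_channel *chan)
	{
		if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
		    !regulatory_indoor_allowed())
			return false;	/* indoor-only, platform not indoors */
		return true;
	}
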
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 88f108edfb58..0798c62e6085 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -81,10 +81,10 @@ static void bss_free(struct cfg80211_internal_bss *bss)
81 kfree(bss); 81 kfree(bss);
82} 82}
83 83
84static inline void bss_ref_get(struct cfg80211_registered_device *dev, 84static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
85 struct cfg80211_internal_bss *bss) 85 struct cfg80211_internal_bss *bss)
86{ 86{
87 lockdep_assert_held(&dev->bss_lock); 87 lockdep_assert_held(&rdev->bss_lock);
88 88
89 bss->refcount++; 89 bss->refcount++;
90 if (bss->pub.hidden_beacon_bss) { 90 if (bss->pub.hidden_beacon_bss) {
@@ -95,10 +95,10 @@ static inline void bss_ref_get(struct cfg80211_registered_device *dev,
95 } 95 }
96} 96}
97 97
98static inline void bss_ref_put(struct cfg80211_registered_device *dev, 98static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
99 struct cfg80211_internal_bss *bss) 99 struct cfg80211_internal_bss *bss)
100{ 100{
101 lockdep_assert_held(&dev->bss_lock); 101 lockdep_assert_held(&rdev->bss_lock);
102 102
103 if (bss->pub.hidden_beacon_bss) { 103 if (bss->pub.hidden_beacon_bss) {
104 struct cfg80211_internal_bss *hbss; 104 struct cfg80211_internal_bss *hbss;
@@ -114,10 +114,10 @@ static inline void bss_ref_put(struct cfg80211_registered_device *dev,
114 bss_free(bss); 114 bss_free(bss);
115} 115}
116 116
117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, 117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
118 struct cfg80211_internal_bss *bss) 118 struct cfg80211_internal_bss *bss)
119{ 119{
120 lockdep_assert_held(&dev->bss_lock); 120 lockdep_assert_held(&rdev->bss_lock);
121 121
122 if (!list_empty(&bss->hidden_list)) { 122 if (!list_empty(&bss->hidden_list)) {
123 /* 123 /*
@@ -134,31 +134,31 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
134 } 134 }
135 135
136 list_del_init(&bss->list); 136 list_del_init(&bss->list);
137 rb_erase(&bss->rbn, &dev->bss_tree); 137 rb_erase(&bss->rbn, &rdev->bss_tree);
138 bss_ref_put(dev, bss); 138 bss_ref_put(rdev, bss);
139 return true; 139 return true;
140} 140}
141 141
142static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, 142static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
143 unsigned long expire_time) 143 unsigned long expire_time)
144{ 144{
145 struct cfg80211_internal_bss *bss, *tmp; 145 struct cfg80211_internal_bss *bss, *tmp;
146 bool expired = false; 146 bool expired = false;
147 147
148 lockdep_assert_held(&dev->bss_lock); 148 lockdep_assert_held(&rdev->bss_lock);
149 149
150 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { 150 list_for_each_entry_safe(bss, tmp, &rdev->bss_list, list) {
151 if (atomic_read(&bss->hold)) 151 if (atomic_read(&bss->hold))
152 continue; 152 continue;
153 if (!time_after(expire_time, bss->ts)) 153 if (!time_after(expire_time, bss->ts))
154 continue; 154 continue;
155 155
156 if (__cfg80211_unlink_bss(dev, bss)) 156 if (__cfg80211_unlink_bss(rdev, bss))
157 expired = true; 157 expired = true;
158 } 158 }
159 159
160 if (expired) 160 if (expired)
161 dev->bss_generation++; 161 rdev->bss_generation++;
162} 162}
163 163
164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, 164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
@@ -238,11 +238,11 @@ void __cfg80211_scan_done(struct work_struct *wk)
238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) 238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
239{ 239{
240 trace_cfg80211_scan_done(request, aborted); 240 trace_cfg80211_scan_done(request, aborted);
241 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); 241 WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
242 242
243 request->aborted = aborted; 243 request->aborted = aborted;
244 request->notified = true; 244 request->notified = true;
245 queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk); 245 queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
246} 246}
247EXPORT_SYMBOL(cfg80211_scan_done); 247EXPORT_SYMBOL(cfg80211_scan_done);
248 248
@@ -278,15 +278,15 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
278{ 278{
279 trace_cfg80211_sched_scan_results(wiphy); 279 trace_cfg80211_sched_scan_results(wiphy);
280 /* ignore if we're not scanning */ 280 /* ignore if we're not scanning */
281 if (wiphy_to_dev(wiphy)->sched_scan_req) 281 if (wiphy_to_rdev(wiphy)->sched_scan_req)
282 queue_work(cfg80211_wq, 282 queue_work(cfg80211_wq,
283 &wiphy_to_dev(wiphy)->sched_scan_results_wk); 283 &wiphy_to_rdev(wiphy)->sched_scan_results_wk);
284} 284}
285EXPORT_SYMBOL(cfg80211_sched_scan_results); 285EXPORT_SYMBOL(cfg80211_sched_scan_results);
286 286
287void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy) 287void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
288{ 288{
289 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 289 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
290 290
291 ASSERT_RTNL(); 291 ASSERT_RTNL();
292 292
@@ -330,21 +330,21 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
330 return 0; 330 return 0;
331} 331}
332 332
333void cfg80211_bss_age(struct cfg80211_registered_device *dev, 333void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
334 unsigned long age_secs) 334 unsigned long age_secs)
335{ 335{
336 struct cfg80211_internal_bss *bss; 336 struct cfg80211_internal_bss *bss;
337 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); 337 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
338 338
339 spin_lock_bh(&dev->bss_lock); 339 spin_lock_bh(&rdev->bss_lock);
340 list_for_each_entry(bss, &dev->bss_list, list) 340 list_for_each_entry(bss, &rdev->bss_list, list)
341 bss->ts -= age_jiffies; 341 bss->ts -= age_jiffies;
342 spin_unlock_bh(&dev->bss_lock); 342 spin_unlock_bh(&rdev->bss_lock);
343} 343}
344 344
345void cfg80211_bss_expire(struct cfg80211_registered_device *dev) 345void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
346{ 346{
347 __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); 347 __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
348} 348}
349 349
350const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) 350const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -534,32 +534,34 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
534 const u8 *ssid, size_t ssid_len, 534 const u8 *ssid, size_t ssid_len,
535 u16 capa_mask, u16 capa_val) 535 u16 capa_mask, u16 capa_val)
536{ 536{
537 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
538 struct cfg80211_internal_bss *bss, *res = NULL; 538 struct cfg80211_internal_bss *bss, *res = NULL;
539 unsigned long now = jiffies; 539 unsigned long now = jiffies;
540 540
541 trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask, 541 trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
542 capa_val); 542 capa_val);
543 543
544 spin_lock_bh(&dev->bss_lock); 544 spin_lock_bh(&rdev->bss_lock);
545 545
546 list_for_each_entry(bss, &dev->bss_list, list) { 546 list_for_each_entry(bss, &rdev->bss_list, list) {
547 if ((bss->pub.capability & capa_mask) != capa_val) 547 if ((bss->pub.capability & capa_mask) != capa_val)
548 continue; 548 continue;
549 if (channel && bss->pub.channel != channel) 549 if (channel && bss->pub.channel != channel)
550 continue; 550 continue;
551 if (!is_valid_ether_addr(bss->pub.bssid))
552 continue;
551 /* Don't get expired BSS structs */ 553 /* Don't get expired BSS structs */
552 if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) && 554 if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
553 !atomic_read(&bss->hold)) 555 !atomic_read(&bss->hold))
554 continue; 556 continue;
555 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { 557 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
556 res = bss; 558 res = bss;
557 bss_ref_get(dev, res); 559 bss_ref_get(rdev, res);
558 break; 560 break;
559 } 561 }
560 } 562 }
561 563
562 spin_unlock_bh(&dev->bss_lock); 564 spin_unlock_bh(&rdev->bss_lock);
563 if (!res) 565 if (!res)
564 return NULL; 566 return NULL;
565 trace_cfg80211_return_bss(&res->pub); 567 trace_cfg80211_return_bss(&res->pub);
@@ -567,10 +569,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
567} 569}
568EXPORT_SYMBOL(cfg80211_get_bss); 570EXPORT_SYMBOL(cfg80211_get_bss);
569 571
570static void rb_insert_bss(struct cfg80211_registered_device *dev, 572static void rb_insert_bss(struct cfg80211_registered_device *rdev,
571 struct cfg80211_internal_bss *bss) 573 struct cfg80211_internal_bss *bss)
572{ 574{
573 struct rb_node **p = &dev->bss_tree.rb_node; 575 struct rb_node **p = &rdev->bss_tree.rb_node;
574 struct rb_node *parent = NULL; 576 struct rb_node *parent = NULL;
575 struct cfg80211_internal_bss *tbss; 577 struct cfg80211_internal_bss *tbss;
576 int cmp; 578 int cmp;
@@ -593,15 +595,15 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
593 } 595 }
594 596
595 rb_link_node(&bss->rbn, parent, p); 597 rb_link_node(&bss->rbn, parent, p);
596 rb_insert_color(&bss->rbn, &dev->bss_tree); 598 rb_insert_color(&bss->rbn, &rdev->bss_tree);
597} 599}
598 600
599static struct cfg80211_internal_bss * 601static struct cfg80211_internal_bss *
600rb_find_bss(struct cfg80211_registered_device *dev, 602rb_find_bss(struct cfg80211_registered_device *rdev,
601 struct cfg80211_internal_bss *res, 603 struct cfg80211_internal_bss *res,
602 enum bss_compare_mode mode) 604 enum bss_compare_mode mode)
603{ 605{
604 struct rb_node *n = dev->bss_tree.rb_node; 606 struct rb_node *n = rdev->bss_tree.rb_node;
605 struct cfg80211_internal_bss *bss; 607 struct cfg80211_internal_bss *bss;
606 int r; 608 int r;
607 609
@@ -620,7 +622,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
620 return NULL; 622 return NULL;
621} 623}
622 624
623static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, 625static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
624 struct cfg80211_internal_bss *new) 626 struct cfg80211_internal_bss *new)
625{ 627{
626 const struct cfg80211_bss_ies *ies; 628 const struct cfg80211_bss_ies *ies;
@@ -650,7 +652,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
650 652
651 /* This is the bad part ... */ 653 /* This is the bad part ... */
652 654
653 list_for_each_entry(bss, &dev->bss_list, list) { 655 list_for_each_entry(bss, &rdev->bss_list, list) {
654 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) 656 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
655 continue; 657 continue;
656 if (bss->pub.channel != new->pub.channel) 658 if (bss->pub.channel != new->pub.channel)
@@ -684,7 +686,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
684 686
685/* Returned bss is reference counted and must be cleaned up appropriately. */ 687/* Returned bss is reference counted and must be cleaned up appropriately. */
686static struct cfg80211_internal_bss * 688static struct cfg80211_internal_bss *
687cfg80211_bss_update(struct cfg80211_registered_device *dev, 689cfg80211_bss_update(struct cfg80211_registered_device *rdev,
688 struct cfg80211_internal_bss *tmp, 690 struct cfg80211_internal_bss *tmp,
689 bool signal_valid) 691 bool signal_valid)
690{ 692{
@@ -695,14 +697,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
695 697
696 tmp->ts = jiffies; 698 tmp->ts = jiffies;
697 699
698 spin_lock_bh(&dev->bss_lock); 700 spin_lock_bh(&rdev->bss_lock);
699 701
700 if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) { 702 if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
701 spin_unlock_bh(&dev->bss_lock); 703 spin_unlock_bh(&rdev->bss_lock);
702 return NULL; 704 return NULL;
703 } 705 }
704 706
705 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); 707 found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR);
706 708
707 if (found) { 709 if (found) {
708 /* Update IEs */ 710 /* Update IEs */
@@ -789,7 +791,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
789 * is allocated on the stack since it's not needed in the 791 * is allocated on the stack since it's not needed in the
790 * more common case of an update 792 * more common case of an update
791 */ 793 */
792 new = kzalloc(sizeof(*new) + dev->wiphy.bss_priv_size, 794 new = kzalloc(sizeof(*new) + rdev->wiphy.bss_priv_size,
793 GFP_ATOMIC); 795 GFP_ATOMIC);
794 if (!new) { 796 if (!new) {
795 ies = (void *)rcu_dereference(tmp->pub.beacon_ies); 797 ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
@@ -805,9 +807,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
805 INIT_LIST_HEAD(&new->hidden_list); 807 INIT_LIST_HEAD(&new->hidden_list);
806 808
807 if (rcu_access_pointer(tmp->pub.proberesp_ies)) { 809 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
808 hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN); 810 hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
809 if (!hidden) 811 if (!hidden)
810 hidden = rb_find_bss(dev, tmp, 812 hidden = rb_find_bss(rdev, tmp,
811 BSS_CMP_HIDE_NUL); 813 BSS_CMP_HIDE_NUL);
812 if (hidden) { 814 if (hidden) {
813 new->pub.hidden_beacon_bss = &hidden->pub; 815 new->pub.hidden_beacon_bss = &hidden->pub;
@@ -824,24 +826,24 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
824 * expensive search for any probe responses that should 826 * expensive search for any probe responses that should
825 * be grouped with this beacon for updates ... 827 * be grouped with this beacon for updates ...
826 */ 828 */
827 if (!cfg80211_combine_bsses(dev, new)) { 829 if (!cfg80211_combine_bsses(rdev, new)) {
828 kfree(new); 830 kfree(new);
829 goto drop; 831 goto drop;
830 } 832 }
831 } 833 }
832 834
833 list_add_tail(&new->list, &dev->bss_list); 835 list_add_tail(&new->list, &rdev->bss_list);
834 rb_insert_bss(dev, new); 836 rb_insert_bss(rdev, new);
835 found = new; 837 found = new;
836 } 838 }
837 839
838 dev->bss_generation++; 840 rdev->bss_generation++;
839 bss_ref_get(dev, found); 841 bss_ref_get(rdev, found);
840 spin_unlock_bh(&dev->bss_lock); 842 spin_unlock_bh(&rdev->bss_lock);
841 843
842 return found; 844 return found;
843 drop: 845 drop:
844 spin_unlock_bh(&dev->bss_lock); 846 spin_unlock_bh(&rdev->bss_lock);
845 return NULL; 847 return NULL;
846} 848}
847 849
@@ -889,6 +891,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
889 struct cfg80211_bss_ies *ies; 891 struct cfg80211_bss_ies *ies;
890 struct ieee80211_channel *channel; 892 struct ieee80211_channel *channel;
891 struct cfg80211_internal_bss tmp = {}, *res; 893 struct cfg80211_internal_bss tmp = {}, *res;
894 bool signal_valid;
892 895
893 if (WARN_ON(!wiphy)) 896 if (WARN_ON(!wiphy))
894 return NULL; 897 return NULL;
@@ -925,8 +928,9 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
925 rcu_assign_pointer(tmp.pub.beacon_ies, ies); 928 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
926 rcu_assign_pointer(tmp.pub.ies, ies); 929 rcu_assign_pointer(tmp.pub.ies, ies);
927 930
928 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp, 931 signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
929 rx_channel == channel); 932 wiphy->max_adj_channel_rssi_comp;
933 res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
930 if (!res) 934 if (!res)
931 return NULL; 935 return NULL;
932 936
@@ -950,6 +954,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
950 struct cfg80211_internal_bss tmp = {}, *res; 954 struct cfg80211_internal_bss tmp = {}, *res;
951 struct cfg80211_bss_ies *ies; 955 struct cfg80211_bss_ies *ies;
952 struct ieee80211_channel *channel; 956 struct ieee80211_channel *channel;
957 bool signal_valid;
953 size_t ielen = len - offsetof(struct ieee80211_mgmt, 958 size_t ielen = len - offsetof(struct ieee80211_mgmt,
954 u.probe_resp.variable); 959 u.probe_resp.variable);
955 960
@@ -997,8 +1002,9 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
997 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 1002 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
998 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 1003 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
999 1004
1000 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp, 1005 signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
1001 rx_channel == channel); 1006 wiphy->max_adj_channel_rssi_comp;
1007 res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
1002 if (!res) 1008 if (!res)
1003 return NULL; 1009 return NULL;
1004 1010
@@ -1013,7 +1019,7 @@ EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
1013 1019
1014void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1020void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1015{ 1021{
1016 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1022 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1017 struct cfg80211_internal_bss *bss; 1023 struct cfg80211_internal_bss *bss;
1018 1024
1019 if (!pub) 1025 if (!pub)
@@ -1021,15 +1027,15 @@ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1021 1027
1022 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1028 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1023 1029
1024 spin_lock_bh(&dev->bss_lock); 1030 spin_lock_bh(&rdev->bss_lock);
1025 bss_ref_get(dev, bss); 1031 bss_ref_get(rdev, bss);
1026 spin_unlock_bh(&dev->bss_lock); 1032 spin_unlock_bh(&rdev->bss_lock);
1027} 1033}
1028EXPORT_SYMBOL(cfg80211_ref_bss); 1034EXPORT_SYMBOL(cfg80211_ref_bss);
1029 1035
1030void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1036void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1031{ 1037{
1032 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1038 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1033 struct cfg80211_internal_bss *bss; 1039 struct cfg80211_internal_bss *bss;
1034 1040
1035 if (!pub) 1041 if (!pub)
@@ -1037,15 +1043,15 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1037 1043
1038 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1044 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1039 1045
1040 spin_lock_bh(&dev->bss_lock); 1046 spin_lock_bh(&rdev->bss_lock);
1041 bss_ref_put(dev, bss); 1047 bss_ref_put(rdev, bss);
1042 spin_unlock_bh(&dev->bss_lock); 1048 spin_unlock_bh(&rdev->bss_lock);
1043} 1049}
1044EXPORT_SYMBOL(cfg80211_put_bss); 1050EXPORT_SYMBOL(cfg80211_put_bss);
1045 1051
1046void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1052void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1047{ 1053{
1048 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1054 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1049 struct cfg80211_internal_bss *bss; 1055 struct cfg80211_internal_bss *bss;
1050 1056
1051 if (WARN_ON(!pub)) 1057 if (WARN_ON(!pub))
@@ -1053,12 +1059,12 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1053 1059
1054 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1060 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1055 1061
1056 spin_lock_bh(&dev->bss_lock); 1062 spin_lock_bh(&rdev->bss_lock);
1057 if (!list_empty(&bss->list)) { 1063 if (!list_empty(&bss->list)) {
1058 if (__cfg80211_unlink_bss(dev, bss)) 1064 if (__cfg80211_unlink_bss(rdev, bss))
1059 dev->bss_generation++; 1065 rdev->bss_generation++;
1060 } 1066 }
1061 spin_unlock_bh(&dev->bss_lock); 1067 spin_unlock_bh(&rdev->bss_lock);
1062} 1068}
1063EXPORT_SYMBOL(cfg80211_unlink_bss); 1069EXPORT_SYMBOL(cfg80211_unlink_bss);
1064 1070
@@ -1075,7 +1081,7 @@ cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
1075 if (!dev) 1081 if (!dev)
1076 return ERR_PTR(-ENODEV); 1082 return ERR_PTR(-ENODEV);
1077 if (dev->ieee80211_ptr) 1083 if (dev->ieee80211_ptr)
1078 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); 1084 rdev = wiphy_to_rdev(dev->ieee80211_ptr->wiphy);
1079 else 1085 else
1080 rdev = ERR_PTR(-ENODEV); 1086 rdev = ERR_PTR(-ENODEV);
1081 dev_put(dev); 1087 dev_put(dev);
@@ -1155,7 +1161,11 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1155 int k; 1161 int k;
1156 int wiphy_freq = wiphy->bands[band]->channels[j].center_freq; 1162 int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
1157 for (k = 0; k < wreq->num_channels; k++) { 1163 for (k = 0; k < wreq->num_channels; k++) {
1158 int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]); 1164 struct iw_freq *freq =
1165 &wreq->channel_list[k];
1166 int wext_freq =
1167 cfg80211_wext_freq(freq);
1168
1159 if (wext_freq == wiphy_freq) 1169 if (wext_freq == wiphy_freq)
1160 goto wext_freq_found; 1170 goto wext_freq_found;
1161 } 1171 }
@@ -1467,7 +1477,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1467} 1477}
1468 1478
1469 1479
1470static int ieee80211_scan_results(struct cfg80211_registered_device *dev, 1480static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
1471 struct iw_request_info *info, 1481 struct iw_request_info *info,
1472 char *buf, size_t len) 1482 char *buf, size_t len)
1473{ 1483{
@@ -1475,18 +1485,18 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
1475 char *end_buf = buf + len; 1485 char *end_buf = buf + len;
1476 struct cfg80211_internal_bss *bss; 1486 struct cfg80211_internal_bss *bss;
1477 1487
1478 spin_lock_bh(&dev->bss_lock); 1488 spin_lock_bh(&rdev->bss_lock);
1479 cfg80211_bss_expire(dev); 1489 cfg80211_bss_expire(rdev);
1480 1490
1481 list_for_each_entry(bss, &dev->bss_list, list) { 1491 list_for_each_entry(bss, &rdev->bss_list, list) {
1482 if (buf + len - current_ev <= IW_EV_ADDR_LEN) { 1492 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
1483 spin_unlock_bh(&dev->bss_lock); 1493 spin_unlock_bh(&rdev->bss_lock);
1484 return -E2BIG; 1494 return -E2BIG;
1485 } 1495 }
1486 current_ev = ieee80211_bss(&dev->wiphy, info, bss, 1496 current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
1487 current_ev, end_buf); 1497 current_ev, end_buf);
1488 } 1498 }
1489 spin_unlock_bh(&dev->bss_lock); 1499 spin_unlock_bh(&rdev->bss_lock);
1490 return current_ev - buf; 1500 return current_ev - buf;
1491} 1501}
1492 1502
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 3546a77033de..8bbeeb302216 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -59,7 +59,7 @@ static void cfg80211_sme_free(struct wireless_dev *wdev)
59 59
60static int cfg80211_conn_scan(struct wireless_dev *wdev) 60static int cfg80211_conn_scan(struct wireless_dev *wdev)
61{ 61{
62 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 62 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
63 struct cfg80211_scan_request *request; 63 struct cfg80211_scan_request *request;
64 int n_channels, err; 64 int n_channels, err;
65 65
@@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
130 130
131static int cfg80211_conn_do_work(struct wireless_dev *wdev) 131static int cfg80211_conn_do_work(struct wireless_dev *wdev)
132{ 132{
133 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 133 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
134 struct cfg80211_connect_params *params; 134 struct cfg80211_connect_params *params;
135 struct cfg80211_assoc_request req = {}; 135 struct cfg80211_assoc_request req = {};
136 int err; 136 int err;
@@ -149,7 +149,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
149 case CFG80211_CONN_SCAN_AGAIN: 149 case CFG80211_CONN_SCAN_AGAIN:
150 return cfg80211_conn_scan(wdev); 150 return cfg80211_conn_scan(wdev);
151 case CFG80211_CONN_AUTHENTICATE_NEXT: 151 case CFG80211_CONN_AUTHENTICATE_NEXT:
152 BUG_ON(!rdev->ops->auth); 152 if (WARN_ON(!rdev->ops->auth))
153 return -EOPNOTSUPP;
153 wdev->conn->state = CFG80211_CONN_AUTHENTICATING; 154 wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
154 return cfg80211_mlme_auth(rdev, wdev->netdev, 155 return cfg80211_mlme_auth(rdev, wdev->netdev,
155 params->channel, params->auth_type, 156 params->channel, params->auth_type,
@@ -161,7 +162,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
161 case CFG80211_CONN_AUTH_FAILED: 162 case CFG80211_CONN_AUTH_FAILED:
162 return -ENOTCONN; 163 return -ENOTCONN;
163 case CFG80211_CONN_ASSOCIATE_NEXT: 164 case CFG80211_CONN_ASSOCIATE_NEXT:
164 BUG_ON(!rdev->ops->assoc); 165 if (WARN_ON(!rdev->ops->assoc))
166 return -EOPNOTSUPP;
165 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 167 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
166 if (wdev->conn->prev_bssid_valid) 168 if (wdev->conn->prev_bssid_valid)
167 req.prev_bssid = wdev->conn->prev_bssid; 169 req.prev_bssid = wdev->conn->prev_bssid;
@@ -244,7 +246,7 @@ void cfg80211_conn_work(struct work_struct *work)
244/* Returned bss is reference counted and must be cleaned up appropriately. */ 246/* Returned bss is reference counted and must be cleaned up appropriately. */
245static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) 247static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
246{ 248{
247 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 249 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
248 struct cfg80211_bss *bss; 250 struct cfg80211_bss *bss;
249 u16 capa = WLAN_CAPABILITY_ESS; 251 u16 capa = WLAN_CAPABILITY_ESS;
250 252
@@ -274,7 +276,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
274static void __cfg80211_sme_scan_done(struct net_device *dev) 276static void __cfg80211_sme_scan_done(struct net_device *dev)
275{ 277{
276 struct wireless_dev *wdev = dev->ieee80211_ptr; 278 struct wireless_dev *wdev = dev->ieee80211_ptr;
277 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 279 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
278 struct cfg80211_bss *bss; 280 struct cfg80211_bss *bss;
279 281
280 ASSERT_WDEV_LOCK(wdev); 282 ASSERT_WDEV_LOCK(wdev);
@@ -305,7 +307,7 @@ void cfg80211_sme_scan_done(struct net_device *dev)
305void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) 307void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
306{ 308{
307 struct wiphy *wiphy = wdev->wiphy; 309 struct wiphy *wiphy = wdev->wiphy;
308 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 310 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
309 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 311 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
310 u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); 312 u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
311 313
@@ -351,7 +353,7 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
351 353
352bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) 354bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
353{ 355{
354 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 356 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
355 357
356 if (!wdev->conn) 358 if (!wdev->conn)
357 return false; 359 return false;
@@ -385,7 +387,7 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
385 387
386void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) 388void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
387{ 389{
388 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 390 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
389 391
390 if (!wdev->conn) 392 if (!wdev->conn)
391 return; 393 return;
@@ -396,7 +398,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
396 398
397void cfg80211_sme_disassoc(struct wireless_dev *wdev) 399void cfg80211_sme_disassoc(struct wireless_dev *wdev)
398{ 400{
399 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 401 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
400 402
401 if (!wdev->conn) 403 if (!wdev->conn)
402 return; 404 return;
@@ -407,7 +409,7 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
407 409
408void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) 410void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
409{ 411{
410 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 412 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
411 413
412 if (!wdev->conn) 414 if (!wdev->conn)
413 return; 415 return;
@@ -420,7 +422,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
420 struct cfg80211_connect_params *connect, 422 struct cfg80211_connect_params *connect,
421 const u8 *prev_bssid) 423 const u8 *prev_bssid)
422{ 424{
423 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 425 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
424 struct cfg80211_bss *bss; 426 struct cfg80211_bss *bss;
425 int err; 427 int err;
426 428
@@ -467,7 +469,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
467 } 469 }
468 470
469 wdev->conn->params.ssid = wdev->ssid; 471 wdev->conn->params.ssid = wdev->ssid;
470 wdev->conn->params.ssid_len = connect->ssid_len; 472 wdev->conn->params.ssid_len = wdev->ssid_len;
471 473
472 /* see if we have the bss already */ 474 /* see if we have the bss already */
473 bss = cfg80211_get_conn_bss(wdev); 475 bss = cfg80211_get_conn_bss(wdev);
@@ -479,7 +481,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
479 481
480 /* we're good if we have a matching bss struct */ 482 /* we're good if we have a matching bss struct */
481 if (bss) { 483 if (bss) {
482 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
483 err = cfg80211_conn_do_work(wdev); 484 err = cfg80211_conn_do_work(wdev);
484 cfg80211_put_bss(wdev->wiphy, bss); 485 cfg80211_put_bss(wdev->wiphy, bss);
485 } else { 486 } else {
@@ -505,7 +506,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
505 506
506static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) 507static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason)
507{ 508{
508 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 509 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
509 int err; 510 int err;
510 511
511 if (!wdev->conn) 512 if (!wdev->conn)
@@ -593,7 +594,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
593 return; 594 return;
594 } 595 }
595 596
596 nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, 597 nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
597 bssid, req_ie, req_ie_len, 598 bssid, req_ie, req_ie_len,
598 resp_ie, resp_ie_len, 599 resp_ie, resp_ie_len,
599 status, GFP_KERNEL); 600 status, GFP_KERNEL);
@@ -624,7 +625,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
624#endif 625#endif
625 626
626 if (!bss && (status == WLAN_STATUS_SUCCESS)) { 627 if (!bss && (status == WLAN_STATUS_SUCCESS)) {
627 WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); 628 WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
628 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 629 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
629 wdev->ssid, wdev->ssid_len, 630 wdev->ssid, wdev->ssid_len,
630 WLAN_CAPABILITY_ESS, 631 WLAN_CAPABILITY_ESS,
@@ -687,7 +688,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
687 u16 status, gfp_t gfp) 688 u16 status, gfp_t gfp)
688{ 689{
689 struct wireless_dev *wdev = dev->ieee80211_ptr; 690 struct wireless_dev *wdev = dev->ieee80211_ptr;
690 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 691 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
691 struct cfg80211_event *ev; 692 struct cfg80211_event *ev;
692 unsigned long flags; 693 unsigned long flags;
693 694
@@ -742,7 +743,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
742 cfg80211_hold_bss(bss_from_pub(bss)); 743 cfg80211_hold_bss(bss_from_pub(bss));
743 wdev->current_bss = bss_from_pub(bss); 744 wdev->current_bss = bss_from_pub(bss);
744 745
745 nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid, 746 nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
747 wdev->netdev, bss->bssid,
746 req_ie, req_ie_len, resp_ie, resp_ie_len, 748 req_ie, req_ie_len, resp_ie, resp_ie_len,
747 GFP_KERNEL); 749 GFP_KERNEL);
748 750
@@ -801,7 +803,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
801 size_t resp_ie_len, gfp_t gfp) 803 size_t resp_ie_len, gfp_t gfp)
802{ 804{
803 struct wireless_dev *wdev = dev->ieee80211_ptr; 805 struct wireless_dev *wdev = dev->ieee80211_ptr;
804 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 806 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
805 struct cfg80211_event *ev; 807 struct cfg80211_event *ev;
806 unsigned long flags; 808 unsigned long flags;
807 809
@@ -834,7 +836,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
834 size_t ie_len, u16 reason, bool from_ap) 836 size_t ie_len, u16 reason, bool from_ap)
835{ 837{
836 struct wireless_dev *wdev = dev->ieee80211_ptr; 838 struct wireless_dev *wdev = dev->ieee80211_ptr;
837 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 839 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
838 int i; 840 int i;
839#ifdef CONFIG_CFG80211_WEXT 841#ifdef CONFIG_CFG80211_WEXT
840 union iwreq_data wrqu; 842 union iwreq_data wrqu;
@@ -877,10 +879,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
877} 879}
878 880
879void cfg80211_disconnected(struct net_device *dev, u16 reason, 881void cfg80211_disconnected(struct net_device *dev, u16 reason,
880 u8 *ie, size_t ie_len, gfp_t gfp) 882 const u8 *ie, size_t ie_len, gfp_t gfp)
881{ 883{
882 struct wireless_dev *wdev = dev->ieee80211_ptr; 884 struct wireless_dev *wdev = dev->ieee80211_ptr;
883 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 885 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
884 struct cfg80211_event *ev; 886 struct cfg80211_event *ev;
885 unsigned long flags; 887 unsigned long flags;
886 888
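
Note on the BUG_ON() to WARN_ON() conversions in the sme.c hunks above: a missing driver callback now logs a backtrace and fails the operation instead of panicking the machine. A minimal sketch of the resulting pattern, with a hypothetical ops table standing in for struct cfg80211_ops:

	struct demo_ops {
		int (*auth)(void *priv);
	};

	static int demo_do_auth(struct demo_ops *ops, void *priv)
	{
		/* WARN_ON() evaluates to its condition, so a missing hook
		 * warns and the caller sees -EOPNOTSUPP rather than BUG()
		 * halting the kernel.
		 */
		if (WARN_ON(!ops->auth))
			return -EOPNOTSUPP;

		return ops->auth(priv);
	}
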
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index aabccf13e07b..560ed77084e9 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1876,29 +1876,33 @@ TRACE_EVENT(rdev_channel_switch,
1876 WIPHY_ENTRY 1876 WIPHY_ENTRY
1877 NETDEV_ENTRY 1877 NETDEV_ENTRY
1878 CHAN_DEF_ENTRY 1878 CHAN_DEF_ENTRY
1879 __field(u16, counter_offset_beacon)
1880 __field(u16, counter_offset_presp)
1881 __field(bool, radar_required) 1879 __field(bool, radar_required)
1882 __field(bool, block_tx) 1880 __field(bool, block_tx)
1883 __field(u8, count) 1881 __field(u8, count)
1882 __dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
1883 __dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
1884 ), 1884 ),
1885 TP_fast_assign( 1885 TP_fast_assign(
1886 WIPHY_ASSIGN; 1886 WIPHY_ASSIGN;
1887 NETDEV_ASSIGN; 1887 NETDEV_ASSIGN;
1888 CHAN_DEF_ASSIGN(&params->chandef); 1888 CHAN_DEF_ASSIGN(&params->chandef);
1889 __entry->counter_offset_beacon = params->counter_offset_beacon;
1890 __entry->counter_offset_presp = params->counter_offset_presp;
1891 __entry->radar_required = params->radar_required; 1889 __entry->radar_required = params->radar_required;
1892 __entry->block_tx = params->block_tx; 1890 __entry->block_tx = params->block_tx;
1893 __entry->count = params->count; 1891 __entry->count = params->count;
1892 memcpy(__get_dynamic_array(bcn_ofs),
1893 params->counter_offsets_beacon,
1894 params->n_counter_offsets_beacon * sizeof(u16));
1895
1896 /* probe response offsets are optional */
1897 if (params->n_counter_offsets_presp)
1898 memcpy(__get_dynamic_array(pres_ofs),
1899 params->counter_offsets_presp,
1900 params->n_counter_offsets_presp * sizeof(u16));
1894 ), 1901 ),
1895 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT 1902 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
1896 ", block_tx: %d, count: %u, radar_required: %d" 1903 ", block_tx: %d, count: %u, radar_required: %d",
1897 ", counter offsets (beacon/presp): %u/%u",
1898 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, 1904 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
1899 __entry->block_tx, __entry->count, __entry->radar_required, 1905 __entry->block_tx, __entry->count, __entry->radar_required)
1900 __entry->counter_offset_beacon,
1901 __entry->counter_offset_presp)
1902); 1906);
1903 1907
1904TRACE_EVENT(rdev_set_qos_map, 1908TRACE_EVENT(rdev_set_qos_map,
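
Since a channel-switch request now carries a variable number of counter offsets, the rdev_channel_switch entry above trades two fixed __field()s for __dynamic_array() regions sized per event. A condensed sketch of the idiom (event and argument names hypothetical):

	TRACE_EVENT(demo_offsets,
		TP_PROTO(const u16 *ofs, unsigned int n),
		TP_ARGS(ofs, n),
		TP_STRUCT__entry(
			__field(unsigned int, n)
			__dynamic_array(u16, ofs, n)	/* reserved per event */
		),
		TP_fast_assign(
			__entry->n = n;
			memcpy(__get_dynamic_array(ofs), ofs,
			       n * sizeof(u16));
		),
		TP_printk("n=%u", __entry->n)
	);
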
@@ -1919,6 +1923,24 @@ TRACE_EVENT(rdev_set_qos_map,
1919 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des) 1923 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
1920); 1924);
1921 1925
1926TRACE_EVENT(rdev_set_ap_chanwidth,
1927 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1928 struct cfg80211_chan_def *chandef),
1929 TP_ARGS(wiphy, netdev, chandef),
1930 TP_STRUCT__entry(
1931 WIPHY_ENTRY
1932 NETDEV_ENTRY
1933 CHAN_DEF_ENTRY
1934 ),
1935 TP_fast_assign(
1936 WIPHY_ASSIGN;
1937 NETDEV_ASSIGN;
1938 CHAN_DEF_ASSIGN(chandef);
1939 ),
1940 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
1941 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
1942);
1943
1922/************************************************************* 1944/*************************************************************
1923 * cfg80211 exported functions traces * 1945 * cfg80211 exported functions traces *
1924 *************************************************************/ 1946 *************************************************************/
@@ -2193,18 +2215,21 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
2193); 2215);
2194 2216
2195TRACE_EVENT(cfg80211_reg_can_beacon, 2217TRACE_EVENT(cfg80211_reg_can_beacon,
2196 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), 2218 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
2197 TP_ARGS(wiphy, chandef), 2219 enum nl80211_iftype iftype),
2220 TP_ARGS(wiphy, chandef, iftype),
2198 TP_STRUCT__entry( 2221 TP_STRUCT__entry(
2199 WIPHY_ENTRY 2222 WIPHY_ENTRY
2200 CHAN_DEF_ENTRY 2223 CHAN_DEF_ENTRY
2224 __field(enum nl80211_iftype, iftype)
2201 ), 2225 ),
2202 TP_fast_assign( 2226 TP_fast_assign(
2203 WIPHY_ASSIGN; 2227 WIPHY_ASSIGN;
2204 CHAN_DEF_ASSIGN(chandef); 2228 CHAN_DEF_ASSIGN(chandef);
2229 __entry->iftype = iftype;
2205 ), 2230 ),
2206 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, 2231 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
2207 WIPHY_PR_ARG, CHAN_DEF_PR_ARG) 2232 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
2208); 2233);
2209 2234
2210TRACE_EVENT(cfg80211_chandef_dfs_required, 2235TRACE_EVENT(cfg80211_chandef_dfs_required,
@@ -2615,6 +2640,21 @@ TRACE_EVENT(cfg80211_ft_event,
2615 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap)) 2640 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
2616); 2641);
2617 2642
2643TRACE_EVENT(cfg80211_stop_iface,
2644 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
2645 TP_ARGS(wiphy, wdev),
2646 TP_STRUCT__entry(
2647 WIPHY_ENTRY
2648 WDEV_ENTRY
2649 ),
2650 TP_fast_assign(
2651 WIPHY_ASSIGN;
2652 WDEV_ASSIGN;
2653 ),
2654 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
2655 WIPHY_PR_ARG, WDEV_PR_ARG)
2656);
2657
2618#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 2658#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2619 2659
2620#undef TRACE_INCLUDE_PATH 2660#undef TRACE_INCLUDE_PATH
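
The new cfg80211_stop_iface tracepoint above presumably pairs with a driver-facing helper added elsewhere in this series; a sketch of the assumed shape (signature inferred from the trace arguments plus a gfp flag), which would queue the EVENT_STOPPED handled in the util.c hunk further below:

	void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
				 gfp_t gfp);

	/* e.g. a driver abandoning an interface after firmware death: */
	cfg80211_stop_iface(priv->wiphy, &vif->wdev, GFP_KERNEL);
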
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e5872ff2c27c..728f1c0dc70d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -476,7 +476,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
476EXPORT_SYMBOL(ieee80211_data_to_8023); 476EXPORT_SYMBOL(ieee80211_data_to_8023);
477 477
478int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, 478int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
479 enum nl80211_iftype iftype, u8 *bssid, bool qos) 479 enum nl80211_iftype iftype,
480 const u8 *bssid, bool qos)
480{ 481{
481 struct ieee80211_hdr hdr; 482 struct ieee80211_hdr hdr;
482 u16 hdrlen, ethertype; 483 u16 hdrlen, ethertype;
@@ -770,7 +771,7 @@ EXPORT_SYMBOL(ieee80211_bss_get_ie);
770 771
771void cfg80211_upload_connect_keys(struct wireless_dev *wdev) 772void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
772{ 773{
773 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 774 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
774 struct net_device *dev = wdev->netdev; 775 struct net_device *dev = wdev->netdev;
775 int i; 776 int i;
776 777
@@ -839,6 +840,9 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
839 __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid, 840 __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
840 ev->ij.channel); 841 ev->ij.channel);
841 break; 842 break;
843 case EVENT_STOPPED:
844 __cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
845 break;
842 } 846 }
843 wdev_unlock(wdev); 847 wdev_unlock(wdev);
844 848
@@ -888,11 +892,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
888 return -EBUSY; 892 return -EBUSY;
889 893
890 if (ntype != otype && netif_running(dev)) { 894 if (ntype != otype && netif_running(dev)) {
891 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
892 ntype);
893 if (err)
894 return err;
895
896 dev->ieee80211_ptr->use_4addr = false; 895 dev->ieee80211_ptr->use_4addr = false;
897 dev->ieee80211_ptr->mesh_id_up_len = 0; 896 dev->ieee80211_ptr->mesh_id_up_len = 0;
898 wdev_lock(dev->ieee80211_ptr); 897 wdev_lock(dev->ieee80211_ptr);
@@ -1268,6 +1267,120 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
1268 return res; 1267 return res;
1269} 1268}
1270 1269
1270int cfg80211_iter_combinations(struct wiphy *wiphy,
1271 const int num_different_channels,
1272 const u8 radar_detect,
1273 const int iftype_num[NUM_NL80211_IFTYPES],
1274 void (*iter)(const struct ieee80211_iface_combination *c,
1275 void *data),
1276 void *data)
1277{
1278 const struct ieee80211_regdomain *regdom;
1279 enum nl80211_dfs_regions region = 0;
1280 int i, j, iftype;
1281 int num_interfaces = 0;
1282 u32 used_iftypes = 0;
1283
1284 if (radar_detect) {
1285 rcu_read_lock();
1286 regdom = rcu_dereference(cfg80211_regdomain);
1287 if (regdom)
1288 region = regdom->dfs_region;
1289 rcu_read_unlock();
1290 }
1291
1292 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1293 num_interfaces += iftype_num[iftype];
1294 if (iftype_num[iftype] > 0 &&
1295 !(wiphy->software_iftypes & BIT(iftype)))
1296 used_iftypes |= BIT(iftype);
1297 }
1298
1299 for (i = 0; i < wiphy->n_iface_combinations; i++) {
1300 const struct ieee80211_iface_combination *c;
1301 struct ieee80211_iface_limit *limits;
1302 u32 all_iftypes = 0;
1303
1304 c = &wiphy->iface_combinations[i];
1305
1306 if (num_interfaces > c->max_interfaces)
1307 continue;
1308 if (num_different_channels > c->num_different_channels)
1309 continue;
1310
1311 limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
1312 GFP_KERNEL);
1313 if (!limits)
1314 return -ENOMEM;
1315
1316 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1317 if (wiphy->software_iftypes & BIT(iftype))
1318 continue;
1319 for (j = 0; j < c->n_limits; j++) {
1320 all_iftypes |= limits[j].types;
1321 if (!(limits[j].types & BIT(iftype)))
1322 continue;
1323 if (limits[j].max < iftype_num[iftype])
1324 goto cont;
1325 limits[j].max -= iftype_num[iftype];
1326 }
1327 }
1328
1329 if (radar_detect != (c->radar_detect_widths & radar_detect))
1330 goto cont;
1331
1332 if (radar_detect && c->radar_detect_regions &&
1333 !(c->radar_detect_regions & BIT(region)))
1334 goto cont;
1335
1336 /* Finally check that all iftypes that we're currently
1337 * using are actually part of this combination. If they
1338 * aren't then we can't use this combination and have
1339 * to continue to the next.
1340 */
1341 if ((all_iftypes & used_iftypes) != used_iftypes)
1342 goto cont;
1343
1344 /* This combination covered all interface types and
1345 * supported the requested numbers, so we're good.
1346 */
1347
1348 (*iter)(c, data);
1349 cont:
1350 kfree(limits);
1351 }
1352
1353 return 0;
1354}
1355EXPORT_SYMBOL(cfg80211_iter_combinations);
1356
1357static void
1358cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
1359 void *data)
1360{
1361 int *num = data;
1362 (*num)++;
1363}
1364
1365int cfg80211_check_combinations(struct wiphy *wiphy,
1366 const int num_different_channels,
1367 const u8 radar_detect,
1368 const int iftype_num[NUM_NL80211_IFTYPES])
1369{
1370 int err, num = 0;
1371
1372 err = cfg80211_iter_combinations(wiphy, num_different_channels,
1373 radar_detect, iftype_num,
1374 cfg80211_iter_sum_ifcombs, &num);
1375 if (err)
1376 return err;
1377 if (num == 0)
1378 return -EBUSY;
1379
1380 return 0;
1381}
1382EXPORT_SYMBOL(cfg80211_check_combinations);
1383
1271int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, 1384int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1272 struct wireless_dev *wdev, 1385 struct wireless_dev *wdev,
1273 enum nl80211_iftype iftype, 1386 enum nl80211_iftype iftype,
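
The two helpers added above split the old open-coded combination loop into a reusable iterator plus a boolean-style check; the refactor in the next hunks turns cfg80211_can_use_iftype_chan() into a thin caller. A hypothetical driver-side use of the exported check before adding an AP interface:

	static int demo_can_add_ap(struct wiphy *wiphy)
	{
		int iftype_num[NUM_NL80211_IFTYPES] = { 0 };

		iftype_num[NL80211_IFTYPE_STATION] = 1;	/* existing STA */
		iftype_num[NL80211_IFTYPE_AP] = 1;	/* AP to be added */

		/* one channel in use, no radar detection required */
		return cfg80211_check_combinations(wiphy, 1, 0, iftype_num);
	}

Per the code above, -EBUSY means no advertised interface combination covers the request; 0 means at least one does.
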
@@ -1276,7 +1389,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1276 u8 radar_detect) 1389 u8 radar_detect)
1277{ 1390{
1278 struct wireless_dev *wdev_iter; 1391 struct wireless_dev *wdev_iter;
1279 u32 used_iftypes = BIT(iftype);
1280 int num[NUM_NL80211_IFTYPES]; 1392 int num[NUM_NL80211_IFTYPES];
1281 struct ieee80211_channel 1393 struct ieee80211_channel
1282 *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS]; 1394 *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
@@ -1284,7 +1396,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1284 enum cfg80211_chan_mode chmode; 1396 enum cfg80211_chan_mode chmode;
1285 int num_different_channels = 0; 1397 int num_different_channels = 0;
1286 int total = 1; 1398 int total = 1;
1287 int i, j; 1399 int i;
1288 1400
1289 ASSERT_RTNL(); 1401 ASSERT_RTNL();
1290 1402
@@ -1306,6 +1418,11 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1306 1418
1307 num[iftype] = 1; 1419 num[iftype] = 1;
1308 1420
1421 /* TODO: We'll probably not need this anymore, since this
1422 * should only be called with CHAN_MODE_UNDEFINED. There are
1423 * still a couple of pending calls where other chanmodes are
1424 * used, but we should get rid of them.
1425 */
1309 switch (chanmode) { 1426 switch (chanmode) {
1310 case CHAN_MODE_UNDEFINED: 1427 case CHAN_MODE_UNDEFINED:
1311 break; 1428 break;
@@ -1369,65 +1486,13 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1369 1486
1370 num[wdev_iter->iftype]++; 1487 num[wdev_iter->iftype]++;
1371 total++; 1488 total++;
1372 used_iftypes |= BIT(wdev_iter->iftype);
1373 } 1489 }
1374 1490
1375 if (total == 1 && !radar_detect) 1491 if (total == 1 && !radar_detect)
1376 return 0; 1492 return 0;
1377 1493
1378 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 1494 return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
1379 const struct ieee80211_iface_combination *c; 1495 radar_detect, num);
1380 struct ieee80211_iface_limit *limits;
1381 u32 all_iftypes = 0;
1382
1383 c = &rdev->wiphy.iface_combinations[i];
1384
1385 if (total > c->max_interfaces)
1386 continue;
1387 if (num_different_channels > c->num_different_channels)
1388 continue;
1389
1390 limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
1391 GFP_KERNEL);
1392 if (!limits)
1393 return -ENOMEM;
1394
1395 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1396 if (rdev->wiphy.software_iftypes & BIT(iftype))
1397 continue;
1398 for (j = 0; j < c->n_limits; j++) {
1399 all_iftypes |= limits[j].types;
1400 if (!(limits[j].types & BIT(iftype)))
1401 continue;
1402 if (limits[j].max < num[iftype])
1403 goto cont;
1404 limits[j].max -= num[iftype];
1405 }
1406 }
1407
1408 if (radar_detect && !(c->radar_detect_widths & radar_detect))
1409 goto cont;
1410
1411 /*
1412 * Finally check that all iftypes that we're currently
1413 * using are actually part of this combination. If they
1414 * aren't then we can't use this combination and have
1415 * to continue to the next.
1416 */
1417 if ((all_iftypes & used_iftypes) != used_iftypes)
1418 goto cont;
1419
1420 /*
1421 * This combination covered all interface types and
1422 * supported the requested numbers, so we're good.
1423 */
1424 kfree(limits);
1425 return 0;
1426 cont:
1427 kfree(limits);
1428 }
1429
1430 return -EBUSY;
1431} 1496}
1432 1497
1433int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, 1498int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -1481,6 +1546,24 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy)
1481} 1546}
1482EXPORT_SYMBOL(ieee80211_get_num_supported_channels); 1547EXPORT_SYMBOL(ieee80211_get_num_supported_channels);
1483 1548
1549int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
1550 struct station_info *sinfo)
1551{
1552 struct cfg80211_registered_device *rdev;
1553 struct wireless_dev *wdev;
1554
1555 wdev = dev->ieee80211_ptr;
1556 if (!wdev)
1557 return -EOPNOTSUPP;
1558
1559 rdev = wiphy_to_rdev(wdev->wiphy);
1560 if (!rdev->ops->get_station)
1561 return -EOPNOTSUPP;
1562
1563 return rdev_get_station(rdev, dev, mac_addr, sinfo);
1564}
1565EXPORT_SYMBOL(cfg80211_get_station);
1566
1484/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ 1567/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
1485/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ 1568/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
1486const unsigned char rfc1042_header[] __aligned(2) = 1569const unsigned char rfc1042_header[] __aligned(2) =
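
The newly exported cfg80211_get_station() gives other kernel code a validated path to rdev_get_station(). A hypothetical caller; the STATION_INFO_SIGNAL filled-flag check is an assumption based on the station_info API of this era:

	static int demo_peer_signal(struct net_device *dev, const u8 *peer)
	{
		struct station_info sinfo;
		int err;

		err = cfg80211_get_station(dev, peer, &sinfo);
		if (err)
			return err;	/* not a wdev, no op, or driver error */

		if (!(sinfo.filled & STATION_INFO_SIGNAL))
			return -ENODATA;

		pr_debug("peer signal: %d dBm\n", sinfo.signal);
		return 0;
	}
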
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 5661a54ac7ee..11120bb14162 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -73,7 +73,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
73 struct vif_params vifparams; 73 struct vif_params vifparams;
74 enum nl80211_iftype type; 74 enum nl80211_iftype type;
75 75
76 rdev = wiphy_to_dev(wdev->wiphy); 76 rdev = wiphy_to_rdev(wdev->wiphy);
77 77
78 switch (*mode) { 78 switch (*mode) {
79 case IW_MODE_INFRA: 79 case IW_MODE_INFRA:
@@ -253,12 +253,12 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
253 253
254/** 254/**
255 * cfg80211_wext_freq - get wext frequency for non-"auto" 255 * cfg80211_wext_freq - get wext frequency for non-"auto"
256 * @wiphy: the wiphy 256 * @dev: the net device
257 * @freq: the wext freq encoding 257 * @freq: the wext freq encoding
258 * 258 *
259 * Returns a frequency, or a negative error code, or 0 for auto. 259 * Returns a frequency, or a negative error code, or 0 for auto.
260 */ 260 */
261int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq) 261int cfg80211_wext_freq(struct iw_freq *freq)
262{ 262{
263 /* 263 /*
264 * Parse frequency - return 0 for auto and 264 * Parse frequency - return 0 for auto and
@@ -286,7 +286,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
286 struct iw_param *rts, char *extra) 286 struct iw_param *rts, char *extra)
287{ 287{
288 struct wireless_dev *wdev = dev->ieee80211_ptr; 288 struct wireless_dev *wdev = dev->ieee80211_ptr;
289 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 289 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
290 u32 orts = wdev->wiphy->rts_threshold; 290 u32 orts = wdev->wiphy->rts_threshold;
291 int err; 291 int err;
292 292
@@ -324,7 +324,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
324 struct iw_param *frag, char *extra) 324 struct iw_param *frag, char *extra)
325{ 325{
326 struct wireless_dev *wdev = dev->ieee80211_ptr; 326 struct wireless_dev *wdev = dev->ieee80211_ptr;
327 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 327 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
328 u32 ofrag = wdev->wiphy->frag_threshold; 328 u32 ofrag = wdev->wiphy->frag_threshold;
329 int err; 329 int err;
330 330
@@ -364,7 +364,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
364 struct iw_param *retry, char *extra) 364 struct iw_param *retry, char *extra)
365{ 365{
366 struct wireless_dev *wdev = dev->ieee80211_ptr; 366 struct wireless_dev *wdev = dev->ieee80211_ptr;
367 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 367 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
368 u32 changed = 0; 368 u32 changed = 0;
369 u8 olong = wdev->wiphy->retry_long; 369 u8 olong = wdev->wiphy->retry_long;
370 u8 oshort = wdev->wiphy->retry_short; 370 u8 oshort = wdev->wiphy->retry_short;
@@ -587,7 +587,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
587 struct iw_point *erq, char *keybuf) 587 struct iw_point *erq, char *keybuf)
588{ 588{
589 struct wireless_dev *wdev = dev->ieee80211_ptr; 589 struct wireless_dev *wdev = dev->ieee80211_ptr;
590 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 590 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
591 int idx, err; 591 int idx, err;
592 bool remove = false; 592 bool remove = false;
593 struct key_params params; 593 struct key_params params;
@@ -647,7 +647,7 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
647 struct iw_point *erq, char *extra) 647 struct iw_point *erq, char *extra)
648{ 648{
649 struct wireless_dev *wdev = dev->ieee80211_ptr; 649 struct wireless_dev *wdev = dev->ieee80211_ptr;
650 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 650 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
651 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; 651 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
652 const u8 *addr; 652 const u8 *addr;
653 int idx; 653 int idx;
@@ -775,7 +775,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
775 struct iw_freq *wextfreq, char *extra) 775 struct iw_freq *wextfreq, char *extra)
776{ 776{
777 struct wireless_dev *wdev = dev->ieee80211_ptr; 777 struct wireless_dev *wdev = dev->ieee80211_ptr;
778 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 778 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
779 struct cfg80211_chan_def chandef = { 779 struct cfg80211_chan_def chandef = {
780 .width = NL80211_CHAN_WIDTH_20_NOHT, 780 .width = NL80211_CHAN_WIDTH_20_NOHT,
781 }; 781 };
@@ -787,7 +787,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
787 case NL80211_IFTYPE_ADHOC: 787 case NL80211_IFTYPE_ADHOC:
788 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); 788 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
789 case NL80211_IFTYPE_MONITOR: 789 case NL80211_IFTYPE_MONITOR:
790 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 790 freq = cfg80211_wext_freq(wextfreq);
791 if (freq < 0) 791 if (freq < 0)
792 return freq; 792 return freq;
793 if (freq == 0) 793 if (freq == 0)
@@ -798,7 +798,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
798 return -EINVAL; 798 return -EINVAL;
799 return cfg80211_set_monitor_channel(rdev, &chandef); 799 return cfg80211_set_monitor_channel(rdev, &chandef);
800 case NL80211_IFTYPE_MESH_POINT: 800 case NL80211_IFTYPE_MESH_POINT:
801 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 801 freq = cfg80211_wext_freq(wextfreq);
802 if (freq < 0) 802 if (freq < 0)
803 return freq; 803 return freq;
804 if (freq == 0) 804 if (freq == 0)
@@ -818,7 +818,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
818 struct iw_freq *freq, char *extra) 818 struct iw_freq *freq, char *extra)
819{ 819{
820 struct wireless_dev *wdev = dev->ieee80211_ptr; 820 struct wireless_dev *wdev = dev->ieee80211_ptr;
821 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 821 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
822 struct cfg80211_chan_def chandef; 822 struct cfg80211_chan_def chandef;
823 int ret; 823 int ret;
824 824
@@ -847,7 +847,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
847 union iwreq_data *data, char *extra) 847 union iwreq_data *data, char *extra)
848{ 848{
849 struct wireless_dev *wdev = dev->ieee80211_ptr; 849 struct wireless_dev *wdev = dev->ieee80211_ptr;
850 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 850 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
851 enum nl80211_tx_power_setting type; 851 enum nl80211_tx_power_setting type;
852 int dbm = 0; 852 int dbm = 0;
853 853
@@ -899,7 +899,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
899 union iwreq_data *data, char *extra) 899 union iwreq_data *data, char *extra)
900{ 900{
901 struct wireless_dev *wdev = dev->ieee80211_ptr; 901 struct wireless_dev *wdev = dev->ieee80211_ptr;
902 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 902 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
903 int err, val; 903 int err, val;
904 904
905 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) 905 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -1119,7 +1119,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
1119 struct iw_param *wrq, char *extra) 1119 struct iw_param *wrq, char *extra)
1120{ 1120{
1121 struct wireless_dev *wdev = dev->ieee80211_ptr; 1121 struct wireless_dev *wdev = dev->ieee80211_ptr;
1122 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1122 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1123 bool ps = wdev->ps; 1123 bool ps = wdev->ps;
1124 int timeout = wdev->ps_timeout; 1124 int timeout = wdev->ps_timeout;
1125 int err; 1125 int err;
@@ -1177,7 +1177,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev,
1177 struct sockaddr *addr, char *extra) 1177 struct sockaddr *addr, char *extra)
1178{ 1178{
1179 struct wireless_dev *wdev = dev->ieee80211_ptr; 1179 struct wireless_dev *wdev = dev->ieee80211_ptr;
1180 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1180 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1181 int err; 1181 int err;
1182 1182
1183 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) 1183 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
@@ -1221,7 +1221,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
1221 struct iw_param *rate, char *extra) 1221 struct iw_param *rate, char *extra)
1222{ 1222{
1223 struct wireless_dev *wdev = dev->ieee80211_ptr; 1223 struct wireless_dev *wdev = dev->ieee80211_ptr;
1224 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1224 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1225 struct cfg80211_bitrate_mask mask; 1225 struct cfg80211_bitrate_mask mask;
1226 u32 fixed, maxrate; 1226 u32 fixed, maxrate;
1227 struct ieee80211_supported_band *sband; 1227 struct ieee80211_supported_band *sband;
@@ -1272,7 +1272,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
1272 struct iw_param *rate, char *extra) 1272 struct iw_param *rate, char *extra)
1273{ 1273{
1274 struct wireless_dev *wdev = dev->ieee80211_ptr; 1274 struct wireless_dev *wdev = dev->ieee80211_ptr;
1275 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1275 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1276 /* we are under RTNL - globally locked - so can use a static struct */ 1276 /* we are under RTNL - globally locked - so can use a static struct */
1277 static struct station_info sinfo; 1277 static struct station_info sinfo;
1278 u8 addr[ETH_ALEN]; 1278 u8 addr[ETH_ALEN];
@@ -1310,7 +1310,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
1310static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) 1310static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1311{ 1311{
1312 struct wireless_dev *wdev = dev->ieee80211_ptr; 1312 struct wireless_dev *wdev = dev->ieee80211_ptr;
1313 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1313 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1314 /* we are under RTNL - globally locked - so can use static structs */ 1314 /* we are under RTNL - globally locked - so can use static structs */
1315 static struct iw_statistics wstats; 1315 static struct iw_statistics wstats;
1316 static struct station_info sinfo; 1316 static struct station_info sinfo;
@@ -1449,7 +1449,7 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
1449 struct iw_point *data, char *extra) 1449 struct iw_point *data, char *extra)
1450{ 1450{
1451 struct wireless_dev *wdev = dev->ieee80211_ptr; 1451 struct wireless_dev *wdev = dev->ieee80211_ptr;
1452 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1452 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1453 struct cfg80211_pmksa cfg_pmksa; 1453 struct cfg80211_pmksa cfg_pmksa;
1454 struct iw_pmksa *pmksa = (struct iw_pmksa *)extra; 1454 struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
1455 1455
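
With the unused wiphy argument dropped, wext frequency parsing needs only the iw_freq encoding. A condensed sketch of the call shape the siwfreq hunks above now use (wrapper function hypothetical):

	static int demo_parse_freq(struct iw_freq *wextfreq)
	{
		int freq = cfg80211_wext_freq(wextfreq);	/* no wiphy needed */

		if (freq < 0)
			return freq;	/* malformed encoding */
		if (freq == 0)
			return -EINVAL;	/* "auto", not accepted here */
		return freq;		/* MHz */
	}
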
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 5d766b0118e8..ebcacca2f731 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -50,7 +50,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
50 struct iw_point *data, char *extra); 50 struct iw_point *data, char *extra);
51 51
52 52
53int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq); 53int cfg80211_wext_freq(struct iw_freq *freq);
54 54
55 55
56extern const struct iw_handler_def cfg80211_wext_handler; 56extern const struct iw_handler_def cfg80211_wext_handler;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 86c331a65664..c7e5c8eb4f24 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -67,7 +67,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
67 struct iw_freq *wextfreq, char *extra) 67 struct iw_freq *wextfreq, char *extra)
68{ 68{
69 struct wireless_dev *wdev = dev->ieee80211_ptr; 69 struct wireless_dev *wdev = dev->ieee80211_ptr;
70 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 70 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
71 struct ieee80211_channel *chan = NULL; 71 struct ieee80211_channel *chan = NULL;
72 int err, freq; 72 int err, freq;
73 73
@@ -75,7 +75,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
75 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) 75 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
76 return -EINVAL; 76 return -EINVAL;
77 77
78 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 78 freq = cfg80211_wext_freq(wextfreq);
79 if (freq < 0) 79 if (freq < 0)
80 return freq; 80 return freq;
81 81
@@ -169,7 +169,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
169 struct iw_point *data, char *ssid) 169 struct iw_point *data, char *ssid)
170{ 170{
171 struct wireless_dev *wdev = dev->ieee80211_ptr; 171 struct wireless_dev *wdev = dev->ieee80211_ptr;
172 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 172 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
173 size_t len = data->length; 173 size_t len = data->length;
174 int err; 174 int err;
175 175
@@ -260,7 +260,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
260 struct sockaddr *ap_addr, char *extra) 260 struct sockaddr *ap_addr, char *extra)
261{ 261{
262 struct wireless_dev *wdev = dev->ieee80211_ptr; 262 struct wireless_dev *wdev = dev->ieee80211_ptr;
263 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 263 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
264 u8 *bssid = ap_addr->sa_data; 264 u8 *bssid = ap_addr->sa_data;
265 int err; 265 int err;
266 266
@@ -333,7 +333,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
333 struct iw_point *data, char *extra) 333 struct iw_point *data, char *extra)
334{ 334{
335 struct wireless_dev *wdev = dev->ieee80211_ptr; 335 struct wireless_dev *wdev = dev->ieee80211_ptr;
336 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 336 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
337 u8 *ie = extra; 337 u8 *ie = extra;
338 int ie_len = data->length, err; 338 int ie_len = data->length, err;
339 339
@@ -390,7 +390,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
390 if (!wdev) 390 if (!wdev)
391 return -EOPNOTSUPP; 391 return -EOPNOTSUPP;
392 392
393 rdev = wiphy_to_dev(wdev->wiphy); 393 rdev = wiphy_to_rdev(wdev->wiphy);
394 394
395 if (wdev->iftype != NL80211_IFTYPE_STATION) 395 if (wdev->iftype != NL80211_IFTYPE_STATION)
396 return -EINVAL; 396 return -EINVAL;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3bb2cdc13b46..c51e8f7b8653 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -199,6 +199,7 @@ int xfrm_output(struct sk_buff *skb)
199 199
200 return xfrm_output2(skb); 200 return xfrm_output2(skb);
201} 201}
202EXPORT_SYMBOL_GPL(xfrm_output);
202 203
203int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) 204int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
204{ 205{
@@ -213,6 +214,7 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
213 return -EAFNOSUPPORT; 214 return -EAFNOSUPPORT;
214 return inner_mode->afinfo->extract_output(x, skb); 215 return inner_mode->afinfo->extract_output(x, skb);
215} 216}
217EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
216 218
217void xfrm_local_error(struct sk_buff *skb, int mtu) 219void xfrm_local_error(struct sk_buff *skb, int mtu)
218{ 220{
@@ -233,7 +235,4 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
233 afinfo->local_error(skb, mtu); 235 afinfo->local_error(skb, mtu);
234 xfrm_state_put_afinfo(afinfo); 236 xfrm_state_put_afinfo(afinfo);
235} 237}
236
237EXPORT_SYMBOL_GPL(xfrm_output);
238EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
239EXPORT_SYMBOL_GPL(xfrm_local_error); 238EXPORT_SYMBOL_GPL(xfrm_local_error);
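
The xfrm_output.c hunks above are purely a relocation: each EXPORT_SYMBOL_GPL() now sits directly under the function it exports instead of being grouped at the end of the file, the usual kernel convention. A trivial illustration with a hypothetical function:

	int demo_output(struct sk_buff *skb)
	{
		kfree_skb(skb);
		return 0;
	}
	EXPORT_SYMBOL_GPL(demo_output);	/* adjacent to the definition */
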
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c08fbd11ceff..a8ef5108e0d8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -769,7 +769,7 @@ EXPORT_SYMBOL(xfrm_policy_byid);
769 769
770#ifdef CONFIG_SECURITY_NETWORK_XFRM 770#ifdef CONFIG_SECURITY_NETWORK_XFRM
771static inline int 771static inline int
772xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 772xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
773{ 773{
774 int dir, err = 0; 774 int dir, err = 0;
775 775
@@ -783,10 +783,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
783 continue; 783 continue;
784 err = security_xfrm_policy_delete(pol->security); 784 err = security_xfrm_policy_delete(pol->security);
785 if (err) { 785 if (err) {
786 xfrm_audit_policy_delete(pol, 0, 786 xfrm_audit_policy_delete(pol, 0, task_valid);
787 audit_info->loginuid,
788 audit_info->sessionid,
789 audit_info->secid);
790 return err; 787 return err;
791 } 788 }
792 } 789 }
@@ -800,9 +797,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
800 pol->security); 797 pol->security);
801 if (err) { 798 if (err) {
802 xfrm_audit_policy_delete(pol, 0, 799 xfrm_audit_policy_delete(pol, 0,
803 audit_info->loginuid, 800 task_valid);
804 audit_info->sessionid,
805 audit_info->secid);
806 return err; 801 return err;
807 } 802 }
808 } 803 }
@@ -812,19 +807,19 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
812} 807}
813#else 808#else
814static inline int 809static inline int
815xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 810xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
816{ 811{
817 return 0; 812 return 0;
818} 813}
819#endif 814#endif
820 815
821int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 816int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
822{ 817{
823 int dir, err = 0, cnt = 0; 818 int dir, err = 0, cnt = 0;
824 819
825 write_lock_bh(&net->xfrm.xfrm_policy_lock); 820 write_lock_bh(&net->xfrm.xfrm_policy_lock);
826 821
827 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 822 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
828 if (err) 823 if (err)
829 goto out; 824 goto out;
830 825
@@ -841,9 +836,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
841 write_unlock_bh(&net->xfrm.xfrm_policy_lock); 836 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
842 cnt++; 837 cnt++;
843 838
844 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 839 xfrm_audit_policy_delete(pol, 1, task_valid);
845 audit_info->sessionid,
846 audit_info->secid);
847 840
848 xfrm_policy_kill(pol); 841 xfrm_policy_kill(pol);
849 842
@@ -862,10 +855,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
862 write_unlock_bh(&net->xfrm.xfrm_policy_lock); 855 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
863 cnt++; 856 cnt++;
864 857
865 xfrm_audit_policy_delete(pol, 1, 858 xfrm_audit_policy_delete(pol, 1, task_valid);
866 audit_info->loginuid,
867 audit_info->sessionid,
868 audit_info->secid);
869 xfrm_policy_kill(pol); 859 xfrm_policy_kill(pol);
870 860
871 write_lock_bh(&net->xfrm.xfrm_policy_lock); 861 write_lock_bh(&net->xfrm.xfrm_policy_lock);
@@ -2783,21 +2773,19 @@ static struct notifier_block xfrm_dev_notifier = {
2783static int __net_init xfrm_statistics_init(struct net *net) 2773static int __net_init xfrm_statistics_init(struct net *net)
2784{ 2774{
2785 int rv; 2775 int rv;
2786 2776 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2787 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2777 if (!net->mib.xfrm_statistics)
2788 sizeof(struct linux_xfrm_mib),
2789 __alignof__(struct linux_xfrm_mib)) < 0)
2790 return -ENOMEM; 2778 return -ENOMEM;
2791 rv = xfrm_proc_init(net); 2779 rv = xfrm_proc_init(net);
2792 if (rv < 0) 2780 if (rv < 0)
2793 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2781 free_percpu(net->mib.xfrm_statistics);
2794 return rv; 2782 return rv;
2795} 2783}
2796 2784
2797static void xfrm_statistics_fini(struct net *net) 2785static void xfrm_statistics_fini(struct net *net)
2798{ 2786{
2799 xfrm_proc_fini(net); 2787 xfrm_proc_fini(net);
2800 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2788 free_percpu(net->mib.xfrm_statistics);
2801} 2789}
2802#else 2790#else
2803static int __net_init xfrm_statistics_init(struct net *net) 2791static int __net_init xfrm_statistics_init(struct net *net)
@@ -2862,21 +2850,14 @@ out_byidx:
2862 2850
2863static void xfrm_policy_fini(struct net *net) 2851static void xfrm_policy_fini(struct net *net)
2864{ 2852{
2865 struct xfrm_audit audit_info;
2866 unsigned int sz; 2853 unsigned int sz;
2867 int dir; 2854 int dir;
2868 2855
2869 flush_work(&net->xfrm.policy_hash_work); 2856 flush_work(&net->xfrm.policy_hash_work);
2870#ifdef CONFIG_XFRM_SUB_POLICY 2857#ifdef CONFIG_XFRM_SUB_POLICY
2871 audit_info.loginuid = INVALID_UID; 2858 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2872 audit_info.sessionid = (unsigned int)-1;
2873 audit_info.secid = 0;
2874 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2875#endif 2859#endif
2876 audit_info.loginuid = INVALID_UID; 2860 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2877 audit_info.sessionid = (unsigned int)-1;
2878 audit_info.secid = 0;
2879 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2880 2861
2881 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2862 WARN_ON(!list_empty(&net->xfrm.policy_all));
2882 2863
@@ -2991,15 +2972,14 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2991 } 2972 }
2992} 2973}
2993 2974
2994void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2975void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
2995 kuid_t auid, unsigned int sessionid, u32 secid)
2996{ 2976{
2997 struct audit_buffer *audit_buf; 2977 struct audit_buffer *audit_buf;
2998 2978
2999 audit_buf = xfrm_audit_start("SPD-add"); 2979 audit_buf = xfrm_audit_start("SPD-add");
3000 if (audit_buf == NULL) 2980 if (audit_buf == NULL)
3001 return; 2981 return;
3002 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2982 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3003 audit_log_format(audit_buf, " res=%u", result); 2983 audit_log_format(audit_buf, " res=%u", result);
3004 xfrm_audit_common_policyinfo(xp, audit_buf); 2984 xfrm_audit_common_policyinfo(xp, audit_buf);
3005 audit_log_end(audit_buf); 2985 audit_log_end(audit_buf);
@@ -3007,14 +2987,14 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
3007EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2987EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3008 2988
3009void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2989void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3010 kuid_t auid, unsigned int sessionid, u32 secid) 2990 bool task_valid)
3011{ 2991{
3012 struct audit_buffer *audit_buf; 2992 struct audit_buffer *audit_buf;
3013 2993
3014 audit_buf = xfrm_audit_start("SPD-delete"); 2994 audit_buf = xfrm_audit_start("SPD-delete");
3015 if (audit_buf == NULL) 2995 if (audit_buf == NULL)
3016 return; 2996 return;
3017 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2997 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3018 audit_log_format(audit_buf, " res=%u", result); 2998 audit_log_format(audit_buf, " res=%u", result);
3019 xfrm_audit_common_policyinfo(xp, audit_buf); 2999 xfrm_audit_common_policyinfo(xp, audit_buf);
3020 audit_log_end(audit_buf); 3000 audit_log_end(audit_buf);
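
Across the xfrm hunks here and below, the struct xfrm_audit plumbing (loginuid/sessionid/secid threaded from every syscall entry point) collapses into a single bool task_valid meaning "current is the acting task". The updated xfrm_audit_helper_usrinfo() itself is not part of this diff; a sketch of what the call sites imply it does (body assumed):

	static inline void xfrm_audit_helper_usrinfo(bool task_valid,
						     struct audit_buffer *audit_buf)
	{
		const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
						    audit_get_loginuid(current) :
						    INVALID_UID);
		const unsigned int ses = task_valid ?
					 audit_get_sessionid(current) :
					 (unsigned int)-1;

		audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
	}
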
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fc5abd0b456f..9c4fbd8935f4 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -54,8 +54,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
54 int i; 54 int i;
55 for (i = 0; xfrm_mib_list[i].name; i++) 55 for (i = 0; xfrm_mib_list[i].name; i++)
56 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, 56 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
57 snmp_fold_field((void __percpu **) 57 snmp_fold_field(net->mib.xfrm_statistics,
58 net->mib.xfrm_statistics,
59 xfrm_mib_list[i].entry)); 58 xfrm_mib_list[i].entry));
60 return 0; 59 return 0;
61} 60}
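
The statistics hunks in xfrm_policy.c above and the read side here complete a switch from the old snmp_mib_init()/snmp_mib_free() pair to plain per-cpu allocation. The resulting lifecycle, condensed (the increment line is illustrative; writers in the tree go through the XFRM_INC_STATS() wrapper):

	unsigned long val;
	struct linux_xfrm_mib __percpu *stats;

	stats = alloc_percpu(struct linux_xfrm_mib);
	if (!stats)
		return -ENOMEM;

	/* lockless writer on the local CPU */
	this_cpu_inc(stats->mibs[LINUX_MIB_XFRMOUTERROR]);

	/* reader sums the counter across all CPUs */
	val = snmp_fold_field(stats, LINUX_MIB_XFRMOUTERROR);

	free_percpu(stats);
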
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 8e9c781a6bba..0ab54134bb40 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -463,9 +463,7 @@ expired:
463 if (!err) 463 if (!err)
464 km_state_expired(x, 1, 0); 464 km_state_expired(x, 1, 0);
465 465
466 xfrm_audit_state_delete(x, err ? 0 : 1, 466 xfrm_audit_state_delete(x, err ? 0 : 1, true);
467 audit_get_loginuid(current),
468 audit_get_sessionid(current), 0);
469 467
470out: 468out:
471 spin_unlock(&x->lock); 469 spin_unlock(&x->lock);
@@ -562,7 +560,7 @@ EXPORT_SYMBOL(xfrm_state_delete);
562 560
563#ifdef CONFIG_SECURITY_NETWORK_XFRM 561#ifdef CONFIG_SECURITY_NETWORK_XFRM
564static inline int 562static inline int
565xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info) 563xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
566{ 564{
567 int i, err = 0; 565 int i, err = 0;
568 566
@@ -572,10 +570,7 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
572 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 570 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
573 if (xfrm_id_proto_match(x->id.proto, proto) && 571 if (xfrm_id_proto_match(x->id.proto, proto) &&
574 (err = security_xfrm_state_delete(x)) != 0) { 572 (err = security_xfrm_state_delete(x)) != 0) {
575 xfrm_audit_state_delete(x, 0, 573 xfrm_audit_state_delete(x, 0, task_valid);
576 audit_info->loginuid,
577 audit_info->sessionid,
578 audit_info->secid);
579 return err; 574 return err;
580 } 575 }
581 } 576 }
@@ -585,18 +580,18 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
585} 580}
586#else 581#else
587static inline int 582static inline int
588xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info) 583xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
589{ 584{
590 return 0; 585 return 0;
591} 586}
592#endif 587#endif
593 588
594int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) 589int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
595{ 590{
596 int i, err = 0, cnt = 0; 591 int i, err = 0, cnt = 0;
597 592
598 spin_lock_bh(&net->xfrm.xfrm_state_lock); 593 spin_lock_bh(&net->xfrm.xfrm_state_lock);
599 err = xfrm_state_flush_secctx_check(net, proto, audit_info); 594 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
600 if (err) 595 if (err)
601 goto out; 596 goto out;
602 597
@@ -612,9 +607,7 @@ restart:
612 607
613 err = xfrm_state_delete(x); 608 err = xfrm_state_delete(x);
614 xfrm_audit_state_delete(x, err ? 0 : 1, 609 xfrm_audit_state_delete(x, err ? 0 : 1,
615 audit_info->loginuid, 610 task_valid);
616 audit_info->sessionid,
617 audit_info->secid);
618 xfrm_state_put(x); 611 xfrm_state_put(x);
619 if (!err) 612 if (!err)
620 cnt++; 613 cnt++;
@@ -2128,14 +2121,10 @@ out_bydst:
2128 2121
2129void xfrm_state_fini(struct net *net) 2122void xfrm_state_fini(struct net *net)
2130{ 2123{
2131 struct xfrm_audit audit_info;
2132 unsigned int sz; 2124 unsigned int sz;
2133 2125
2134 flush_work(&net->xfrm.state_hash_work); 2126 flush_work(&net->xfrm.state_hash_work);
2135 audit_info.loginuid = INVALID_UID; 2127 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
2136 audit_info.sessionid = (unsigned int)-1;
2137 audit_info.secid = 0;
2138 xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
2139 flush_work(&net->xfrm.state_gc_work); 2128 flush_work(&net->xfrm.state_gc_work);
2140 2129
2141 WARN_ON(!list_empty(&net->xfrm.state_all)); 2130 WARN_ON(!list_empty(&net->xfrm.state_all));
@@ -2198,30 +2187,28 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2198 } 2187 }
2199} 2188}
2200 2189
2201void xfrm_audit_state_add(struct xfrm_state *x, int result, 2190void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2202 kuid_t auid, unsigned int sessionid, u32 secid)
2203{ 2191{
2204 struct audit_buffer *audit_buf; 2192 struct audit_buffer *audit_buf;
2205 2193
2206 audit_buf = xfrm_audit_start("SAD-add"); 2194 audit_buf = xfrm_audit_start("SAD-add");
2207 if (audit_buf == NULL) 2195 if (audit_buf == NULL)
2208 return; 2196 return;
2209 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2197 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2210 xfrm_audit_helper_sainfo(x, audit_buf); 2198 xfrm_audit_helper_sainfo(x, audit_buf);
2211 audit_log_format(audit_buf, " res=%u", result); 2199 audit_log_format(audit_buf, " res=%u", result);
2212 audit_log_end(audit_buf); 2200 audit_log_end(audit_buf);
2213} 2201}
2214EXPORT_SYMBOL_GPL(xfrm_audit_state_add); 2202EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2215 2203
2216void xfrm_audit_state_delete(struct xfrm_state *x, int result, 2204void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2217 kuid_t auid, unsigned int sessionid, u32 secid)
2218{ 2205{
2219 struct audit_buffer *audit_buf; 2206 struct audit_buffer *audit_buf;
2220 2207
2221 audit_buf = xfrm_audit_start("SAD-delete"); 2208 audit_buf = xfrm_audit_start("SAD-delete");
2222 if (audit_buf == NULL) 2209 if (audit_buf == NULL)
2223 return; 2210 return;
2224 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2211 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2225 xfrm_audit_helper_sainfo(x, audit_buf); 2212 xfrm_audit_helper_sainfo(x, audit_buf);
2226 audit_log_format(audit_buf, " res=%u", result); 2213 audit_log_format(audit_buf, " res=%u", result);
2227 audit_log_end(audit_buf); 2214 audit_log_end(audit_buf);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 51398ae6cda8..412d9dc3a873 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -597,9 +597,6 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
597 struct xfrm_state *x; 597 struct xfrm_state *x;
598 int err; 598 int err;
599 struct km_event c; 599 struct km_event c;
600 kuid_t loginuid = audit_get_loginuid(current);
601 unsigned int sessionid = audit_get_sessionid(current);
602 u32 sid;
603 600
604 err = verify_newsa_info(p, attrs); 601 err = verify_newsa_info(p, attrs);
605 if (err) 602 if (err)
@@ -615,8 +612,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
615 else 612 else
616 err = xfrm_state_update(x); 613 err = xfrm_state_update(x);
617 614
618 security_task_getsecid(current, &sid); 615 xfrm_audit_state_add(x, err ? 0 : 1, true);
619 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
620 616
621 if (err < 0) { 617 if (err < 0) {
622 x->km.state = XFRM_STATE_DEAD; 618 x->km.state = XFRM_STATE_DEAD;
@@ -676,9 +672,6 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
676 int err = -ESRCH; 672 int err = -ESRCH;
677 struct km_event c; 673 struct km_event c;
678 struct xfrm_usersa_id *p = nlmsg_data(nlh); 674 struct xfrm_usersa_id *p = nlmsg_data(nlh);
679 kuid_t loginuid = audit_get_loginuid(current);
680 unsigned int sessionid = audit_get_sessionid(current);
681 u32 sid;
682 675
683 x = xfrm_user_state_lookup(net, p, attrs, &err); 676 x = xfrm_user_state_lookup(net, p, attrs, &err);
684 if (x == NULL) 677 if (x == NULL)
@@ -703,8 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
703 km_state_notify(x, &c); 696 km_state_notify(x, &c);
704 697
705out: 698out:
706 security_task_getsecid(current, &sid); 699 xfrm_audit_state_delete(x, err ? 0 : 1, true);
707 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
708 xfrm_state_put(x); 700 xfrm_state_put(x);
709 return err; 701 return err;
710} 702}
@@ -955,6 +947,20 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
955 return skb; 947 return skb;
956} 948}
957 949
950/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
951 * Must be called with RCU read lock.
952 */
953static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
954 u32 pid, unsigned int group)
955{
956 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
957
958 if (nlsk)
959 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
960 else
961 return -1;
962}
963
958static inline size_t xfrm_spdinfo_msgsize(void) 964static inline size_t xfrm_spdinfo_msgsize(void)
959{ 965{
960 return NLMSG_ALIGN(4) 966 return NLMSG_ALIGN(4)
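
The xfrm_nlmsg_multicast() wrapper added above exists because net->xfrm.nlsk can be torn down while notifications are in flight, so callers must resolve it inside an RCU read-side section, as its comment requires. A condensed caller shape (function name hypothetical):

	static int demo_notify(struct net *net, struct sk_buff *skb)
	{
		int err;

		rcu_read_lock();
		err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
		rcu_read_unlock();

		return err;	/* -1 if the netlink socket is already gone */
	}
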
@@ -1414,9 +1420,6 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1414 struct km_event c; 1420 struct km_event c;
1415 int err; 1421 int err;
1416 int excl; 1422 int excl;
1417 kuid_t loginuid = audit_get_loginuid(current);
1418 unsigned int sessionid = audit_get_sessionid(current);
1419 u32 sid;
1420 1423
1421 err = verify_newpolicy_info(p); 1424 err = verify_newpolicy_info(p);
1422 if (err) 1425 if (err)
@@ -1435,8 +1438,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 	 * a type XFRM_MSG_UPDPOLICY - JHS */
 	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
 	err = xfrm_policy_insert(p->dir, xp, excl);
-	security_task_getsecid(current, &sid);
-	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
+	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 
 	if (err) {
 		security_xfrm_policy_free(xp->security);
@@ -1673,13 +1675,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 					    NETLINK_CB(skb).portid);
 		}
 	} else {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
-		xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
-					 sid);
+		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
 		if (err != 0)
 			goto out;
@@ -1704,13 +1700,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	struct km_event c;
 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
-	struct xfrm_audit audit_info;
 	int err;
 
-	audit_info.loginuid = audit_get_loginuid(current);
-	audit_info.sessionid = audit_get_sessionid(current);
-	security_task_getsecid(current, &audit_info.secid);
-	err = xfrm_state_flush(net, p->proto, &audit_info);
+	err = xfrm_state_flush(net, p->proto, true);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;
@@ -1894,16 +1886,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct km_event c;
 	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err;
-	struct xfrm_audit audit_info;
 
 	err = copy_from_user_policy_type(&type, attrs);
 	if (err)
 		return err;
 
-	audit_info.loginuid = audit_get_loginuid(current);
-	audit_info.sessionid = audit_get_sessionid(current);
-	security_task_getsecid(current, &audit_info.secid);
-	err = xfrm_policy_flush(net, type, &audit_info);
+	err = xfrm_policy_flush(net, type, true);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;
@@ -1969,14 +1957,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	err = 0;
 	if (up->hard) {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
 		xfrm_policy_delete(xp, p->dir);
-		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
-
+		xfrm_audit_policy_delete(xp, 1, true);
 	} else {
 		// reset the timers here?
 		WARN(1, "Dont know what to do with soft policy expire\n");
@@ -2012,13 +1994,8 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 	km_state_expired(x, ue->hard, nlh->nlmsg_pid);
 
 	if (ue->hard) {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
 		__xfrm_state_delete(x);
-		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
+		xfrm_audit_state_delete(x, 1, true);
 	}
 	err = 0;
 out:
@@ -2265,7 +2242,7 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
 }
 #else
 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2456,7 +2433,7 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
 		return -EMSGSIZE;
 	}
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
 }
 
 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2471,7 +2448,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event
 	if (build_aevent(skb, x, c) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
 }
 
 static int xfrm_notify_sa_flush(const struct km_event *c)
@@ -2497,7 +2474,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
 }
 
 static inline size_t xfrm_sa_len(struct xfrm_state *x)
@@ -2584,7 +2561,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2675,7 +2652,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
 	if (build_acquire(skb, x, xt, xp) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
 }
 
 /* User gives us xfrm_user_policy_info followed by an array of 0
@@ -2789,7 +2766,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
 	if (build_polexpire(skb, xp, dir, c) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
 }
 
 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2851,7 +2828,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2879,7 +2856,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
 
 	nlmsg_end(skb, nlh);
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -2948,7 +2925,7 @@ static int xfrm_send_report(struct net *net, u8 proto,
 	if (build_report(skb, proto, sel, addr) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
 }
 
 static inline size_t xfrm_mapping_msgsize(void)
@@ -3000,7 +2977,7 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
 	if (build_mapping(skb, x, ipaddr, sport) < 0)
 		BUG();
 
-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
+	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
 }
 
 static bool xfrm_is_alive(const struct km_event *c)
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 14d04e63b1f0..be491a74c1ed 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -147,7 +147,7 @@ struct security_class_mapping secclass_map[] = {
147 { "peer", { "recv", NULL } }, 147 { "peer", { "recv", NULL } },
148 { "capability2", 148 { "capability2",
149 { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend", 149 { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
150 NULL } }, 150 "audit_read", NULL } },
151 { "kernel_service", { "use_as_override", "create_files_as", NULL } }, 151 { "kernel_service", { "use_as_override", "create_files_as", NULL } },
152 { "tun_socket", 152 { "tun_socket",
153 { COMMON_SOCK_PERMS, "attach_queue", NULL } }, 153 { COMMON_SOCK_PERMS, "attach_queue", NULL } },
diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l
index bf7be77ddd62..833a96611da6 100644
--- a/tools/net/bpf_exp.l
+++ b/tools/net/bpf_exp.l
@@ -92,6 +92,7 @@ extern void yyerror(const char *str);
92"#"?("cpu") { return K_CPU; } 92"#"?("cpu") { return K_CPU; }
93"#"?("vlan_tci") { return K_VLANT; } 93"#"?("vlan_tci") { return K_VLANT; }
94"#"?("vlan_pr") { return K_VLANP; } 94"#"?("vlan_pr") { return K_VLANP; }
95"#"?("rand") { return K_RAND; }
95 96
96":" { return ':'; } 97":" { return ':'; }
97"," { return ','; } 98"," { return ','; }
diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y
index d15efc989ef5..e6306c51c26f 100644
--- a/tools/net/bpf_exp.y
+++ b/tools/net/bpf_exp.y
@@ -56,7 +56,7 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type);
 %token OP_LDXI
 
 %token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE
-%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF
+%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND
 
 %token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
 
@@ -164,6 +164,9 @@ ldb
 	| OP_LDB K_POFF {
 		bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
 				   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+	| OP_LDB K_RAND {
+		bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+				   SKF_AD_OFF + SKF_AD_RANDOM); }
 	;
 
 ldh
@@ -212,6 +215,9 @@ ldh
 	| OP_LDH K_POFF {
 		bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
 				   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+	| OP_LDH K_RAND {
+		bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+				   SKF_AD_OFF + SKF_AD_RANDOM); }
 	;
 
 ldi
@@ -265,6 +271,9 @@ ld
 	| OP_LD K_POFF {
 		bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
 				   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+	| OP_LD K_RAND {
+		bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+				   SKF_AD_OFF + SKF_AD_RANDOM); }
 	| OP_LD 'M' '[' number ']' {
 		bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
 	| OP_LD '[' 'x' '+' number ']' {
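
The three rules above make `ld rand`, `ldh rand` and `ldb rand` assemble to an ancillary load of SKF_AD_RANDOM. For comparison, a hand-built classic BPF program using the same ancillary offset; the array name and the 1-in-4 sampling ratio are illustrative only:

#include <linux/filter.h>

/* Keep roughly one packet in four, using the per-packet
 * pseudo-random word exposed through SKF_AD_RANDOM.
 */
struct sock_filter sample_one_in_four[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RANDOM),
	BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 3),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};
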
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index cfe0cdcda3de..c5baf9c591b7 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -43,8 +43,7 @@ static void get_exec_path(char *tpath, size_t size)
 	free(path);
 }
 
-static void get_asm_insns(uint8_t *image, size_t len, unsigned long base,
-			  int opcodes)
+static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 {
 	int count, i, pc = 0;
 	char tpath[256];
@@ -107,13 +106,13 @@ static void put_klog_buff(char *buff)
 }
 
 static int get_last_jit_image(char *haystack, size_t hlen,
-			      uint8_t *image, size_t ilen,
-			      unsigned long *base)
+			      uint8_t *image, size_t ilen)
 {
 	char *ptr, *pptr, *tmp;
 	off_t off = 0;
 	int ret, flen, proglen, pass, ulen = 0;
 	regmatch_t pmatch[1];
+	unsigned long base;
 	regex_t regex;
 
 	if (hlen == 0)
@@ -136,7 +135,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
 
 	ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
 	ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
-		     &flen, &proglen, &pass, base);
+		     &flen, &proglen, &pass, &base);
 	if (ret != 4)
 		return 0;
 
@@ -162,7 +161,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
 	assert(ulen == proglen);
 	printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
 	       proglen, pass, flen);
-	printf("%lx + <x>:\n", *base);
+	printf("%lx + <x>:\n", base);
 
 	regfree(&regex);
 	return ulen;
@@ -172,8 +171,7 @@ int main(int argc, char **argv)
 {
 	int len, klen, opcodes = 0;
 	char *kbuff;
-	unsigned long base;
-	uint8_t image[4096];
+	static uint8_t image[32768];
 
 	if (argc > 1) {
 		if (!strncmp("-o", argv[argc - 1], 2)) {
@@ -189,9 +187,9 @@ int main(int argc, char **argv)
 
 	kbuff = get_klog_buff(&klen);
 
-	len = get_last_jit_image(kbuff, klen, image, sizeof(image), &base);
-	if (len > 0 && base > 0)
-		get_asm_insns(image, len, base, opcodes);
+	len = get_last_jit_image(kbuff, klen, image, sizeof(image));
+	if (len > 0)
+		get_asm_insns(image, len, opcodes);
 
 	put_klog_buff(kbuff);
 
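
With base now a local in get_last_jit_image(), sscanf() receives &base directly and main() no longer threads the address through. The parsing contract itself is unchanged; a standalone sketch of it (the log values below are made up, and such lines only appear when the JIT's debug output is enabled, e.g. bpf_jit_enable set to 2):

#include <stdio.h>

int main(void)
{
	/* Illustrative line in the format the tool extracts from the
	 * kernel log; real values differ for every JITed filter.
	 */
	const char *line = "flen=6 proglen=70 pass=3 image=ffffffffa0069c8f";
	int flen, proglen, pass;
	unsigned long base;

	if (sscanf(line, "flen=%d proglen=%d pass=%d image=%lx",
		   &flen, &proglen, &pass, &base) == 4)
		printf("%lx + <x>:\n", base);
	return 0;
}
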
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 750512ba2c88..c7493b8f9b0e 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -14,6 +14,12 @@ all: $(NET_PROGS)
 run_tests: all
 	@/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
 	@/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
-
+	@if /sbin/modprobe test_bpf ; then \
+		/sbin/rmmod test_bpf; \
+		echo "test_bpf: ok"; \
+	else \
+		echo "test_bpf: [FAIL]"; \
+		exit 1; \
+	fi
 clean:
 	$(RM) $(NET_PROGS)